- {createRepoFormStore.fields.name.error +
- ' No spaces and special characters other than - and . are allowed. Repo names should not begin/end with a . or - .'}
-
- );
- descriptionClass = 'large-12 columns form-error';
- }
- var defaultValue = createRepoFormStore.values.is_private ? 'private' : 'public';
- var errText = '';
- if (createRepoFormStore.fields.is_private.hasError) {
- errText =
-
- {createRepoFormStore.fields.is_private.error}
- ;
- }
- const subtitleContent = `1. Choose a namespace *(Required)*
-2. Add a repository name *(Required)*
-3. Add a short description
-4. Add markdown to the full description field
-5. Set it to be a private or public repository`;
-
- return (
- {subtitleContent}}>
-
- This will be the base wrapper of the 'Users' page where either your or another users profile will appear
- This will let you see your public facing page at /u/username/ too
- 'Your' homepage/dashboard will live at /home/
-
-
-
- This is root user page.
- When not looking at a specific user or an owned image
- This will show a list of repos/images owned by the root user
- This could be a image box of some sort
-
- );
- }
-});
-
-var User = React.createClass({
- // This page should either be the users home page or the view page for other users
- render: function() {
- return (
-
-
- This is the UID: {this.props.params.uid}
- This is main user page.
- This will show a list of repos/images owned by the user
-
-
-
- Organizations can have multiple Teams. Teams can have differing permissions. Namespace is
- unique and this is where repositories for this organization will be created.
- }>
-
-
-
- );
- }
-});
-
-export default connectToStores(AddOrganizationForm,
- [
- AddOrganizationStore
- ],
- function({ getStore }, props) {
- return getStore(AddOrganizationStore).getState();
- });
diff --git a/app/scripts/components/account/BillingPlans.css b/app/scripts/components/account/BillingPlans.css
deleted file mode 100644
index d0d01aa909..0000000000
--- a/app/scripts/components/account/BillingPlans.css
+++ /dev/null
@@ -1,21 +0,0 @@
-@import 'dux/css/colors.css';
-
-.body {
- margin-top: 2rem;
-}
-
-.error {
- background: var(--primary-5);
- color:white;
-}
-
-.plansQuestion {
- margin-bottom: 2rem;
-}
-
-.questionTitle {
- font-weight: 500;
- color: var(--secondary-1);
-}
-
-/*.questionAnswer {}*/
diff --git a/app/scripts/components/account/BillingPlans.jsx b/app/scripts/components/account/BillingPlans.jsx
deleted file mode 100644
index dc241415cc..0000000000
--- a/app/scripts/components/account/BillingPlans.jsx
+++ /dev/null
@@ -1,249 +0,0 @@
-/*global MktoForms2*/
-
-'use strict';
-
-import React, { PropTypes } from 'react';
-const { string, array, object, func, shape } = PropTypes;
-import { Link } from 'react-router';
-import connectToStores from 'fluxible-addons-react/connectToStores';
-import isEmpty from 'lodash/lang/isEmpty';
-
-import BillingPlansStore from '../../stores/BillingPlansStore';
-import updateSubscriptionPlanOrPackage from '../../actions/updateSubscriptionPlanOrPackage.js';
-
-import PlansTable from './billingplans/Plans';
-import EnterpriseSubscriptions from './billingplans/EnterpriseSubscriptions.jsx';
-import BillingInfo from './billingplans/BillingInfo.jsx';
-import InvoiceTables from './billingplans/InvoiceTables.jsx';
-import styles from './BillingPlans.css';
-import { FullSection } from '../common/Sections.jsx';
-import Route404 from '../common/RouteNotFound404Page.jsx';
-import { PageHeader } from 'dux';
-import DocumentTitle from 'react-document-title';
-
-/* Marketo constants for the marketing survey form */
-const mktoFormId = 1317;
-const mktoFormElemId = 'mktoForm_' + mktoFormId;
-const mktoFormBaseUrl = 'https://app-sj05.marketo.com';
-const mktoFormMunchkinId = '929-FJL-178';
-
-var BillingInfoPage = React.createClass({
-
- contextTypes: {
- executeAction: func.isRequired
- },
-
- PropTypes: {
- JWT: string,
- user: object,
- currentPlan: shape({
- id: string,
- plan: string,
- package: string
- }),
- accountInfo: shape({
- account_code: string,
- username: string,
- email: string,
- first_name: string,
- last_name: string,
- company_name: string
- }),
- billingInfo: shape({
- city: string,
- state: string,
- zip: string,
- first_name: string,
- last_name: string,
- address1: string,
- address2: string,
- country: string
- }),
- plansError: string,
- invoices: array,
- unsubscribing: string,
- updatePlan: string
- },
-
- stopSubscription(subscriptionType) {
- /**
- * UPDATE 4/6/16 "cloud_metered" is the new "free" plan instead of deleting
- * per ticket HUB-2219
- */
- return () => {
- const { JWT, user, currentPlan } = this.props;
- const namespace = user.username || user.orgname;
- let subscriptionData = {
- JWT,
- username: namespace,
- subscription_uuid: currentPlan.subscription_uuid
- };
- if (subscriptionType === 'plan') {
- subscriptionData.plan_code = 'cloud_metered';
- if (currentPlan.package) {
- // preserve package (like cloud_starter) if it exists
- subscriptionData.package_code = currentPlan.package;
- }
- } else if (subscriptionType === 'package') {
- // If you are removing a package, leave the plan alone
- subscriptionData.plan_code = currentPlan.plan;
- // Explicitly set null to remove
- subscriptionData.package_code = null;
- }
- this.context.executeAction(updateSubscriptionPlanOrPackage, subscriptionData);
- };
- },
-
- showSurveyModal(subscriptionType) {
- return () => {
- if (typeof MktoForms2 === 'object' &&
- typeof MktoForms2.loadForm === 'function') {
- MktoForms2.loadForm(
- mktoFormBaseUrl,
- mktoFormMunchkinId,
- mktoFormId,
- (form) => {
- form.onSubmit(this.stopSubscription(subscriptionType));
- // Don't refresh the page after a successful submission.
- // React component will re-render itself.
- form.onSuccess(() => false);
- form.vals({'Email': this.props.accountInfo.email});
- MktoForms2.lightbox(form).show();
- });
- } else {
- // If for any reason there is a problem with the Marketo script,
- // we shouldn't block the user from stopping his/her subscription.
- this.stopSubscription(subscriptionType);
- }
- };
- },
-
- getSurveyModalHtml() {
- return (
-
- The Docker Hub Registry is free to use for public repositories. Plans with private repositories are
- available in different sizes. All plans allow collaboration with unlimited people.
-
-
- );
- plansFooter = (
-
-
-
What types of payment do you accept?
-
Credit card (Visa, MasterCard, Discover, or American Express).
-
-
-
Do I have to pay to use your service?
-
No, you only have to pay if you require one or more private repository.
-
-
-
Can I change my plan at a later time?
-
Yes, you can upgrade or downgrade at any time.
-
-
-
What if I need a larger plan?
-
Please contact our Sales team at sales@docker.com or call us toll free at 888-214-4258.
Your user account will be transformed into an organization account where all administrative duties are left to another user or group of users. You will no longer be able to login to this account.
-
-
-
Email addresses for this account will be removed, freeing them up to be used for any other accounts.
-
-
-
Converting your account removes any associations to other services like GitHub or Atlassian Bitbucket. You will be able to link your external accounts to another Docker Hub user.
-
-
-
Billing details and Private Repository plans will remain attached to this account after it is converted to an organization.
-
-
-
Repository namespaces and names remain unchanged. Any user collaborators that you have configured for these repositories will be removed and must be reconfigured using group collaborators.
-
-
-
Automated Builds for this account will be updated to appear as if they were originallly configured by the initial organization owner. Any user in a group with 'admin' level access to a repository will be able to edit Automated Build Configurations.
-
-
-
-
WARNING
-
This account conversion operation can not be undone.
-
-
-
-
-
In order to complete the conversion of your account to an organization you will need to enter the Docker ID of an **existing** Docker Hub user account.
- The user account you specify will become a member of the Owners group and will have full administrative privileges to manage the organization.
-
- These account links are currently used for Automated Builds,
- so that we can access your project lists and help you configure your Automated Builds.
- Please note: A github/bitbucket account can be connected to only one docker hub account at a time.}>
-
- );
- }
-});
-
-function mkPage(pageNumber) {
- return (
-
- );
-}
-
-export default createClass({
- displayName: 'Pagination',
- propTypes: {
- next: PropTypes.string,
- prev: PropTypes.string,
- currentPage: PropTypes.number.isRequired,
- pageSize: PropTypes.number.isRequired,
- onChangePage: PropTypes.func.isRequired
- },
- _onClick(pageNumber) {
- return (e) => {
- //Check if currentPage is to the right of ellipsis and the last from the beginning or the end
- //based on whether it is the beginning side or end side, update the page ranges
- e.preventDefault();
- this.props.onChangePage(pageNumber);
- };
- },
- render() {
- var paginationComponent;
- // is there a page before this one?
- var previousPageExists = !!this.props.prev;
- // is there a page after this one?
- var nextPageExists = !!this.props.next;
- var currentPage = [this.props.currentPage].map(mkPage, this);
- var prevClasses = classnames({
- 'arrow': true,
- 'unavailable': !previousPageExists
- });
-
- var nextClasses = classnames({
- 'arrow': true,
- 'unavailable': !nextPageExists
- });
-
- var prevPage = null;
- if (previousPageExists) {
- prevPage = [(
-
No matching repositories for '{this.state.currentQuery}'.
-
- );
- } else {
- return ;
- }
- },
- renderReposList() {
- const { paginationMode, reposList } = this.state;
- const { isOwner, repos } = this.props;
-
- return ;
- },
- componentWillReceiveProps(nextProps) {
- //We assume that loading filter results will be true only on click of the filter
- //And, the next time, we receive new props we set it to false
- //Also, we don't call the API after we get into filter mode, the user is stuck with filter mode after he clicks on filter
- //We also, would go back to pagination on click out and back
- if (nextProps.STATUS === SUCCESSFUL) {
- this.setState({
- reposList: nextProps.repos,
- currentQuery: ''
- });
- }
-
- //If the context changes and the dashboard goes into an org context
- //This route will have a param only when there is an Org dashboard loading -> /u//
- //We will set the reposList to the nextProps in this scenario and also clear the query
- const { params } = this.props;
- const orgnameParam = params.user;
- if (orgnameParam && orgnameParam !== this.state.orgName) {
- this.setState({
- orgName: orgnameParam,
- reposList: nextProps.repos,
- paginationMode: true,
- currentQuery: ''
- });
- }
- },
- render() {
-
- const {
- next,
- prev,
- repos,
- STATUS,
- params,
- location,
- currentUserContext
- } = this.props;
-
- let namespace = params.user;
- var content = (
-
-
-
-
-
- );
- var currentPageNumber = parseInt(location.query ? location.query.page : 1, 10);
- var maybePagination;
- if (repos && repos.length > 0 && this.state.paginationMode) {
- maybePagination = (
-
-
-
-
-
- );
- }
-
- //A filter bar on top of the repositories list, when we are doing client side filtering
- let maybeMessage;
- var showTotal = 0;
- var showCount = 0;
- if (repos) {
- showTotal = repos.length;
- }
- if (this.state.reposList) {
- showCount = this.state.reposList.length;
- }
- if (STATUS !== ATTEMPTING && !this.state.paginationMode) {
- maybeMessage = {`Showing ${showCount} of ${showTotal}`};
- } else if (STATUS === ATTEMPTING) {
- maybeMessage = (
-
- Loading ...
-
- );
- }
-
- let maybeFilter = (
-
- );
-
- // Do not show state option unless country is US or Canada
- let stateArea;
- if (includes(COUNTRIES_WITH_STATES, values.country)) {
- const statesOrProvinces = values.country === UNITED_STATES ? states : provinces;
- stateArea = (
-
- You may { 'upload the license file, ' +
- 'provided below, in '} Settings > Licenses.
- Save and Restart UCP.
-
-
-
-
Licensing DTR
-
- {'Before proceeding, you must set the domain name to the full ' +
- 'host-name of your Trusted Registry server (this is under '}Settings
- {' in Trusted Registry). Once you’ve saved and restarted the Trusted Registry, you may now '}
- { 'upload the license file, ' +
- 'provided below, in '} Settings > Licenses.
- Save and Restart once more.
-
- {'Once the engine is installed, '}
- install Docker Trusted Registry
- by running the docker/dtr
- container below.
- {' This command pulls and runs Docker Trusted Registry on a container.'}
-
-
- {`NOTE: Your browser will warn you that this is an unsafe site, ` +
- `with a self-signed, untrusted certificate. This is normal and ` +
- `expected; please allow this connection temporarily.`}
-
-
- { 'You’re almost ready to push and pull images! You need to ' }
- secure your Trusted Registry
- first. Navigate to Settings > Security{', and enter your ' +
- 'data in the required fields. At this time, you may also want to ' +
- 'configure additional settings such as ports, storage, authentication, and so forth.' }
-
- );
- }
-}
diff --git a/app/scripts/components/enterprise/EnterpriseTrialSuccess/StepsTab.jsx b/app/scripts/components/enterprise/EnterpriseTrialSuccess/StepsTab.jsx
deleted file mode 100644
index feae8301d3..0000000000
--- a/app/scripts/components/enterprise/EnterpriseTrialSuccess/StepsTab.jsx
+++ /dev/null
@@ -1,54 +0,0 @@
-'use strict';
-
-import React, { PropTypes, Component } from 'react';
-import { Link } from 'react-router';
-import styles from '../EnterpriseTrialSuccess.css';
-import classnames from 'classnames';
-import FA from 'common/FontAwesome';
-const { number, string } = PropTypes;
-
-export default class StepsTab extends Component {
- static propTypes = {
- currentStep: number.isRequired,
- namespace: string.isRequired,
- step: number.isRequired,
- title: string.isRequired
- }
-
- render() {
- const { currentStep,
- namespace,
- step,
- title } = this.props;
- const classes = classnames({
- [styles.tab]: true,
- [styles.active]: currentStep === step,
- [styles.success]: currentStep > step,
- [styles.last]: step === 4
- });
- let icon;
- //TODO: replace the with line below when FA can accept size=1x
- //
- if (currentStep > step) {
- icon = (
-
-
-
-
- );
- } else {
- icon = (
-
-
- {step}
-
- );
- }
- return (
-
-
- { icon } { title }
-
- );
- }
-}
diff --git a/app/scripts/components/enterprise/EnterpriseTrialTerms.css b/app/scripts/components/enterprise/EnterpriseTrialTerms.css
deleted file mode 100644
index 72fad5d928..0000000000
--- a/app/scripts/components/enterprise/EnterpriseTrialTerms.css
+++ /dev/null
@@ -1,7 +0,0 @@
-.softwareAgreement {
- padding-top: .2rem;
-}
-
-.termsPageWrapper {
- padding-top: 1.25rem;
-}
\ No newline at end of file
diff --git a/app/scripts/components/enterprise/EnterpriseTrialTerms.jsx b/app/scripts/components/enterprise/EnterpriseTrialTerms.jsx
deleted file mode 100644
index cd4c8cc871..0000000000
--- a/app/scripts/components/enterprise/EnterpriseTrialTerms.jsx
+++ /dev/null
@@ -1,87 +0,0 @@
-'use strict';
-
-import React, { Component } from 'react';
-import Card, { Block } from '@dux/element-card';
-import { PageHeader } from 'dux';
-import styles from './EnterpriseTrialTerms.css';
-
-export default class extends Component {
- render() {
- return (
-
-
-
-
-
-
-
-
This Docker software evaluation agreement("agreement") is by and between Docker Inc., located at 144 Townsend St, San Francisco, CA 94107("Docker") and the individual or legal entity who has executed an order form(or other ordering or purchasing document) referencing this agreement or is using the applicable software made available by Docker("customer") and governs all use by customer of the Docker software referenced in such order form. by executing an order form, customer expressly accepts and agrees to the terms of this agreement.if you are an individual agreeing to the terms of this agreement on behalf of an entity, such as your employer, you represent that you have the legal authority to bind that entity and "customer" shall refer herein to such entity.if you do not have such authority, or if you do not agree with the terms of this agreement, you must not execute the order form and may not use the licensed software(each as defined below). this agreement does not provide a commercial license after the trial period.use after the trial period is subject to the parties entering into and executing a separate written agreement.
-
1. Definitions.
-
The following capitalized terms shall have the meanings set forth below:
-
1.1 Feedback
-
"Feedback" means any comments or other feedback Customer may provide to Docker concerning the functionality and performance of the Licensed Software, including identification of potential errors and improvements.
-
1.2 Instance
-
"Instance" means a single instance of Licensed Software, as applicable, installed on a physical or virtual computer or server.
-
1.3 Key
-
Key means the license key or similar control mechanism to help ensure compliance with the use and time limitations with respect to the Licensed Software.
-
1.4 Licensed Software
-
Licensed Software means the Docker software identified on an Order Form (other than Open Source Software) and licensed to Customer pursuant to the terms of this Agreement, e.g., the "Docker trusted registry" software or other licensed software from Docker that is identified on the Order Form (excluding any Open Source Software included therein).
-
1.5 Open Source Software
-
Open Source Software means Docker or third party software identified at
- Docker.com/components-licenses, that is distributed or otherwise made available as "free software", "open source software" or under a similar licensing or distribution model.
-
1.6 Order Form
-
Order Form means an ordering document referencing this Agreement between Customer and Docker
-
1.7 Trial Period
-
"Trial Period" means 30 days
-
2. License.
-
2.1 Licensed Software.
-
Licensed Software. Subject to Customer's compliance with the terms and conditions of this Agreement, Docker hereby grants Customer a limited, non-exclusive, non-transferable, non-sub-licensable license during the Trial Period to install, copy and use the Licensed Software solely for Customer's internal evaluation purposes, in connection with the deployment of no more than one Instance.
-
2.2 Open Source Software.
-
If applicable, Open Source Software is distributed or made available under the terms of the open source license agreements referenced in the applicable distribution or the applicable help, notices, about or source files. Copyrights and other proprietary rights to the Open Source Software are held by the copyright holders identified in the applicable distribution or the applicable help, notices, about or source files.
-
2.3 License Keys.
-
Customer shall not destroy, disable or circumvent, or attempt to destroy, disable or circumvent in any way the Key and/or the use and time limitations set by the Key or the Licensed Software. Customer acknowledges and agrees that any attempt to exceed the use of the Licensed Software beyond the limits configured into the Key will automatically and immediately terminate the licenses granted under this Agreement.
-
3. Payment.
-
Subject to Customer's compliance with the terms and conditions of this Agreement, and solely during the Trial Period, the Licensed Software shall be provided to Customer free of charge.
-
4. Restricted Activities.
-
Customer shall not, and shall not encourage any third party to: (a) modify, adapt, alter, translate, or create derivative works of the Licensed Software; (b) reverse-engineer, decompile, disassemble, or attempt to derive the source code for the Licensed Software, in whole or in part, except to the extent that such activities are permitted under applicable law; (c) distribute, license, sublicense, lease, rent, loan, or otherwise transfer the Licensed Software to any third party; (d) remove, alter, or obscure in any way the proprietary rights notices (including copyright, patent, and trademark notices and symbols) of Docker or its suppliers contained on or within any copies of the Licensed Software; (e) use the Licensed Software for the purpose of creating a product or service competitive with the Licensed Software; (f) use the Licensed Software with any unsupported software or hardware (as described in the applicable documentation provided by Docker); (g) use the Licensed Software for any time-sharing, outsourcing, service bureau, hosting, application service provider or like purposes; (h) disclose the results of any benchmark tests on the Licensed Software without Docker's prior written consent; or (i) use the Licensed Software other than as described in the documentation provided therewith, or for any unlawful purpose.
-
5. Ownership of Licensed Software.
-
Docker and its licensors own and retain all right, title, and interest, including all intellectual property rights, in and to the Licensed Software, including any improvements, modifications, and enhancements to it. Except for the rights expressly granted in this Agreement, Customer shall acquire no other rights, express or implied, in or to the Licensed Software, and all rights not expressly provided to Customer hereunder are reserved by Docker and its licensors. All the copies of the Licensed Software provided or made available hereunder are licensed, not sold.
-
6. Term.
-
Unless otherwise terminated in accordance with this section, this Agreement will remain in effect until the expiration of the Trial Period. Either party may immediately terminate this Agreement and any Order Form incorporating the terms of this Agreement if the other party materially breaches this Agreement. Either party may terminate this agreement without cause upon 10 days' prior written notice. Unless otherwise agreed by the parties, upon the expiration or termination of the Trial Period all licenses granted herein will automatically terminate and Customer will discontinue all use of the applicable Licensed Software and will return to Docker any materials (including any copies of Licensed Software) provided by Docker to Customer. Sections 1, 2.2, 4, 5, and 7 through 14 shall survive any termination or expiration of this Agreement or any Order Form.
-
7.Feedback.
-
Customer may submit to Docker bug reports, comments, feedback or ideas about the Licensed Software, including without limitation about how to improve the Licensed Software. By submitting any Feedback, Customer hereby assigns to Docker all right, title, and interest in and to the Feedback, if any.
-
8. Confidentiality.
-
8.1 Definition.
-
"Confidential Information" means any information disclosed by one party ("Discloser") to the other ("Recipient"), directly or indirectly, in writing, orally or by inspection of tangible objects, which is designated as "Confidential," "Proprietary" or some similar designation, or learned by Recipient under circumstances in which such information would reasonably be understood to be confidential. Confidential Information may include information disclosed in confidence to Discloser by third parties. For the purposes of this Agreement, the Licensed Software, and the results of any performance, functional or other evaluation of the Licensed Software, shall be deemed Confidential Information of Docker.
-
8.2 Exceptions.
-
The confidentiality obligations in this Section 8 shall not apply with respect to any of the Discloser's Confidential information which Recipient can demonstrate: (a) was in the public domain at the time it was disclosed to Recipient or has become in the public domain through no act or omission of Recipient; (b) was known to Recipient, without restriction, at the time of disclosure as shown by the files of Recipient in existence at the time of disclosure; (c) was disclosed by Recipient with the prior written approval of Discloser; (d) was independently developed by Recipient without any use of Discloser's Confidential Information by employees or other agents of (or contractors hired by) Recipient who had no access to or did not rely on Discloser's Confidential Information; or (e) became known to Recipient, without restriction, from a source other than Discloser without breach of this Agreement by Recipient and otherwise not in violation of Discloser's rights.
-
8.3 Restrictions on Use and Disclosure.
-
Recipient agrees not to use Discloser's Confidential Information or disclose, distribute or disseminate Discloser's Confidential Information except in furtherance of the performance of its obligations or enforcement of its rights hereunder or as otherwise expressly agreed by Discloser in writing. Recipient agrees to restrict access to such Confidential Information to those employees or consultants of Recipient who need to know such Confidential Information for performing as contemplated hereunder and have agreed in writing to be bound by a confidentiality obligation no less protective than that contained in this Agreement. Recipient shall exercise the same degree of care to prevent unauthorized use or disclosure of Discloser's Confidential Information to others as it takes to preserve and safeguard its own information of like importance, but in no event less than reasonable care.
-
8.4 Compelled Disclosure.
-
If Recipient is compelled by a court or other competent authority or applicable law to disclose Confidential Information of Discloser, it shall give Discloser prompt written notice and shall provide Discloser with reasonable cooperation at Discloser's expense so that Discloser may take steps to oppose such disclosure or obtain a restraining order. Recipient shall not be in breach of its obligations in this Section 9 if it makes any legally compelled disclosure provided that Recipient meets the foregoing notice and cooperation requirements.
-
8.5 Injunctive Relief.
-
Recipient acknowledges that breach of the confidentiality obligations would cause irreparable harm to Discloser, the extent of which may be difficult to ascertain. Accordingly, Recipient agrees that Discloser is entitled to immediate injunctive relief in the event of breach of an obligation of confidentiality by Recipient, and that Discloser shall not be required to post a bond or show irreparable harm in order to obtain such injunctive relief.
-
8.6 Return of Confidential Information.
-
As between the parties, Confidential Information shall remain the property of the Discloser. At any time, upon Discloser's reasonable request, Recipient shall promptly (and in any event within 30 days) return to Discloser or destroy, at the election of the Discloser, any Confidential Information of the Discloser. In addition, within 30 days after termination of this Agreement, Recipient shall (i) promptly return all tangible materials containing such Confidential Information to Discloser, (ii) remove all Confidential Information (and any copies thereof) from any computer systems of the Recipient, its contractors and its distributors, and confirm in writing that all materials containing Confidential Information have been destroyed or returned to Discloser, as applicable, by Recipient. Recipient shall cause its affiliates, agents, contractors, and employees to strictly comply with the foregoing.
-
9. No Warranties.
-
CUSTOMER EXPRESSLY UNDERSTANDS AND AGREES THAT ALL USE OF THE LICENSED SOFTWARE IS AT CUSTOMER'S SOLE RISK AND THAT THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND "AS AVAILABLE." DOCKER, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS MAKE NO EXPRESS WARRANTIES AND DISCLAIM ALL IMPLIED WARRANTIES REGARDING THE LICENSED SOFTWARE, INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT, TOGETHER WITH ANY AND ALL WARRANTIES ARISING FROM COURSE OF DEALING OR USAGE IN TRADE. NO ADVICE OR INFORMATION, WHETHER ORAL OR WRITTEN, OBTAINED FROM DOCKER OR ELSEWHERE SHALL CREATE ANY WARRANTY NOT EXPRESSLY STATED IN THIS AGREEMENT. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, DOCKER, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS DO NOT REPRESENT OR WARRANT TO YOU THAT: (A) CUSTOMER'S USE OF THE LICENSED SOFTWARE WILL MEET CUSTOMER'S REQUIREMENTS, OR (B) CUSTOMER'S USE OF THE LICENSED SOFTWARE WILL BE UNINTERRUPTED, TIMELY, SECURE OR FREE FROM ERROR. NOTWITHSTANDING THE FOREGOING, NOTHING HEREIN SHALL EXCLUDE OR LIMIT DOCKER'S WARRANTY OR LIABILITY FOR LOSSES WHICH MAY NOT BE LAWFULLY EXCLUDED OR LIMITED BY APPLICABLE LAW. CUSTOMER UNDERSTANDS AND ACKNOWLEDGES THAT THE LICENSED SOFTWARE IS NOT DESIGNED, INTENDED OR WARRANTED FOR USE IN HAZARDOUS ENVIRONMENTS REQUIRING FAIL-SAFE CONTROLS, INCLUDING WITHOUT LIMITATION, OPERATION OF NUCLEAR FACILITIES, AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL, AND LIFE SUPPORT OR WEAPONS SYSTEMS.
-
10. Indemnification by Customer.
-
Customer agrees to hold harmless and indemnify Docker and its subsidiaries, affiliates, officers, agents, employees, advertisers, licensors, suppliers or partners from and against any third party claim arising from or in any way related to Customer's breach of this Agreement, use of the Licensed Software, or violation of applicable laws, rules or regulations in connection with the Licensed Software, including any liability or expense arising from all claims, losses, damages (actual and consequential), suits, judgments, litigation costs and attorneys' fees, of every kind and nature. In such a case, Docker will provide Customer with written notice of such claim, suit or action.
-
11. Limitation of Liability.
-
11.1 Exclusion of Damages
-
Exclusion of Damages. CUSTOMER EXPRESSLY UNDERSTANDS AND AGREES THAT DOCKER, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS SHALL NOT BE LIABLE TO CUSTOMER FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL CONSEQUENTIAL OR EXEMPLARY DAMAGES INCURRED BY CUSTOMER, HOWEVER CAUSED AND UNDER ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY LOSS OF PROFIT (WHETHER INCURRED DIRECTLY OR INDIRECTLY), ANY LOSS OF GOODWILL OR BUSINESS REPUTATION, ANY LOSS OF DATA SUFFERED, COST OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR OTHER INTANGIBLE LOSS. THE FOREGOING LIMITATIONS ON DOCKER'S LIABILITY SHALL APPLY WHETHER OR NOT DOCKER HAS BEEN ADVISED OF OR SHOULD HAVE BEEN AWARE OF THE POSSIBILITY OF ANY SUCH LOSSES ARISING. NOTWITHSTANDING THE FOREGOING, NOTHING HEREIN SHALL EXCLUDE OR LIMIT DOCKER'S LIABILITY FOR LOSSES WHICH MAY NOT BE LAWFULLY EXCLUDED OR LIMITED BY APPLICABLE LAW.
-
11.2 Liability Cap
-
THE TOTAL LIABILITY OF DOCKER ARISING OUT OF OR RELATED TO THIS AGREEMENT WILL NOT EXCEED USD $100.
-
12. Export Restrictions.
-
Customer understands that the Licensed Software may contain encryption technology and other software programs that may require an export license from the U.S. State Department and that export or re-export of the Licensed Software to certain entities (such as a foreign government and its subdivisions) and certain countries is prohibited. Customer acknowledges that it will comply with all applicable export and import control laws and regulations of the United States and the foreign jurisdiction in which the Licensed Software is used and, in particular, Customer will not export or re-export the Licensed Software without all required United States and foreign government licenses. Customer will defend, indemnify, and hold harmless Docker and its suppliers and licensors from and against any violation of such laws or regulations by Customer or any of its agents, officers, directors or employees.
-
13. Miscellaneous.
-
The Licensed Software and any other software covered under this Agreement are "commercial items" as that term is defined at 48 C.F.R. 2.101; consisting of "commercial computer software" and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212. Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4, all U.S. Government end users acquire the Licensed Software and any other software and documentation covered under this Agreement with only those rights set forth herein. This Agreement will be governed by the laws of the State of California without reference to conflict of law principles. Each party agrees to submit to the exclusive jurisdiction of the courts located within the county of San Francisco, California to resolve any legal matter arising from this Agreement. Neither party may assign any of its rights or obligations under this Agreement, whether by operation of law or otherwise, without the prior written consent of the other party (not to be unreasonably withheld). Notwithstanding the foregoing, Docker may assign the entirety of its rights and obligations under this Agreement, without consent of the Customer, to its affiliate or in connection with a merger, acquisition, corporate reorganization, or sale of all or substantially all of its assets. The application of the UN Convention of International Sale of Goods to this Agreement is disclaimed in its entirety. Together with any Order Forms, this is the entire agreement between the parties relating to the subject matter hereof. 
This Agreement (including applicable Order Forms) shall control over any additional or different terms of any correspondence, order, confirmation, invoice or similar document, even if accepted in writing by both parties, and waivers and amendments of any provision of this Agreement shall be effective only if made by non-preprinted agreements indicating specifically what sections of this Agreement are affected, signed by both parties and clearly understood by both parties to be an amendment or waiver. The failure of either party to enforce its rights under this Agreement at any time for any period shall not be construed as a waiver of such rights. If any provision of this Agreement is held invalid or unenforceable, the remainder of this Agreement will continue in full force and effect and the invalid or unenforceable provision shall be reformed to the extent necessary to make it valid and enforceable.
-
- Max of 1 Docker Trusted Registry for Server and 10 Engines
-
-
-
-
-
-
- );
- }
- }
-});
-
-export default connectToStores(EnterprisePaid,
- [
- EnterprisePartnerTrackingStore
- ],
- function({ getStore }, props) {
- return getStore(EnterprisePartnerTrackingStore).getState();
- });
diff --git a/app/scripts/components/enterprise/plansAndSubscriptions.js b/app/scripts/components/enterprise/plansAndSubscriptions.js
deleted file mode 100644
index eaa52d67d9..0000000000
--- a/app/scripts/components/enterprise/plansAndSubscriptions.js
+++ /dev/null
@@ -1,103 +0,0 @@
-'use strict';
-
-export default {
- server: {
- type: 'Docker Trusted Registry',
- name: 'Server Starter Edition includes',
- includes: [
- '1 instance of Docker Trusted Registry',
- '10 Docker Engines with commercial support for the servers hosting your application',
- 'Email support service levels for your Docker software'
- ],
- redirect_value: 'eval',
- notes: 'License keys and commercially supported Docker Engine software are distributed and managed within your Docker Hub account.'
- },
- trial: {
- type: 'a trial of Docker Datacenter',
- name: 'Docker Datacenter Trial includes access to',
- includes: [
- 'Trusted Registry',
- 'Universal Control Plane',
- 'Commercial Support for Docker Engine'
- ],
- redirect_value: 'eval',
- notes: 'License keys and commercially supported Docker Engine software are distributed and managed within your Docker Hub account.'
- },
- cloud: {
- type: 'Docker Cloud Subscription',
- name: 'Cloud Starter Edition includes',
- includes: [
- '20 Private Repos',
- '10 Docker Engines with commercial support for the servers hosting your application',
- 'Email support service levels for your Docker software'
- ],
- redirect_value: 'cloud_starter',
- notes: 'License keys and commercially supported Docker Engine software are distributed and managed within your Docker Hub account.'
- },
- micro: {
- type: 'our Private Repo Plans',
- name: 'Micro plan includes',
- includes: [
- '5 Private Repos',
- '5 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'micro',
- notes: null
- },
- small: {
- type: 'our Private Repo Plans',
- name: 'Small plan includes',
- includes: [
- '10 Private Repos',
- '10 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'small',
- notes: null
- },
- medium: {
- type: 'our Private Repo Plans',
- name: 'Medium plan includes',
- includes: [
- '20 Private Repos',
- '20 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'medium',
- notes: null
- },
- large: {
- type: 'our Private Repo Plans',
- name: 'Large plan includes',
- includes: [
- '50 Private Repos',
- '50 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'large',
- notes: null
- },
- xlarge: {
- type: 'our Private Repo Plans',
- name: 'XLarge plan includes',
- includes: [
- '100 Private Repos',
- '100 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'xlarge',
- notes: null
- },
- xxlarge: {
- type: 'our Private Repo Plans',
- name: 'XX-Large plan includes',
- includes: [
- '250 Private Repos',
- '250 Parallel Builds',
- 'Community Hub Support'
- ],
- redirect_value: 'xxlarge',
- notes: null
- }
-};
diff --git a/app/scripts/components/eusa.js b/app/scripts/components/eusa.js
deleted file mode 100644
index cf474ae4d0..0000000000
--- a/app/scripts/components/eusa.js
+++ /dev/null
@@ -1,455 +0,0 @@
-'use strict';
-
-module.exports = {
- md: `**DOCKER SOFTWARE END USER SUBSCRIPTION AGREEMENT**
-===================================================
-
-THIS DOCKER SOFTWARE END USER SUBSCRIPTION AGREEMENT ("AGREEMENT") IS BY
-AND BETWEEN DOCKER INC., LOCATED AT 144 TOWNSEND ST, SAN FRANCISCO, CA
-94107 ("DOCKER") AND THE INDIVIDUAL OR LEGAL ENTITY WHO HAS EXECUTED AN
-ORDER FORM (OR OTHER ORDERING OR PURCHASING DOCUMENT) REFERENCING THIS
-AGREEMENT OR IS USING THE APPLICABLE SOFTWARE MADE AVAILABLE BY DOCKER
-("CUSTOMER") AND GOVERNS ALL USE BY CUSTOMER OF THE DOCKER SOFTWARE
-REFERENCED IN SUCH ORDER FORM.
-
-BY EXECUTING AN ORDER FORM, CUSTOMER EXPRESSLY ACCEPTS AND AGREES TO THE
-TERMS OF THIS AGREEMENT. IF YOU ARE AN INDIVIDUAL AGREEING TO THE TERMS
-OF THIS AGREEMENT ON BEHALF OF AN ENTITY, SUCH AS YOUR EMPLOYER, YOU
-REPRESENT THAT YOU HAVE THE LEGAL AUTHORITY TO BIND THAT ENTITY AND
-"CUSTOMER" SHALL REFER HEREIN TO SUCH ENTITY. IF YOU DO NOT HAVE SUCH
-AUTHORITY, OR IF YOU DO NOT AGREE WITH THE TERMS OF THIS AGREEMENT, YOU
-MUST NOT EXECUTE THE ORDER FORM AND MAY NOT USE THE LICENSED SOFTWARE OR
-THE SUBSCRIPTION SERVICES (EACH AS DEFINED BELOW).
-
-### 1. Definitions
-
-The following capitalized terms shall have the meanings set forth below:
-"Docker Authorized Business Partner" shall have the meaning ascribed to
-that term in Section 3.3.
-
-1.1 **"Docker Authorized Business Partner"** shall have the meaning
-ascribed to that term in Section 3.3.
-
-1.2 **"Feedback"** means any comments or other feedback Customer may
-provide to Docker concerning the functionality and performance of the
-Supported Software, including identification of potential errors and
-improvements.
-
-1.3 **"Instance"** means a single instance of Licensed Software or
-Supported Software, as applicable, installed on a physical or virtual
-computer or server.
-
-1.4 **"Key"** means the license key or similar control mechanism to help
-ensure compliance with the use and time limitations with respect to the
-Licensed Software.
-
-1.5 **"Licensed Software"** means the Docker software identified on an
-Order Form (other than Open Source Software) and licensed to Customer
-pursuant to the terms of this Agreement, e.g., the "Docker Trusted
-Registry" software or other licensed software from Docker that is
-identified on the Order Form (excluding any Open Source Software
-included therein).
-
-1.6 **"Open Source Software"** means Docker or third party software
-identified at [placeholder for webpage or other documentation with open
-source listing], that is distributed or otherwise made available as
-"free software", "open source software" or under a similar licensing or
-distribution model.
-
-1.7 **"Order Form"** means an ordering document referencing this
-Agreement between Customer and Docker, or between Customer and a Docker
-Authorized Business Partner.
-
-1.8 **"Subscription Fee"** means the fee for Subscription Services
-purchased by the Customer. The amount of the Subscription Fee is based
-on the number of Instances and the level (e.g., 24X7 or Defined Business
-Hours) of Subscription Services specified in the Order Form
-"Subscription Term" means the applicable initial and/or renewal
-subscription term as set forth in the applicable Order Form.
-
-1.9 **"Subscription Term"** means the applicable initial and/or renewal
-subscription term as set forth in the applicable Order Form.
-
-1.10 **"Supported Software"** means the Docker or third party software
-identified on the Order Form as software for which Docker or its
-authorized resellers agree to provide Subscription Services to Customer.
-For purposes of clarity, Supported Software may include Licensed
-Software and/or identified versions of Open Source Software with respect
-to which Docker agrees to provide updates, patches and hotfixes to the
-customer.
-
-1.11 **"Subscription Services"** means standard support and maintenance
-services and software updates provided by Docker for the Supported
-Software, as set forth at:
-[*https://www.docker.com/support/*](https://www.docker.com/support/).
-
-2\. License
-
-2.1 **Licensed Software.** Subject to Customer's timely payment of the
-Subscription Fee and compliance with the terms and conditions of this
-Agreement, Docker hereby grants Customer a limited, non-exclusive,
-non-transferable, non-sub-licensable license during the applicable
-Subscription Term to install, copy and use the Licensed Software for
-Customer's internal business purposes, in connection with the deployment
-of no more than the number of Instances as are set forth in the Order
-Form.
-
-2.2 **Open Source Software.** If applicable, Open Source Software is
-distributed or made available under the terms of the open source license
-agreements referenced in the applicable distribution or the applicable
-help, notices, about or source files. Copyrights and other proprietary
-rights to the Open Source Software are held by the copyright holders
-identified in the applicable distribution or the applicable help,
-notices, about or source files.
-
-2.3 **License Keys.** Customer shall not destroy, disable or circumvent,
-or attempt to destroy, disable or circumvent in any way the Key and/or
-the use and time limitations set by the Key or the Licensed Software.
-Customer acknowledges and agrees that any attempt to exceed the use of
-the Licensed Software beyond the limits configured into the Key will
-automatically and immediately terminate the licenses granted under this
-Agreement.
-
-### 3. Subscription
-
-3.1 **Subscription Services.** Subject to Customer's timely payment of
-the Subscription Fee and compliance with the terms and conditions of
-this Agreement, Docker shall provide to Customer the Subscription
-Services during the Subscription Term. Customer must purchase
-Subscription Services corresponding to the number of Instances specified
-in the Order Form. Customer may purchase different levels of
-Subscription Services with respect to each such Instance; provided,
-however, that Customer may not use Subscription Services with a higher
-support level in connection with an Instance for which Customer had
-purchased Subscription Services with a lower support level. In addition,
-the customer may not use Instances of Open Source Software that has not
-been identified on an Order Form, on computers and or servers that are
-part of the environment in which Subscription Services are provided.
-Unless renewed, the Subscription Services will expire at the end of the
-applicable Subscription Term. This means that while the Customer is free
-to use the Open Source Software after the expiration of the applicable
-Subscription Term, Docker will not provide the Subscription Services
-after the end of the applicable Subscription Term.
-
-3.2 **Subscription Fee.** Payment of the Subscription Fee shall be made
-by Customer as set forth in the applicable Order Form.
-
-3.3 **Support from Docker's Business Partners.** In some cases,
-Customers may also receive support services,as part of the purchased
-Subscription Services, from a Docker authorized business partner (each,
-a "Docker Authorized Business Partner"). Notwithstanding anything to the
-contrary in Section 3.1, if Customer purchases support services from a
-Docker Authorized Business Partner, Docker shall have no obligation to
-provide any support services to the Customer and Customer should work
-with that Docker Authorized Business Partner to obtain all support
-services for the Supported Software.
-
-### 4. Restricted Activities
-
-Customer shall not, and shall not encourage any third party to: (a)
-modify, adapt, alter, translate, or create derivative works of the
-Licensed Software; (b) reverse-engineer, decompile, disassemble, or
-attempt to derive the source code for the Licensed Software, in whole or
-in part, except to the extent that such activities are permitted under
-applicable law; (c) distribute, license, sublicense, lease, rent, loan,
-or otherwise transfer the Licensed Software to any third party; (d)
-remove, alter, or obscure in any way the proprietary rights notices
-(including copyright, patent, and trademark notices and symbols) of
-Docker or its suppliers contained on or within any copies of the
-Licensed Software; (e) use the Licensed Software for the purpose of
-creating a product or service competitive with the Licensed Software;
-(f) use the Licensed Software with any unsupported software or hardware
-(as described in the applicable documentation provided by Docker); (g)
-use the Licensed Software for any time-sharing, outsourcing, service
-bureau, hosting, application service provider or like purposes; (h)
-disclose the results of any benchmark tests on the Licensed Software
-without Docker's prior written consent; or (i) use the Licensed Software
-other than as described in the documentation provided therewith, or for
-any unlawful purpose.
-
-### 5. Ownership of Licensed Software
-
-Docker and its licensors own and retain all right, title, and interest,
-including all intellectual property rights, in and to the Licensed
-Software, including any improvements, modifications, and enhancements to
-it. Except for the rights expressly granted in this Agreement, Customer
-shall acquire no other rights, express or implied, in or to the Licensed
-Software, and all rights not expressly provided to Customer hereunder
-are reserved by Docker and its licensors. All the copies of the Licensed
-Software provided or made available hereunder are licensed, not sold.
-
-### 6. Records and Audit
-
-Customer shall establish and maintain complete and accurate records
-related to the location, access and use of the Supported Software by
-Customer, its employees or its agents, and any such other information as
-reasonably necessary for Docker to verify compliance with the terms of
-this Agreement. Such records shall be kept for at least 3 years
-following the end of the quarter to which they pertain. Upon prior
-notice, Docker or its representative may inspect such records to confirm
-Customer's compliance with the terms of this Agreement. Prompt
-adjustments shall be made by Customer as directed by Docker to
-compensate for any errors or breach discovered by such audit, such as
-underpayment of the Subscription Fee, with the applicable late payment
-interest. Additionally, if Customer has underpaid Docker or its
-authorized reseller by more than 5% of the total amount owed hereunder,
-the cost of the audit shall be borne by Customer.
-
-### 7. Term
-
-Unless otherwise terminated in accordance with this section, this
-Agreement will remain in effect until all Subscription Services granted
-under this Agreement have expired. Either party may terminate this
-Agreement and any Order Form incorporating the terms of this Agreement
-(if Docker is a party to such Order Form) if the other party materially
-breaches this Agreement and fails to cure such breach within 30 days of
-receiving written notice thereof. Unless otherwise agreed by the
-parties, upon the expiration or termination of this Agreement or any
-Order Form all Subscription Services granted herein or therein will
-automatically terminate and Customer will discontinue all use of the
-applicable Licensed Software and Supported Software and will return to
-Docker any materials (including any copies of Licensed Software)
-provided by Docker to Customer. Sections 1, 2.4, 3, 5, and 7 through 14
-shall survive any termination or expiration of this Agreement or any
-Order Form.
-
-### 8. Feedback.
-
-Customer may submit to Docker bug reports, comments, feedback or ideas
-about the Supported Software, including without limitation about how to
-improve the Supported Software. By submitting any Feedback, Customer
-hereby assigns to Docker all right, title, and interest in and to the
-Feedback, if any.
-
-### 9. Confidentiality
-
-9.1 **Definition.** "Confidential Information" means any information
-disclosed by one party ("Discloser") to the other ("Recipient"),
-directly or indirectly, in writing, orally or by inspection of tangible
-objects, which is designated as "Confidential," "Proprietary" or some
-similar designation, or learned by Recipient under circumstances in
-which such information would reasonably be understood to be
-confidential. Confidential Information may include information disclosed
-in confidence to Discloser by third parties. For the purposes of this
-Agreement, the Licensed Software, and the results of any performance,
-functional or other evaluation of the Licensed Software, shall be deemed
-Confidential Information of Docker.
-
-9.2 **Exceptions.** The confidentiality obligations in this Section 8
-shall not apply with respect to any of the Discloser's Confidential
-information which Recipient can demonstrate: (a) was in the public
-domain at the time it was disclosed to Recipient or has become in the
-public domain through no act or omission of Recipient; (b) was known to
-Recipient, without restriction, at the time of disclosure as shown by
-the files of Recipient in existence at the time of disclosure; (c) was
-disclosed by Recipient with the prior written approval of Discloser; (d)
-was independently developed by Recipient without any use of Discloser's
-Confidential Information by employees or other agents of (or contractors
-hired by) Recipient who had no access to or did not rely on Discloser's
-Confidential Information; or (e) became known to Recipient, without
-restriction, from a source other than Discloser without breach of this
-Agreement by Recipient and otherwise not in violation of Discloser's
-rights.
-
-9.3 **Restrictions on Use and Disclosure.** Recipient agrees not to use
-Discloser's Confidential Information or disclose, distribute or
-disseminate Discloser's Confidential Information except in furtherance
-of the performance of its obligations or enforcement of its rights
-hereunder or as otherwise expressly agreed by Discloser in writing.
-Recipient agrees to restrict access to such Confidential Information to
-those employees or consultants of Recipient who need to know such
-Confidential Information for performing as contemplated hereunder and
-have agreed in writing to be bound by a confidentiality obligation no
-less protective than that contained in this Agreement. Recipient shall
-exercise the same degree of care to prevent unauthorized use or
-disclosure of Discloser's Confidential Information to others as it takes
-to preserve and safeguard its own information of like importance, but in
-no event less than reasonable care.
-
-9.4 **Compelled Disclosure.** If Recipient is compelled by a court or
-other competent authority or applicable law to disclose Confidential
-Information of Discloser, it shall give Discloser prompt written notice
-and shall provide Discloser with reasonable cooperation at Discloser's
-expense so that Discloser may take steps to oppose such disclosure or
-obtain a restraining order. Recipient shall not be in breach of its
-obligations in this Section 9 if it makes any legally compelled
-disclosure provided that Recipient meets the foregoing notice and
-cooperation requirements.
-
-9.5 **Injunctive Relief.** Recipient acknowledges that breach of the
-confidentiality obligations would cause irreparable harm to Discloser,
-the extent of which may be difficult to ascertain. Accordingly,
-Recipient agrees that Discloser is entitled to immediate injunctive
-relief in the event of breach of an obligation of confidentiality by
-Recipient, and that Discloser shall not be required to post a bond or
-show irreparable harm in order to obtain such injunctive relief.
-
-9.6 **Return of Confidential Information.** As between the parties,
-Confidential Information shall remain the property of the Discloser. At
-any time, upon Discloser’s reasonable request, Recipient shall promptly
-(and in any event within 30 days) return to Discloser or destroy, at the
-election of the Discloser, any Confidential Information of the
-Discloser. In addition, within 30 days after termination of this
-Agreement, Recipient shall (i) promptly return all tangible materials
-containing such Confidential Information to Discloser, (ii) remove all
-Confidential Information (and any copies thereof) from any computer
-systems of the Recipient, its contractors and its distributors, and
-confirm in writing that all materials containing Confidential
-Information have been destroyed or returned to Discloser, as applicable,
-by Recipient. Recipient shall cause its affiliates, agents, contractors,
-and employees to strictly comply with the foregoing.
-
-### 10. No Warranties
-
-CUSTOMER EXPRESSLY UNDERSTAND AND AGREE THAT ALL USE OF THE SUPPORTED
-SOFTWARE IS AT CUSTOMER'S SOLE RISK AND THAT THE SUPPORTED SOFTWARE AND
-SUPPORT SERVICES ARE PROVIDED "AS IS" AND "AS AVAILABLE." DOCKER, ITS
-SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS MAKE NO EXPRESS
-WARRANTIES AND DISCLAIM ALL IMPLIED WARRANTIES REGARDING THE SUPPORTED
-SOFTWARE OR SUPPORT SERVICES, INCLUDING IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT,
-TOGETHER WITH ANY AND ALL WARRANTIES ARISING FROM COURSE OF DEALING OR
-USAGE IN TRADE. NO ADVICE OR INFORMATION, WHETHER ORAL OR WRITTEN,
-OBTAINED FROM DOCKER OR ELSEWHERE SHALL CREATE ANY WARRANTY NOT
-EXPRESSLY STATED IN THIS AGREEMENT. WITHOUT LIMITING THE GENERALITY OF
-THE FOREGOING, DOCKER, ITS SUBSIDIARIES AND AFFILIATES, AND ITS
-LICENSORS DO NOT REPRESENT OR WARRANT TO YOU THAT: (A) CUSTOMER’S USE OF
-THE SUPPORTED SOFTWARE OR SUPPORT SERVICES WILL MEET CUSTOMER’S
-REQUIREMENTS, OR (B) CUSTOMER’S USE OF THE SUPPORTED SOFTWARE OR SUPPORT
-SERVICES WILL BE UNINTERRUPTED, TIMELY, SECURE OR FREE FROM ERROR.
-NOTWITHSTANDING THE FOREGOING, NOTHING HEREIN SHALL EXCLUDE OR LIMIT
-DOCKER'S WARRANTY OR LIABILITY FOR LOSSES WHICH MAY NOT BE LAWFULLY
-EXCLUDED OR LIMITED BY APPLICABLE LAW. CUSTOMER UNDERSTANDS AND
-ACKNOWLEDGES THAT THE SUPPORTED SOFTWARE IS NOT DESIGNED, INTENDED OR
-WARRANTED FOR USE IN HAZARDOUS ENVIRONMENTS REQUIRING FAIL-SAFE
-CONTROLS, INCLUDING WITHOUT LIMITATION, OPERATION OF NUCLEAR FACILITIES,
-AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL, AND
-LIFE SUPPORT OR WEAPONS SYSTEMS.
-
-### 11. Indemnification
-
-11.1 **By Docker.** Docker shall defend at its own expense any legal
-action brought against Customer to the extent that it is based on a
-claim or allegation that the Licensed Software infringes a U.S. patent
-or copyright of a third party, and Docker will pay any costs and damages
-awarded against Customer in any such action, or agreed to under a
-settlement signed by Docker, that are attributable to any such claim but
-shall not be responsible for any compromise made or expense incurred
-without Docker’s consent. Such defense and payments are subject to the
-conditions that (a) Customer gives Docker prompt written notice of such
-claim, (b) tenders to Docker sole control of the defense and settlement
-of the claim, and (c) reasonably cooperates with Docker when requested
-in connection with the defense and settlement of the claim. Docker will
-have no liability to so defend and pay for any infringement claim to the
-extent it (i) is based on modification of the Licensed Software other
-than by Docker, with or without authorization; (ii) results from failure
-of Customer to use an updated version of the Licensed Software; or (iii)
-is based on the combination or use of the Licensed Software with any
-software (including, without limitation, Open Source Software), program
-or device not provided by Docker if such infringement would not have
-arisen but for such use or combination; or (iv) results from use of the
-Licensed Software by Customer after the license was terminated.
-
-11.2 **Limitation of IP Damages.** Should any Licensed Software, or the
-operation thereof, become or in Docker's opinion be likely to become,
-the subject of such claim described in Section 11.1, Docker may, at
-Docker's option and expense, procure for Customer the right to continue
-using the Licensed Software, replace or modify the Licensed Software so
-that it becomes non-infringing, or terminate the license granted
-hereunder for such Licensed Software. THIS SECTION 11 STATES DOCKER'S
-SOLE AND EXCLUSIVE LIABILITY, AND CUSTOMER'S SOLE AND EXCLUSIVE REMEDY,
-WITH RESPECT TO INFRINGEMENT OR MISAPPROPRIATION OF INTELLECTUAL
-PROPERTY RIGHTS OF ANY KIND.
-
-11.3 **By Customer.** Customer agrees to hold harmless and indemnify
-Docker and its subsidiaries, affiliates, officers, agents, employees,
-advertisers, licensors, suppliers or partners from and against any third
-party claim arising from or in any way related to Customer’s breach of
-this Agreement, use of the Supported Software, or violation of
-applicable laws, rules or regulations in connection with the Supported
-Software, including any liability or expense arising from all claims,
-losses, damages (actual and consequential), suits, judgments, litigation
-costs and attorneys' fees, of every kind and nature. In such a case,
-Docker will provide Customer with written notice of such claim, suit or
-action.
-
-### 12. Limitation of Liability.
-
-12.1 **Exclusion of Damages.** CUSTOMER EXPRESSLY UNDERSTANDS AND AGREES
-THAT DOCKER, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS SHALL
-NOT BE LIABLE TO CUSTOMER FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL
-CONSEQUENTIAL OR EXEMPLARY DAMAGES INCURRED BY CUSTOMER, HOWEVER CAUSED
-AND UNDER ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY
-LOSS OF PROFIT (WHETHER INCURRED DIRECTLY OR INDIRECTLY), ANY LOSS OF
-GOODWILL OR BUSINESS REPUTATION, ANY LOSS OF DATA SUFFERED, COST OF
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR OTHER INTANGIBLE LOSS.
-THE FOREGOING LIMITATIONS ON DOCKER'S LIABILITY SHALL APPLY WHETHER OR
-NOT DOCKER HAS BEEN ADVISED OF OR SHOULD HAVE BEEN AWARE OF THE
-POSSIBILITY OF ANY SUCH LOSSES ARISING. NOTWITHSTANDING THE FOREGOING,
-NOTHING HEREIN SHALL EXCLUDE OR LIMIT DOCKER'S LIABILITY FOR LOSSES
-WHICH MAY NOT BE LAWFULLY EXCLUDED OR LIMITED BY APPLICABLE LAW.
-
-12.2 **Liability Cap.** EXCEPT WITH RESPECT TO EITHER PARTY’S
-OBLIGATIONS OF INDEMNIFICATION, THE TOTAL LIABILITY OF DOCKER ARISING
-OUT OF OR RELATED TO THIS AGREEMENT WILL NOT EXCEED THE GREATER OF USD
-\$100 OR THE TOTAL AMOUNTS PAID BY CUSTOMER FOR THE RELEVANT SUPPORTED
-SOFTWARE UNDER THE APPLICABLE ORDER FORM, IN THE TWELVE (12) MONTH
-PERIOD IMMEDIATELY PRECEDING THE EVENT GIVING RISE TO THE LIABILITY.
-
-### 13. Export Restrictions.
-
-Customer understands that the Supported Software may contain encryption
-technology and other software programs that may require an export
-license from the U.S. State Department and that export or re-export of
-the Supported Software to certain entities (such as a foreign government
-and its subdivisions) and certain countries is prohibited. Customer
-acknowledges that it will comply with all applicable export and import
-control laws and regulations of the United States and the foreign
-jurisdiction in which the Supported Software is used and, in particular,
-Customer will not export or re-export the Supported Software without all
-required United States and foreign government licenses. Customer will
-defend, indemnify, and hold harmless Docker and its suppliers and
-licensors from and against any violation of such laws or regulations by
-Customer or any of its agents, officers, directors or employees.
-
-### 14. Miscellaneous
-
-The Supported Software and any other software covered under this
-Agreement are "commercial items" as that term is defined at 48 C.F.R.
-2.101; consisting of "commercial computer software" and "commercial
-computer software documentation" as such terms are used in 48 C.F.R.
-12.212. Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1
-through 227.7202-4, all U.S. Government end users acquire the Supported
-Software and any other software and documentation covered under this
-Agreement with only those rights set forth herein. This Agreement will
-be governed by the laws of the State of California without reference to
-conflict of law principles. Each party agrees to submit to the exclusive
-jurisdiction of the courts located within the county of San Francisco,
-California to resolve any legal matter arising from this Agreement.
-Neither party may assign any of its rights or obligations under this
-Agreement, whether by operation of law or otherwise, without the prior
-written consent of the other party (not to be unreasonably withheld).
-Notwithstanding the foregoing, Docker may assign the entirety of its
-rights and obligations under this Agreement, without consent of the
-Customer, to its affiliate or in connection with a merger, acquisition,
-corporate reorganization, or sale of all or substantially all of its
-assets. The application of the UN Convention of International Sale of
-Goods to this Agreement is disclaimed in its entirety. Together with any
-Order Forms, this is the entire agreement between the parties relating
-to the subject matter hereof. This Agreement (including applicable Order
-Forms) shall control over any additional or different terms of any
-correspondence, order, confirmation, invoice or similar document, even
-if accepted in writing by both parties, and waivers and amendments of
-any provision of this Agreement shall be effective only if made by
-non-preprinted agreements indicating specifically what sections of this
-Agreement are affected, signed by both parties and clearly understood by
-both parties to be an amendment or waiver. The failure of either party
-to enforce its rights under this Agreement at any time for any period
-shall not be construed as a waiver of such rights. If any provision of
-this Agreement is held invalid or unenforceable, the remainder of this
-Agreement will continue in full force and effect and the invalid or
-unenforceable provision shall be reformed to the extent necessary to
-make it valid and enforceable. When a new Subscription Term begins the
-most current version of this Agreement made available by Docker shall be
-applicable to Customer's use of the Software.`
-};
diff --git a/app/scripts/components/filter/FilterBar.css b/app/scripts/components/filter/FilterBar.css
deleted file mode 100644
index 1caf8b5154..0000000000
--- a/app/scripts/components/filter/FilterBar.css
+++ /dev/null
@@ -1,19 +0,0 @@
-
-.filterInput {
- input[type='text'] {
- background-color: white;
- border-right: 0;
- &:focus {
- border-color: #ccc;
- }
- }
-}
-
-.iconStyle {
- border-left: 0;
- background: white !important;
- line-height: 2.5;
- &:hover {
- cursor: pointer;
- }
-}
\ No newline at end of file
diff --git a/app/scripts/components/filter/FilterBar.jsx b/app/scripts/components/filter/FilterBar.jsx
deleted file mode 100644
index 6058646589..0000000000
--- a/app/scripts/components/filter/FilterBar.jsx
+++ /dev/null
@@ -1,66 +0,0 @@
-'use strict';
-import React, { createClass, PropTypes } from 'react';
-import FA from 'common/FontAwesome';
-import styles from './FilterBar.css';
-const { bool, func, string } = PropTypes;
-const debug = require('debug')('COMPONENT:FilterBar');
-
-const FilterBar = createClass({
- //Optional placeholder string
- //Filter function to invoke when the query is submitted
- propTypes: {
- placeholder: string,
- onFilter: func.isRequired,
- onClick: func
- },
- getDefaultProps() {
- return {
- placeholder: 'Type to filter'
- };
- },
- getInitialState() {
- return {
- query: ''
- };
- },
- _clearQuery: function(event) {
- this.setState({
- query: ''
- });
- this.props.onFilter('');
- },
- _handleQueryChange: function(event) {
- event.preventDefault();
- this.props.onFilter(event.target.value);
- this.setState({query: event.target.value});
- },
- _onSubmit: function(event) {
- event.preventDefault();
- },
- render: function() {
- const { onClick, placeholder } = this.props;
- const { query } = this.state;
- const maybeCancel = query ? '╳' : '';
-
- return (
-
- );
- }
-
- componentDidMount() {
- const teamExists = find(this.props.teams, (team) => {
- return team.name === this.props.location.query.team;
- });
- if (!this.props.location.query.team) {
- //If there are teams, on load navigate to the first team and show members
- if (this.props.teams && this.props.teams.length > 0) {
- this.props.history.pushState(null, `/u/${this.props.currentUserContext}/dashboard/teams/`, {team: this.props.teams[0].name});
- }
- } else if (!teamExists) {
- //If the team query isn't one of the orgs teams navigate to first in list
- this.props.history.pushState(null, `/u/${this.props.currentUserContext}/dashboard/teams/`, {team: this.props.teams[0].name});
- }
- }
-
- render() {
- var addOrUpdateTeamForm;
- if (this.state.addingTeam && !this.props.teamReadOnly) {
- addOrUpdateTeamForm = (
-
-
-
-
- {`The build rules below specify how to build your source into Docker images.
- The name can be a string or a regex. The Docker Tag name may contain variables.
- We currently support {sourceref}, which refers to the source branch/tag name.`}
- {!this.state.isContextualHelpOpen ? ( Show more ) : ( Show less )}
-
-
- Trigger endpoints are activated. Use the trigger token or URL below in
- your requests.
-
- { showExamples ? ' Hide examples.' : ' Show examples.' }
-
-
- );
-
- let triggerToken = null;
- if (this.props.triggerStatus.token) {
- triggerToken = (
-
-
Trigger Token
-
-
-
-
-
- {regen}
-
-
-
- );
- }
- let triggerUrl = null;
- if (this.props.triggerStatus.trigger_url) {
- triggerUrl = (
-
-
Trigger URL
-
-
-
-
-
-
- );
- }
- return (
-
-
Note: Build requests are throttled so that they don't
- overload the system. If there is already a build request
- pending, the request will be ignored.
- {examplesToggle}
- {button}
-
-
- {triggerToken}
- {triggerUrl}
- {examples}
-
-
-
- );
- }
-});
-
-// Currently logs will be static
-var TriggerLogs = createClass({
- PropTypes: {
- triggerLogs: PropTypes.array.isRequired
- },
- _makeLogs() {
- if (this.props.triggerLogs.length === 0) {
- return (
-
-
- No logs to show
-
-
- );
- } else {
- return (
-
- If your automated build is linked to private repository on github or bitbucket,
- we need a way to have access to the repository. We do this with deploy keys.
- We try to do this step automatically for you, but sometimes we don't have access to do it.
- When this happens you need to add it yourself.
-
- A webhook is an HTTP call-back triggered by a specific event.
- You can create a single webhook to start and connect multiple
- webhooks to further build out your workflow.
-
- );
- }
-
-}
diff --git a/app/scripts/components/repo/repo_details/ScannedTag.css b/app/scripts/components/repo/repo_details/ScannedTag.css
deleted file mode 100644
index d577917f10..0000000000
--- a/app/scripts/components/repo/repo_details/ScannedTag.css
+++ /dev/null
@@ -1,8 +0,0 @@
-.scanTitle {
- font-size: 1.2rem;
-}
-
-/* add extra padding so that borders work */
-.wrapper {
- padding: 0.5rem;
-}
diff --git a/app/scripts/components/repo/repo_details/ScannedTag.jsx b/app/scripts/components/repo/repo_details/ScannedTag.jsx
deleted file mode 100644
index 08accb3303..0000000000
--- a/app/scripts/components/repo/repo_details/ScannedTag.jsx
+++ /dev/null
@@ -1,124 +0,0 @@
-'use strict';
-
-import React, { PropTypes, Component } from 'react';
-import Card, { Block } from '@dux/element-card';
-import { connect } from 'react-redux';
-import { createSelector, createStructuredSelector } from 'reselect';
-import {
- getComponents,
- getComponentsBySeverity,
- getLayers,
- getScan,
- getVulnerabilities,
- getVulnerabilitiesByLayer
-} from './scannedTag/selectors';
-import { ERROR } from 'reduxConsts';
-import forEach from 'lodash/collection/forEach';
-import ScanHeader from './scannedTag/ScanHeader.jsx';
-import Layer from './scannedTag/Layer.jsx';
-import styles from './ScannedTag.css';
-import { Map } from 'immutable';
-import { getStatus } from 'selectors/status';
-
-const { object, array, instanceOf, shape } = PropTypes;
-
-// TODO: conversion to records
-// Right now we're storing only *one* scan in the reducer, therefore all entities
-// can be merged in to the scan as we know they all belong to this scan.
-//
-// This produces a denormalized, nested scan with all entities as the child of the scan.
-const getFullScan = createSelector(
- [getScan, getLayers, getComponents, getVulnerabilities],
- (scan, layers, components, vulns) => ({
- ...scan,
- layers,
- components,
- vulnerabilities: vulns
- })
-);
-
-let mapState = createStructuredSelector({
- componentsBySeverity: getComponentsBySeverity,
- scan: getFullScan,
- status: getStatus,
- vulnerabilitiesByLayer: getVulnerabilitiesByLayer
-});
-
-/**
- * This component is the detail view of particular scan/tag combination, showing
- * vulnerability and component information for a tag.
- */
-@connect(mapState)
-export default class ScannedTag extends Component {
-
- static propTypes = {
- componentsBySeverity: shape({
- critical: array,
- major: array,
- minor: array,
- secure: array
- }),
- scan: object,
- status: instanceOf(Map),
- vulnerabilitiesByLayer: object
- }
-
- mkLayer = (layerIndex) => {
- const { scan, vulnerabilitiesByLayer } = this.props;
- const layer = scan.layers[layerIndex];
- const layerVulnerabilities = vulnerabilitiesByLayer[layerIndex];
- let layerComponents = {};
- //layer.components is an array with ids
- forEach(layer.components, c => {
- layerComponents[c] = scan.components[c];
- });
- return (
-
- );
- };
-
- render() {
- const {
- componentsBySeverity,
- params,
- namespace,
- scan,
- status,
- vulnerabilitiesByLayer
- } = this.props;
- const { blobs, reponame, tag, scan_id } = scan;
- //TODO change to use redux-simple-router params when we include it
- const ns = namespace ? namespace : params.user;
- const rn = reponame ? reponame : params.splat;
- const tn = tag ? tag : params.tagname;
- //No scan_id ==> first scan has failed or is in progress
- const scanError = !blobs || !scan_id;
- if (status.getIn(['getScanForTag', ns, rn, tn, 'status']) === ERROR || scanError) {
- return (
-
-
-
Scan results unavailable.
-
-
- );
- }
- const layerInfo = {`${reponame}:${tag}`};
- // blobs is an ordered array of layer ids, so we must use that to preserve API ordering
- return (
- Scan results for {layerInfo}}>
-
-
-
- {blobs.map(this.mkLayer)}
-
-
-
- );
- }
-}
diff --git a/app/scripts/components/repo/repo_details/Tags.css b/app/scripts/components/repo/repo_details/Tags.css
deleted file mode 100644
index 64cc8c458b..0000000000
--- a/app/scripts/components/repo/repo_details/Tags.css
+++ /dev/null
@@ -1,36 +0,0 @@
-@import "dux/css/colors.css";
-
-.cardHeader {
- font-size: 1.2rem;
-}
-
-.secondaryTableHeader {
- font-weight: 500;
- font-size: 0.9rem;
- padding: 0.4rem 0;
-}
-
-.empty {
- font-size: 1.25rem;
- font-weight: 300;
- color: var(--secondary-3);
-}
-
-.inlineBlock {
- display: inline-block;
-}
-
-.questionMark {
- composes: inlineBlock;
- color: #C4CDD9;
- cursor: pointer;
- font-size: 1.2rem;
-}
-
-.tooltipTitle {
- font-weight: 500;
-}
-
-.toggleButtonWrapper {
- text-align: center;
-}
diff --git a/app/scripts/components/repo/repo_details/Tags.jsx b/app/scripts/components/repo/repo_details/Tags.jsx
deleted file mode 100644
index da20e5bd8e..0000000000
--- a/app/scripts/components/repo/repo_details/Tags.jsx
+++ /dev/null
@@ -1,212 +0,0 @@
-'use strict';
-import React, { PropTypes, Component } from 'react';
-const { array, object, bool, string, number, shape, func } = PropTypes;
-import { connect } from 'react-redux';
-import Card, { Block } from '@dux/element-card';
-
-import { FlexTable, FlexRow, FlexHeader, FlexItem } from 'common/FlexTable.jsx';
-import ScannedTagRow from './tags/ScannedTagRow.jsx';
-import UnscannedTagRow from './tags/UnscannedTagRow.jsx';
-import styles from './Tags.css';
-import FontAwesome from 'common/FontAwesome';
-import Tooltip from 'rc-tooltip';
-import { createStructuredSelector } from 'reselect';
-import {
- getScannedTags,
- getScannedTagCount,
- getUnscannedTags,
- getUnscannedTagCount
-} from './tags/selectors';
-import { getStatus } from 'selectors/status';
-import * as tagActions from 'actions/redux/tags.js';
-import { mapActions } from 'reduxUtils';
-import Button from '@dux/element-button';
-import { StatusRecord } from 'records';
-import moment from 'moment';
-
-const debug = require('debug')('RepositoryDetailsTags');
-
-const mapState = createStructuredSelector({
- scannedTags: getScannedTags,
- scannedTagCount: getScannedTagCount,
- unscannedTags: getUnscannedTags,
- unscannedTagCount: getUnscannedTagCount,
- status: getStatus
-});
-
-/**
- * TagDisplay is the new UI for listing tags with vulnerability information from
- * nautilus.
- * It connects to the redux store and uses redux actions.
- */
-@connect(mapState, mapActions(tagActions))
-class TagDisplay extends Component {
-
- static propTypes = {
- actions: shape({
- deleteRepoTag: func
- }),
- status: object,
-
- scannedTags: array,
- scannedTagCount: number,
- unscannedTags: array,
- unscannedTagCount: number
- }
-
- state = {
- //one of 'unknown', 'show'
- showUnscannedTags: 'unknown'
- }
-
- toggleShowUnscannedTags = (e) => {
- const { showUnscannedTags } = this.state;
- //will be 'unknown' on the first time clicking Show Outdated Tags
- this.setState({
- showUnscannedTags: 'show'
- });
- }
-
- mkUnscannedTagRow = (tag) => {
- const { status } = this.props;
- const tagName = tag.name;
- const tagStatus = status.getIn(['deleteRepoTag', tagName], new StatusRecord());
- return (
-
- );
- }
-
- mkUnscannedTagTable = () => {
- const { unscannedTags, unscannedTagCount, scannedTagCount } = this.props;
- const { showUnscannedTags } = this.state;
- if (!unscannedTagCount) {
- return null;
- }
- //Nautilus scan results exist --> show the button instead of the table
- if (showUnscannedTags === 'unknown' && scannedTagCount) {
- return (
-
-
-
- );
- }
- //Nautilus scan results exist and the button has been pressed to show unscanned tags
- if (showUnscannedTags === 'show' && scannedTagCount) {
- return (
-
-
-
-
- );
-
- }
-}
diff --git a/app/scripts/components/repo/repo_details/tags/selectors.js b/app/scripts/components/repo/repo_details/tags/selectors.js
deleted file mode 100644
index 07cc2e5104..0000000000
--- a/app/scripts/components/repo/repo_details/tags/selectors.js
+++ /dev/null
@@ -1,65 +0,0 @@
-'use strict';
-
-import { createSelector } from 'reselect';
-import { Map, List } from 'immutable';
-import filter from 'lodash/collection/filter';
-import size from 'lodash/collection/size';
-import map from 'lodash/collection/map';
-import values from 'lodash/object/values';
-
-
-// Returns all repository tags (unordered)
-export const getRepoTags = (state) => {
- const reponame = state.repos.get('name', '');
- const namespace = state.repos.get('namespace', '');
- // We need to use toJS() to deeply convert tags from immutable to objects.
- // We also return an array because getScannedTags and getUnscannedTags return
- // arrays - keeping things consistent.
- return values(state.tags.getIn([namespace, reponame, 'tags'], new Map()).toJS());
-};
-
-// Returns all repository tags in the order of the hub API
-// Note: This does _not_ return any tags that are returned by nautilus but not hub
-// so we use the getRepoTags for the scannedTag selector
-export const getRepoTagsInOrder = (state) => {
- const reponame = state.repos.get('name', '');
- const namespace = state.repos.get('namespace', '');
- // We need to use toJS() to deeply convert tags from immutable to objects.
- // We also return an array because getScannedTags and getUnscannedTags return
- // arrays - keeping things consistent.
- let orderedTags = state.tags.getIn([namespace, reponame, 'result'], []);
- if (orderedTags.toArray) {
- orderedTags = orderedTags.toArray();
- }
- const tags = state.tags.getIn([namespace, reponame, 'tags'], new Map()).toJS();
- return map(orderedTags, (tagId) => tags[tagId]);
-};
-
-
-// Returns only tags which have been scanned by nautilus
-export const getScannedTags = createSelector(
- [getRepoTags],
- (tags) => {
- // If the tag has a 'healthy' key then this has been scanned by nautilus
- return filter(tags, (tag) => tag.healthy !== undefined);
- }
-);
-// Number of tags scanned by nautilus
-export const getScannedTagCount = createSelector(
- [getScannedTags],
- (tags) => size(tags)
-);
-
-// getUnscannedTags returns only tags **not** scanned by nautilus
-export const getUnscannedTags = createSelector(
- [getRepoTagsInOrder],
- (tags) => {
- // If healthy is undefined this tag only has a hub response
- return filter(tags, (tag) => tag.healthy === undefined);
- }
-);
-
-export const getUnscannedTagCount = createSelector(
- [getUnscannedTags],
- (tags) => size(tags)
-);
diff --git a/app/scripts/components/repositories/AutoBuildSetupForm.css b/app/scripts/components/repositories/AutoBuildSetupForm.css
deleted file mode 100644
index c078308f01..0000000000
--- a/app/scripts/components/repositories/AutoBuildSetupForm.css
+++ /dev/null
@@ -1,49 +0,0 @@
-@import "dux/css/box.css";
-@import "dux/css/colors.css";
-
-.errorText {
- white-space: pre;
-}
-
-.input {
- margin-right: var(--default-margin);
-}
-
-.formContainer {
- margin-top: var(--default-margin);
-}
-
-.error {
- color: var(--primary-5);
- font-size: .875rem;
- margin-bottom: 0.3rem;
-}
-
-/* TODO: this is also used in EnterpriseTrialForm.css | a candidate to be in colors.css */
-.label {
- color: #7a8491;
- font-weight: 500;
- sup {
- font-size: 1rem;
- vertical-align: text-bottom;
- }
-}
-
-.customizeLabel {
- composes: label;
- margin-bottom: 0.5rem;
-}
-
-.floatRight {
- float: right;
- margin-right: 1rem;
-}
-
-.globalError {
- composes: error;
- composes: floatRight;
-}
-
-.select {
- border-radius: var(--global-radius);
-}
\ No newline at end of file
diff --git a/app/scripts/components/repositories/AutoBuildSetupForm.jsx b/app/scripts/components/repositories/AutoBuildSetupForm.jsx
deleted file mode 100644
index 19d03c80af..0000000000
--- a/app/scripts/components/repositories/AutoBuildSetupForm.jsx
+++ /dev/null
@@ -1,410 +0,0 @@
-'use strict';
-
-import React, { PropTypes } from 'react';
-import findIndex from 'lodash/array/findIndex';
-import includes from 'lodash/collection/includes';
-import omit from 'lodash/object/omit';
-import map from 'lodash/collection/map';
-import AutoBuildTagsInput from './AutoBuildTagsInput.jsx';
-import connectToStores from 'fluxible-addons-react/connectToStores';
-import RepositoryNameInput from 'common/RepositoryNameInput.jsx';
-import SimpleTextArea from 'common/SimpleTextArea.jsx';
-import AutobuildStore from '../../stores/AutobuildStore';
-import AutobuildConfigStore from '../../stores/AutobuildConfigStore';
-import AutobuildSourceRepositoriesStore from '../../stores/AutobuildSourceRepositoriesStore';
-import RepoStore from '../../stores/RepositoryPageStore';
-import UserStore from '../../stores/UserStore';
-import createAutobuild from '../../actions/createAutobuild';
-import updateAutobuildFormField from '../../actions/updateAutobuildFormField.js';
-import getSettingsData from 'actions/getSettingsData';
-import { PageHeader } from 'dux';
-import AlertBox from 'common/AlertBox';
-import Card, { Block } from '@dux/element-card';
-import Button from '@dux/element-button';
-import { validateRepositoryName } from '../utils/validateRepositoryName';
-import { STATUS as COMMONSTATUS } from '../../stores/common/Constants';
-import Markdown from '@dux/element-markdown';
-
-const {
- ATTEMPTING
-} = COMMONSTATUS;
-
-const buildTagsClientSideError = 'No empty strings allowed for docker tag (or) source tag/branch name specification.';
-
-import styles from './AutoBuildSetupForm.css';
-
-const {
- array,
- bool,
- func,
- number,
- object,
- oneOf,
- shape,
- string
-} = PropTypes;
-
-var AutoBuildSetupForm = React.createClass({
- contextTypes: {
- executeAction: func.isRequired
- },
- propTypes: {
- user: object.isRequired,
- JWT: string.isRequired,
- ownedNamespaces: array.isRequired,
- configStore: shape({
- description: string,
- isPrivate: oneOf(['private', 'public']).isRequired,
- name: string.isRequired,
- namespace: string.isRequired,
- sourceRepoName: string.isRequired,
- STATUS: string.isRequired
- }),
- sourceRepositories: shape({
- type: string.isRequired
- })
- },
- getInitialState: function() {
- return {
- isActive: true,
- buildTags: this.defaultBuildTags,
- clientSideError: '',
- advancedMode: false
- };
- },
- /*eslint-disable camelcase*/
-
- /*
- * By default, if the input is empty for source tag/branch name, send the string: '{sourceref}' & 'master'
- * By default, if the input is empty for docker tag name, send the string with regex for all matches & 'latest'
- */
- defaultBuildTags: [
- {
- id: 'tag-0',
- name: 'latest',
- source_type: 'Branch',
- source_name: 'master',
- dockerfile_location: '/'
- },
- {
- id: 'tag-1',
- name: '{sourceref}',
- source_type: 'Branch',
- source_name: '/^([^m]|.[^a]|..[^s]|...[^t]|....[^e]|.....[^r]|.{0,5}$|.{7,})/',
- dockerfile_location: '/'
- }
- ],
- getBuildTagsToSend: function() {
- let bTags = this.state.buildTags;
- return map(bTags, (tag) => {
- return omit(tag, 'id');
- });
- },
- /*eslint-enable camelcase */
- _handleCreate: function(evt) {
- evt.preventDefault();
- const { username } = this.props.user;
- const { buildTags, isActive } = this.state;
- const { description, isPrivate, name, namespace, sourceRepoName } = this.props.configStore;
- const { type } = this.props.sourceRepositories;
-
- const params = this.props.params;
- const sourceRepoFallback = `${params.sourceRepoNamespace}/${params.sourceRepoName}`;
-
- if (!validateRepositoryName(name.toLowerCase())) {
- //check if the repo name is valid | client side check
- this.setState({
- clientSideError: `No spaces and special characters other than '.' and '-' are allowed.
-Repository names should not begin/end with a '.' or '-'.`
- });
- } else {
-
- var newAutobuild = {
- user: username,
- namespace: namespace,
- name: name.toLowerCase(),
- description: description,
- is_private: isPrivate === 'private',
- build_name: sourceRepoName.toLowerCase() || sourceRepoFallback.toLowerCase(),
- provider: type.toLowerCase(),
- active: isActive,
- tags: this.getBuildTagsToSend()
- };
-
- this.context.executeAction(createAutobuild, {JWT: this.props.JWT, autobuildConfig: newAutobuild});
- }
- },
- _onActiveStateChange: function(e) {
- this.setState({isActive: !this.state.isActive});
- },
- _getTagIndex: function(id) {
- return findIndex(this.state.buildTags, function(tag) {
- return (tag.id === id);
- });
- },
- _setTagState: function(id, prop, value) {
- let bTags = this.state.buildTags;
- bTags[this._getTagIndex(id)][prop] = value; //find tag and update property
- this.setState({
- buildTags: bTags
- });
- },
- _onTagRemoved: function(id) {
- //Remove tag, when user removes it from the form
- let bTags = this.state.buildTags;
- bTags.splice(this._getTagIndex(id), 1);
- this.setState({
- buildTags: bTags
- });
- },
- _onTagAdded: function(tag) {
- let bTags = this.state.buildTags;
- bTags.push(tag);
- this.setState({
- buildTags: bTags
- });
- },
- _resetBuildTagsError: function() {
- //Reset clientSideError on change
- if (this.state.clientSideError === buildTagsClientSideError) {
- this.setState({
- clientSideError: ''
- });
- }
- },
- _onSourceNameChange: function(tagId, e) {
- let sourceName = e.target.value;
- let sourceType = this.state.buildTags[this._getTagIndex(tagId)].sourceType;
- if (sourceName === '' && sourceType && sourceType === 'Branch') {
- sourceName = '/^([^m]|.[^a]|..[^s]|...[^t]|....[^e]|.....[^r]|.{0,5}$|.{7,})/';
- } else if (sourceName === '' && sourceType && sourceType === 'Tag') {
- sourceName = '/.*/';
- } else {
- this._resetBuildTagsError();
- //Strip trailing and leading spaces. If we end up with empty string, throw an error.
- if (sourceName.trim() === '') {
- this.setState({
- clientSideError: buildTagsClientSideError
- });
- }
- }
- this._setTagState(tagId, 'source_name', sourceName.trim());
- },
- _onSourceTypeChange: function(tagId, e) {
- this._setTagState(tagId, 'source_type', e.target.value);
- },
- _onDockerfileLocationChange: function(tagId, e) {
- this._setTagState(tagId, 'dockerfile_location', e.target.value);
- },
- _onTagChange: function(tagId, e) {
- let tagName = e.target.value;
- if (tagName === '') {
- tagName = '{sourceref}';
- }
- this._setTagState(tagId, 'name', tagName.trim());
- },
- _updateForm(fieldKey) {
- return (e) => {
- if (fieldKey === 'namespace') {
- this.setState({
- currentNamespace: e.target.value
- });
- this.context.executeAction(getSettingsData, {
- JWT: this.props.JWT,
- username: e.target.value,
- repoType: 'autobuild'
- });
- } else if (fieldKey === 'name' && this.state.clientSideError) {
- this.setState({
- clientSideError: ''
- });
- }
- this.context.executeAction(updateAutobuildFormField, {
- fieldKey,
- fieldValue: e.target.value
- });
- };
- },
- componentWillReceiveProps: function(nextProps) {
- const { name, namespace, success, STATUS } = nextProps.configStore;
- //If autobuild was created successfully
- if (STATUS.SUCCESSFUL || success) {
- this.props.history.pushState(null, `/r/${namespace}/${name.toLowerCase()}/`);
- }
- },
- customTagsConfig: function() {
- this.setState({
- advancedMode: true,
- buildTags: []
- });
- },
- defaultTagsConfig: function() {
- this.setState({
- advancedMode: false,
- buildTags: this.defaultBuildTags
- });
- },
- render: function() {
-
- const {
- description,
- error,
- isPrivate,
- name,
- namespace,
- success,
- STATUS
- } = this.props.configStore;
-
- /* start error/success handling */
- let maybeSuccess = ;
- if (success) {
- maybeSuccess = {success};
- }
-
- let nameError;
- let nameErrorContent = error.dockerhub_repo_name;
- if(nameErrorContent) {
- nameError = nameErrorContent;
- }
-
- let descriptionError;
- if(error.description) {
- descriptionError = error.description;
- }
-
- let privateRepoError;
- if(error.is_private) {
- privateRepoError = error.is_private;
- }
-
- let buildTagsError;
- if (error.buildTags) {
- buildTagsError = error.buildTags;
- }
-
- let maybeError = null;
- let errorDetail = error.detail || this.state.clientSideError;
- if (errorDetail) {
- maybeError = (
-
-
- {errorDetail}
-
-
- );
- }
- /* end error handling */
-
- //Check if user has passed in namespace as query | verify if they have access to it
- let currentUserNamespace = this.props.location.query.namespace;
- if (!includes(this.props.ownedNamespaces, currentUserNamespace)) {
- //If they don't have access to the namespace set in the query param ? then fallback to default namespace
- currentUserNamespace = this.props.user.namespace;
- }
- let tagsConfigList = null;
-
- if (this.state.advancedMode) {
- tagsConfigList = (
-
-
- );
- }
-});
-module.exports = Search;
diff --git a/app/scripts/components/search/SearchBar.css b/app/scripts/components/search/SearchBar.css
deleted file mode 100644
index 46c8b37690..0000000000
--- a/app/scripts/components/search/SearchBar.css
+++ /dev/null
@@ -1,17 +0,0 @@
-@import "dux/css/box";
-
-input.searchInput {
- background: #405165;
- border: 1px solid #4c5968;
- border-radius: var(--global-radius);
- color: #fff;
- padding-left: 24px;
-}
-
-.fa {
- position: relative;
- color: white;
- max-width: 1rem;
- top: -7px;
- left: 6px;
-}
\ No newline at end of file
diff --git a/app/scripts/components/search/SearchBar.jsx b/app/scripts/components/search/SearchBar.jsx
deleted file mode 100644
index 86994c2df3..0000000000
--- a/app/scripts/components/search/SearchBar.jsx
+++ /dev/null
@@ -1,84 +0,0 @@
-'use strict';
-import React from 'react';
-import FluxibleMixin from 'fluxible-addons-react/FluxibleMixin';
-import SearchStore from '../../stores/SearchStore';
-import styles from './SearchBar.css';
-import FA from '../common/FontAwesome';
-
-var debug = require('debug')('COMPONENT:SearchBar');
-
-var _getQueryParams = function(state) {
- //transition to will always have `q` appended as query param at the very least
- //Other query params like: `s` -> sort by | `t=User` -> user | `t=Organization` -> Org | `f=official`
- // `f=automated_builds` | `s=date_created`, `s=last_updated`, `s=alphabetical`, `s=stars`, `s=downloads`
- // `s=pushes`
- var queryParams = {
- q: state.query || '',
- page: state.page || 1,
- isAutomated: state.isAutomated || 0,
- isOfficial: state.isOfficial || 0,
- starCount: state.starCount || 0,
- pullCount: state.pullCount || 0
- };
- return queryParams;
-};
-
-var SearchBar = React.createClass({
- mixins: [FluxibleMixin],
- statics: {
- storeListeners: [SearchStore]
- },
- contextTypes: {
- getStore: React.PropTypes.func.isRequired
- },
- getDefaultProps() {
- return {
- placeholder: 'Search'
- };
- },
- getInitialState: function() {
- return this.context.getStore(SearchStore).getState();
- },
- //on Search Store Change
- onChange: function() {
- //When a search query has been submitted
- var state = this.context.getStore(SearchStore).getState();
- this.setState(state);
- },
- _handleQueryChange: function(event) {
- event.preventDefault();
- //Change page to number 1 when the query is changed
- this.setState({
- page: 1
- });
- this.setState({query: event.target.value});
- },
- _handleQuerySubmit: function(event) {
- event.preventDefault();
- //second parameter will be empty object always since we don't have /search/{?}/
- //third param will be the query /search/?q=whatever&s=blah&f=bleh
- this.props.history.pushState(null, '/search/', _getQueryParams(this.state));
- },
- render: function() {
- var searchQuery = this.state.query;
- var inputPlaceholder = this.props.placeholder;
- return (
-
-
-
- );
- }
-});
-
-module.exports = SearchBar;
diff --git a/app/scripts/components/search/SearchResultItem.jsx b/app/scripts/components/search/SearchResultItem.jsx
deleted file mode 100644
index 579c35becf..0000000000
--- a/app/scripts/components/search/SearchResultItem.jsx
+++ /dev/null
@@ -1,67 +0,0 @@
-'use strict';
-
-import React from 'react';
-import Badge from '../Badge.jsx';
-import StatsComponent from '../StatsComponent.jsx';
-var debug = require('debug')('COMPONENT:SearchResultItem');
-
-//TODO: will go under the ul in item info, will be a bunch of key value pairs reused across
-//TODO: Logged out views will have the `owner/reponame` (think about this)
-//TODO: Star icon should be passed to badge as d-`iconname` where `d-` is for the docker font icons
-
-var SearchResultItem = React.createClass({
- render: function() {
- var resultItem = this.props.resultItem;
-
- //Push badges based on result item
- var badges = [];
- var officialBadge =
;
- var autobuildBadge =
;
-
- // jscs:disable requireCamelCaseOrUpperCaseIdentifiers
- if (resultItem.is_official) {
- badges.push(officialBadge);
- } else if (resultItem.is_automated) {
- badges.push(autobuildBadge);
- }
-
- //TODO: repo_owner is null atm, since API performance degrades if we try to get it
- //
- The image that the service will run. Docker images should be referenced
- with full content hash to fully specify the deployment artifact for the
- service. Example:
- postgres@sha256:e0a230a9f5b4e1b8b03bb3e8cf7322b0e42b7838c5c87f4545edb48f5eb8f077
-
-
- Command []string
-
-
- Command to run in service containers.
-
-
- Args []string
-
-
- Arguments passed to the service containers.
-
-
- Env []string
-
-
- Environment variables.
-
-
- Labels map[string]string
-
-
- Labels used for setting meta data on services.
-
-
- Ports []Port
-
-
- Service ports (composed of Port (int) and
- Protocol (string). A service description can
- only specify the container port to be exposed. These ports can be
- mapped on runtime hosts at the operator's discretion.
-
-
-
- WorkingDir string
-
-
- Working directory inside the service containers.
-
-
-
- User string
-
-
- Username or UID (format: <name|uid>[:<group|gid>]).
-
-
-
- Networks []string
-
-
- Networks that the service containers should be connected to. An entity
- deploying a bundle should create networks as needed.
-
-
-
-> **Note:** Some configuration options are not yet supported in the DAB format,
-> including volume mounts.
diff --git a/compose/completion.md b/compose/completion.md
deleted file mode 100644
index 2076d512c3..0000000000
--- a/compose/completion.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-
-# Command-line Completion
-
-Compose comes with [command completion](http://en.wikipedia.org/wiki/Command-line_completion)
-for the bash and zsh shell.
-
-## Installing Command Completion
-
-### Bash
-
-Make sure bash completion is installed. If you use a current Linux in a non-minimal installation, bash completion should be available.
-On a Mac, install with `brew install bash-completion`
-
-Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g.
-
- curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
-
-Completion will be available upon next login.
-
-### Zsh
-
-Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`
-
- mkdir -p ~/.zsh/completion
- curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose
-
-Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`
-
- fpath=(~/.zsh/completion $fpath)
-
-Make sure `compinit` is loaded or do it by adding in `~/.zshrc`
-
- autoload -Uz compinit && compinit -i
-
-Then reload your shell
-
- exec $SHELL -l
-
-## Available completions
-
-Depending on what you typed on the command line so far, it will complete
-
- - available docker-compose commands
- - options that are available for a particular command
- - service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). For `docker-compose scale`, completed service names will automatically have "=" appended.
- - arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1.
-
-Enjoy working with Compose faster and with less typos!
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/compose-file.md b/compose/compose-file.md
deleted file mode 100644
index 853a886e78..0000000000
--- a/compose/compose-file.md
+++ /dev/null
@@ -1,1170 +0,0 @@
-
-
-
-# Compose file reference
-
-The Compose file is a [YAML](http://yaml.org/) file defining
-[services](#service-configuration-reference),
-[networks](#network-configuration-reference) and
-[volumes](#volume-configuration-reference).
-The default path for a Compose file is `./docker-compose.yml`.
-
-A service definition contains configuration which will be applied to each
-container started for that service, much like passing command-line parameters to
-`docker run`. Likewise, network and volume definitions are analogous to
-`docker network create` and `docker volume create`.
-
-As with `docker run`, options specified in the Dockerfile (e.g., `CMD`,
-`EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to
-specify them again in `docker-compose.yml`.
-
-You can use environment variables in configuration values with a Bash-like
-`${VARIABLE}` syntax - see [variable substitution](#variable-substitution) for
-full details.
-
-
-## Service configuration reference
-
-> **Note:** There are two versions of the Compose file format – version 1 (the
-> legacy format, which does not support volumes or networks) and version 2 (the
-> most up-to-date). For more information, see the [Versioning](#versioning)
-> section.
-
-This section contains a list of all configuration options supported by a service
-definition.
-
-### build
-
-Configuration options that are applied at build time.
-
-`build` can be specified either as a string containing a path to the build
-context, or an object with the path specified under [context](#context) and
-optionally [dockerfile](#dockerfile) and [args](#args).
-
- build: ./dir
-
- build:
- context: ./dir
- dockerfile: Dockerfile-alternate
- args:
- buildno: 1
-
-If you specify `image` as well as `build`, then Compose names the built image
-with the `webapp` and optional `tag` specified in `image`:
-
- build: ./dir
- image: webapp:tag
-
-This will result in an image named `webapp` and tagged `tag`, built from `./dir`.
-
-> **Note**: In the [version 1 file format](#version-1), `build` is different in
-> two ways:
->
-> - Only the string form (`build: .`) is allowed - not the object form.
-> - Using `build` together with `image` is not allowed. Attempting to do so
-> results in an error.
-
-#### context
-
-> [Version 2 file format](#version-2) only. In version 1, just use
-> [build](#build).
-
-Either a path to a directory containing a Dockerfile, or a url to a git repository.
-
-When the value supplied is a relative path, it is interpreted as relative to the
-location of the Compose file. This directory is also the build context that is
-sent to the Docker daemon.
-
-Compose will build and tag it with a generated name, and use that image thereafter.
-
- build:
- context: ./dir
-
-#### dockerfile
-
-Alternate Dockerfile.
-
-Compose will use an alternate file to build with. A build path must also be
-specified.
-
- build:
- context: .
- dockerfile: Dockerfile-alternate
-
-> **Note**: In the [version 1 file format](#version-1), `dockerfile` is
-> different in two ways:
-
- * It appears alongside `build`, not as a sub-option:
-
- build: .
- dockerfile: Dockerfile-alternate
-
- * Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error.
-
-#### args
-
-> [Version 2 file format](#version-2) only.
-
-Add build arguments, which are environment variables accessible only during the
-build process.
-
-First, specify the arguments in your Dockerfile:
-
- ARG buildno
- ARG password
-
- RUN echo "Build number: $buildno"
- RUN script-requiring-password.sh "$password"
-
-Then specify the arguments under the `build` key. You can pass either a mapping
-or a list:
-
- build:
- context: .
- args:
- buildno: 1
- password: secret
-
- build:
- context: .
- args:
- - buildno=1
- - password=secret
-
-You can omit the value when specifying a build argument, in which case its value
-at build time is the value in the environment where Compose is running.
-
- args:
- - buildno
- - password
-
-> **Note**: YAML boolean values (`true`, `false`, `yes`, `no`, `on`, `off`) must
-> be enclosed in quotes, so that the parser interprets them as strings.
-
-### cap_add, cap_drop
-
-Add or drop container capabilities.
-See `man 7 capabilities` for a full list.
-
- cap_add:
- - ALL
-
- cap_drop:
- - NET_ADMIN
- - SYS_ADMIN
-
-### command
-
-Override the default command.
-
- command: bundle exec thin -p 3000
-
-The command can also be a list, in a manner similar to [dockerfile](https://docs.docker.com/engine/reference/builder/#cmd):
-
- command: [bundle, exec, thin, -p, 3000]
-
-### cgroup_parent
-
-Specify an optional parent cgroup for the container.
-
- cgroup_parent: m-executor-abcd
-
-### container_name
-
-Specify a custom container name, rather than a generated default name.
-
- container_name: my-web-container
-
-Because Docker container names must be unique, you cannot scale a service
-beyond 1 container if you have specified a custom name. Attempting to do so
-results in an error.
-
-### devices
-
-List of device mappings. Uses the same format as the `--device` docker
-client create option.
-
- devices:
- - "/dev/ttyUSB0:/dev/ttyUSB0"
-
-### depends_on
-
-Express dependency between services, which has two effects:
-
-- `docker-compose up` will start services in dependency order. In the following
- example, `db` and `redis` will be started before `web`.
-
-- `docker-compose up SERVICE` will automatically include `SERVICE`'s
- dependencies. In the following example, `docker-compose up web` will also
- create and start `db` and `redis`.
-
-Simple example:
-
- version: '2'
- services:
- web:
- build: .
- depends_on:
- - db
- - redis
- redis:
- image: redis
- db:
- image: postgres
-
-> **Note:** `depends_on` will not wait for `db` and `redis` to be "ready" before
-> starting `web` - only until they have been started. If you need to wait
-> for a service to be ready, see [Controlling startup order](startup-order.md)
-> for more on this problem and strategies for solving it.
-
-### dns
-
-Custom DNS servers. Can be a single value or a list.
-
- dns: 8.8.8.8
- dns:
- - 8.8.8.8
- - 9.9.9.9
-
-### dns_search
-
-Custom DNS search domains. Can be a single value or a list.
-
- dns_search: example.com
- dns_search:
- - dc1.example.com
- - dc2.example.com
-
-### tmpfs
-
-> [Version 2 file format](#version-2) only.
-
-Mount a temporary file system inside the container. Can be a single value or a list.
-
- tmpfs: /run
- tmpfs:
- - /run
- - /tmp
-
-### entrypoint
-
-Override the default entrypoint.
-
- entrypoint: /code/entrypoint.sh
-
-The entrypoint can also be a list, in a manner similar to [dockerfile](https://docs.docker.com/engine/reference/builder/#entrypoint):
-
- entrypoint:
- - php
- - -d
- - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so
- - -d
- - memory_limit=-1
- - vendor/bin/phpunit
-
-
-### env_file
-
-Add environment variables from a file. Can be a single value or a list.
-
-If you have specified a Compose file with `docker-compose -f FILE`, paths in
-`env_file` are relative to the directory that file is in.
-
-Environment variables specified in `environment` override these values.
-
- env_file: .env
-
- env_file:
- - ./common.env
- - ./apps/web.env
- - /opt/secrets.env
-
-Compose expects each line in an env file to be in `VAR=VAL` format. Lines
-beginning with `#` (i.e. comments) are ignored, as are blank lines.
-
- # Set Rails/Rack environment
- RACK_ENV=development
-
-> **Note:** If your service specifies a [build](#build) option, variables
-> defined in environment files will _not_ be automatically visible during the
-> build. Use the [args](#args) sub-option of `build` to define build-time
-> environment variables.
-
-### environment
-
-Add environment variables. You can use either an array or a dictionary. Any
-boolean values (`true`, `false`, `yes`, `no`) need to be enclosed in quotes to
-ensure they are not converted to True or False by the YML parser.
-
-Environment variables with only a key are resolved to their values on the
-machine Compose is running on, which can be helpful for secret or host-specific values.
-
- environment:
- RACK_ENV: development
- SHOW: 'true'
- SESSION_SECRET:
-
- environment:
- - RACK_ENV=development
- - SHOW=true
- - SESSION_SECRET
-
-> **Note:** If your service specifies a [build](#build) option, variables
-> defined in `environment` will _not_ be automatically visible during the
-> build. Use the [args](#args) sub-option of `build` to define build-time
-> environment variables.
-
-### expose
-
-Expose ports without publishing them to the host machine - they'll only be
-accessible to linked services. Only the internal port can be specified.
-
- expose:
- - "3000"
- - "8000"
-
-### extends
-
-Extend another service, in the current file or another, optionally overriding
-configuration.
-
-You can use `extends` on any service together with other configuration keys.
-The `extends` value must be a dictionary defined with a required `service`
-and an optional `file` key.
-
- extends:
- file: common.yml
- service: webapp
-
-The `service` is the name of the service being extended, for example
-`web` or `database`. The `file` is the location of a Compose configuration
-file defining that service.
-
-If you omit the `file` Compose looks for the service configuration in the
-current file. The `file` value can be an absolute or relative path. If you
-specify a relative path, Compose treats it as relative to the location of the
-current file.
-
-You can extend a service that itself extends another. You can extend
-indefinitely. Compose does not support circular references and `docker-compose`
-returns an error if it encounters one.
-
-For more on `extends`, see the
-[the extends documentation](extends.md#extending-services).
-
-### external_links
-
-Link to containers started outside this `docker-compose.yml` or even outside
-of Compose, especially for containers that provide shared or common services.
-`external_links` follow semantics similar to `links` when specifying both the
-container name and the link alias (`CONTAINER:ALIAS`).
-
- external_links:
- - redis_1
- - project_db_1:mysql
- - project_db_1:postgresql
-
-> **Note:** If you're using the [version 2 file format](#version-2), the
-> externally-created containers must be connected to at least one of the same
-> networks as the service which is linking to them.
-
-### extra_hosts
-
-Add hostname mappings. Use the same values as the docker client `--add-host` parameter.
-
- extra_hosts:
- - "somehost:162.242.195.82"
- - "otherhost:50.31.209.229"
-
-An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g:
-
- 162.242.195.82 somehost
- 50.31.209.229 otherhost
-
-### image
-
-Specify the image to start the container from. Can either be a repository/tag or
-a partial image ID.
-
- image: redis
- image: ubuntu:14.04
- image: tutum/influxdb
- image: example-registry.com:4000/postgresql
- image: a4bc65fd
-
-If the image does not exist, Compose attempts to pull it, unless you have also
-specified [build](#build), in which case it builds it using the specified
-options and tags it with the specified tag.
-
-> **Note**: In the [version 1 file format](#version-1), using `build` together
-> with `image` is not allowed. Attempting to do so results in an error.
-
-### labels
-
-Add metadata to containers using [Docker labels](https://docs.docker.com/engine/userguide/labels-custom-metadata/). You can use either an array or a dictionary.
-
-It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.
-
- labels:
- com.example.description: "Accounting webapp"
- com.example.department: "Finance"
- com.example.label-with-empty-value: ""
-
- labels:
- - "com.example.description=Accounting webapp"
- - "com.example.department=Finance"
- - "com.example.label-with-empty-value"
-
-### links
-
-Link to containers in another service. Either specify both the service name and
-a link alias (`SERVICE:ALIAS`), or just the service name.
-
- web:
- links:
- - db
- - db:database
- - redis
-
-Containers for the linked service will be reachable at a hostname identical to
-the alias, or the service name if no alias was specified.
-
-Links also express dependency between services in the same way as
-[depends_on](#depends-on), so they determine the order of service startup.
-
-> **Note:** If you define both links and [networks](#networks), services with
-> links between them must share at least one network in common in order to
-> communicate.
-
-### logging
-
-> [Version 2 file format](#version-2) only. In version 1, use
-> [log_driver](#log_driver) and [log_opt](#log_opt).
-
-Logging configuration for the service.
-
- logging:
- driver: syslog
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-The `driver` name specifies a logging driver for the service's
-containers, as with the ``--log-driver`` option for docker run
-([documented here](https://docs.docker.com/engine/reference/logging/overview/)).
-
-The default value is json-file.
-
- driver: "json-file"
- driver: "syslog"
- driver: "none"
-
-> **Note:** Only the `json-file` driver makes the logs available directly from
-> `docker-compose up` and `docker-compose logs`. Using any other driver will not
-> print any logs.
-
-Specify logging options for the logging driver with the ``options`` key, as with the ``--log-opt`` option for `docker run`.
-
-Logging options are key-value pairs. An example of `syslog` options:
-
- driver: "syslog"
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-### log_driver
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [logging](#logging).
-
-Specify a log driver. The default is `json-file`.
-
- log_driver: syslog
-
-### log_opt
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [logging](#logging).
-
-Specify logging options as key-value pairs. An example of `syslog` options:
-
- log_opt:
- syslog-address: "tcp://192.168.0.42:123"
-
-### net
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [network_mode](#network_mode).
-
-Network mode. Use the same values as the docker client `--net` parameter.
-The `container:...` form can take a service name instead of a container name or
-id.
-
- net: "bridge"
- net: "host"
- net: "none"
- net: "container:[service name or container name/id]"
-
-### network_mode
-
-> [Version 2 file format](#version-2) only. In version 1, use [net](#net).
-
-Network mode. Use the same values as the docker client `--net` parameter, plus
-the special form `service:[service name]`.
-
- network_mode: "bridge"
- network_mode: "host"
- network_mode: "none"
- network_mode: "service:[service name]"
- network_mode: "container:[container name/id]"
-
-### networks
-
-> [Version 2 file format](#version-2) only. In version 1, use [net](#net).
-
-Networks to join, referencing entries under the
-[top-level `networks` key](#network-configuration-reference).
-
- services:
- some-service:
- networks:
- - some-network
- - other-network
-
-#### aliases
-
-Aliases (alternative hostnames) for this service on the network. Other containers on the same network can use either the service name or this alias to connect to one of the service's containers.
-
-Since `aliases` is network-scoped, the same service can have different aliases on different networks.
-
-> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name will resolve to is not guaranteed.
-
-The general format is shown here.
-
- services:
- some-service:
- networks:
- some-network:
- aliases:
- - alias1
- - alias3
- other-network:
- aliases:
- - alias2
-
-In the example below, three services are provided (`web`, `worker`, and `db`), along with two networks (`new` and `legacy`). The `db` service is reachable at the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on the `legacy` network.
-
- version: '2'
-
- services:
- web:
- build: ./web
- networks:
- - new
-
- worker:
- build: ./worker
- networks:
- - legacy
-
- db:
- image: mysql
- networks:
- new:
- aliases:
- - database
- legacy:
- aliases:
- - mysql
-
- networks:
- new:
- legacy:
-
-#### ipv4_address, ipv6_address
-
-Specify a static IP address for containers for this service when joining the network.
-
-The corresponding network configuration in the [top-level networks section](#network-configuration-reference) must have an `ipam` block with subnet and gateway configurations covering each static address. If IPv6 addressing is desired, the `com.docker.network.enable_ipv6` driver option must be set to `true`.
-
-An example:
-
- version: '2'
-
- services:
- app:
- image: busybox
- command: ifconfig
- networks:
- app_net:
- ipv4_address: 172.16.238.10
- ipv6_address: 2001:3984:3989::10
-
- networks:
- app_net:
- driver: bridge
- driver_opts:
- com.docker.network.enable_ipv6: "true"
- ipam:
- driver: default
- config:
- - subnet: 172.16.238.0/24
- gateway: 172.16.238.1
- - subnet: 2001:3984:3989::/64
- gateway: 2001:3984:3989::1
-
-### pid
-
- pid: "host"
-
-Sets the PID mode to the host PID mode. This turns on sharing of the PID
-address space between the container and the host operating system. Containers
-launched with this flag will be able to access and manipulate other
-containers in the bare-metal machine's namespace and vice versa.
-
-### ports
-
-Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container
-port (a random host port will be chosen).
-
-> **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience
-> erroneous results when using a container port lower than 60, because YAML will
-> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason,
-> we recommend always explicitly specifying your port mappings as strings.
-
- ports:
- - "3000"
- - "3000-3005"
- - "8000:8000"
- - "9090-9091:8080-8081"
- - "49100:22"
- - "127.0.0.1:8001:8001"
- - "127.0.0.1:5000-5010:5000-5010"
-
-### security_opt
-
-Override the default labeling scheme for each container.
-
- security_opt:
- - label:user:USER
- - label:role:ROLE
-
-### stop_signal
-
-Sets an alternative signal to stop the container. By default `stop` uses
-SIGTERM. Setting an alternative signal using `stop_signal` will cause
-`stop` to send that signal instead.
-
- stop_signal: SIGUSR1
-
-### ulimits
-
-Override the default ulimits for a container. You can either specify a single
-limit as an integer or soft/hard limits as a mapping.
-
-
- ulimits:
- nproc: 65535
- nofile:
- soft: 20000
- hard: 40000
-
-### volumes, volume\_driver
-
-Mount paths or named volumes, optionally specifying a path on the host machine
-(`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`).
-For [version 2 files](#version-2), named volumes need to be specified with the
-[top-level `volumes` key](#volume-configuration-reference).
-When using [version 1](#version-1), the Docker Engine will create the named
-volume automatically if it doesn't exist.
-
-You can mount a relative path on the host, which will expand relative to
-the directory of the Compose configuration file being used. Relative paths
-should always begin with `.` or `..`.
-
- volumes:
- # Just specify a path and let the Engine create a volume
- - /var/lib/mysql
-
- # Specify an absolute path mapping
- - /opt/data:/var/lib/mysql
-
- # Path on the host, relative to the Compose file
- - ./cache:/tmp/cache
-
- # User-relative path
- - ~/configs:/etc/configs/:ro
-
- # Named volume
- - datavolume:/var/lib/mysql
-
-If you do not use a host path, you may specify a `volume_driver`.
-
- volume_driver: mydriver
-
-Note that for [version 2 files](#version-2), this driver
-will not apply to named volumes (you should use the `driver` option when
-[declaring the volume](#volume-configuration-reference) instead).
-For [version 1](#version-1), both named volumes and container volumes will
-use the specified driver.
-
-> Note: No path expansion will be done if you have also specified a
-> `volume_driver`.
-
-See [Docker Volumes](https://docs.docker.com/engine/userguide/dockervolumes/) and
-[Volume Plugins](https://docs.docker.com/engine/extend/plugins_volume/) for more
-information.
-
-### volumes_from
-
-Mount all of the volumes from another service or container, optionally
-specifying read-only access (``ro``) or read-write (``rw``). If no access level is specified,
-then read-write will be used.
-
- volumes_from:
- - service_name
- - service_name:ro
- - container:container_name
- - container:container_name:rw
-
-> **Note:** The `container:...` formats are only supported in the
-> [version 2 file format](#version-2). In [version 1](#version-1), you can use
-> container names without marking them as such:
->
-> - service_name
-> - service_name:ro
-> - container_name
-> - container_name:rw
-
-### cpu\_shares, cpu\_quota, cpuset, domainname, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, privileged, read\_only, restart, shm\_size, stdin\_open, tty, user, working\_dir
-
-Each of these is a single value, analogous to its
-[docker run](https://docs.docker.com/engine/reference/run/) counterpart.
-
- cpu_shares: 73
- cpu_quota: 50000
- cpuset: 0,1
-
- user: postgresql
- working_dir: /code
-
- domainname: foo.com
- hostname: foo
- ipc: host
- mac_address: 02:42:ac:11:65:43
-
- mem_limit: 1000000000
- memswap_limit: 2000000000
- privileged: true
-
- restart: always
-
- read_only: true
- shm_size: 64M
- stdin_open: true
- tty: true
-
-
-## Volume configuration reference
-
-While it is possible to declare volumes on the fly as part of the service
-declaration, this section allows you to create named volumes that can be
-reused across multiple services (without relying on `volumes_from`), and are
-easily retrieved and inspected using the docker command line or API.
-See the [docker volume](https://docs.docker.com/engine/reference/commandline/volume_create/)
-subcommand documentation for more information.
-
-### driver
-
-Specify which volume driver should be used for this volume. Defaults to
-`local`. The Docker Engine will return an error if the driver is not available.
-
- driver: foobar
-
-### driver_opts
-
-Specify a list of options as key-value pairs to pass to the driver for this
-volume. Those options are driver-dependent - consult the driver's
-documentation for more information. Optional.
-
- driver_opts:
- foo: "bar"
- baz: 1
-
-### external
-
-If set to `true`, specifies that this volume has been created outside of
-Compose. `docker-compose up` will not attempt to create it, and will raise
-an error if it doesn't exist.
-
-`external` cannot be used in conjunction with other volume configuration keys
-(`driver`, `driver_opts`).
-
-In the example below, instead of attempting to create a volume called
-`[projectname]_data`, Compose will look for an existing volume simply
-called `data` and mount it into the `db` service's containers.
-
- version: '2'
-
- services:
- db:
- image: postgres
- volumes:
- - data:/var/lib/postgresql/data
-
- volumes:
- data:
- external: true
-
-You can also specify the name of the volume separately from the name used to
-refer to it within the Compose file:
-
- volumes:
- data:
- external:
- name: actual-name-of-volume
-
-
-## Network configuration reference
-
-The top-level `networks` key lets you specify networks to be created. For a full
-explanation of Compose's use of Docker networking features, see the
-[Networking guide](networking.md).
-
-### driver
-
-Specify which driver should be used for this network.
-
-The default driver depends on how the Docker Engine you're using is configured,
-but in most instances it will be `bridge` on a single host and `overlay` on a
-Swarm.
-
-The Docker Engine will return an error if the driver is not available.
-
- driver: overlay
-
-### driver_opts
-
-Specify a list of options as key-value pairs to pass to the driver for this
-network. Those options are driver-dependent - consult the driver's
-documentation for more information. Optional.
-
- driver_opts:
- foo: "bar"
- baz: 1
-
-### ipam
-
-Specify custom IPAM config. This is an object with several properties, each of
-which is optional:
-
-- `driver`: Custom IPAM driver, instead of the default.
-- `config`: A list with zero or more config blocks, each containing any of
- the following keys:
- - `subnet`: Subnet in CIDR format that represents a network segment
- - `ip_range`: Range of IPs from which to allocate container IPs
- - `gateway`: IPv4 or IPv6 gateway for the master subnet
- - `aux_addresses`: Auxiliary IPv4 or IPv6 addresses used by Network driver,
- as a mapping from hostname to IP
-
-A full example:
-
- ipam:
- driver: default
- config:
- - subnet: 172.28.0.0/16
- ip_range: 172.28.5.0/24
- gateway: 172.28.5.254
- aux_addresses:
- host1: 172.28.1.5
- host2: 172.28.1.6
- host3: 172.28.1.7
-
-### external
-
-If set to `true`, specifies that this network has been created outside of
-Compose. `docker-compose up` will not attempt to create it, and will raise
-an error if it doesn't exist.
-
-`external` cannot be used in conjunction with other network configuration keys
-(`driver`, `driver_opts`, `ipam`).
-
-In the example below, `proxy` is the gateway to the outside world. Instead of
-attempting to create a network called `[projectname]_outside`, Compose will
-look for an existing network simply called `outside` and connect the `proxy`
-service's containers to it.
-
- version: '2'
-
- services:
- proxy:
- build: ./proxy
- networks:
- - outside
- - default
- app:
- build: ./app
- networks:
- - default
-
- networks:
- outside:
- external: true
-
-You can also specify the name of the network separately from the name used to
-refer to it within the Compose file:
-
- networks:
- outside:
- external:
- name: actual-name-of-network
-
-
-## Versioning
-
-There are two versions of the Compose file format:
-
-- Version 1, the legacy format. This is specified by omitting a `version` key at
- the root of the YAML.
-- Version 2, the recommended format. This is specified with a `version: '2'` entry
- at the root of the YAML.
-
-To move your project from version 1 to 2, see the [Upgrading](#upgrading)
-section.
-
-> **Note:** If you're using
-> [multiple Compose files](extends.md#different-environments) or
-> [extending services](extends.md#extending-services), each file must be of the
-> same version - you cannot mix version 1 and 2 in a single project.
-
-Several things differ depending on which version you use:
-
-- The structure and permitted configuration keys
-- The minimum Docker Engine version you must be running
-- Compose's behaviour with regards to networking
-
-These differences are explained below.
-
-
-### Version 1
-
-Compose files that do not declare a version are considered "version 1". In
-those files, all the [services](#service-configuration-reference) are declared
-at the root of the document.
-
-Version 1 is supported by **Compose up to 1.6.x**. It will be deprecated in a
-future Compose release.
-
-Version 1 files cannot declare named
-[volumes](#volume-configuration-reference), [networks](networking.md) or
-[build arguments](#args).
-
-Example:
-
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- links:
- - redis
- redis:
- image: redis
-
-
-### Version 2
-
-Compose files using the version 2 syntax must indicate the version number at
-the root of the document. All [services](#service-configuration-reference)
-must be declared under the `services` key.
-
-Version 2 files are supported by **Compose 1.6.0+** and require a Docker Engine
-of version **1.10.0+**.
-
-Named [volumes](#volume-configuration-reference) can be declared under the
-`volumes` key, and [networks](#network-configuration-reference) can be declared
-under the `networks` key.
-
-Simple example:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- redis:
- image: redis
-
-A more extended example, defining volumes and networks:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- networks:
- - front-tier
- - back-tier
- redis:
- image: redis
- volumes:
- - redis-data:/var/lib/redis
- networks:
- - back-tier
- volumes:
- redis-data:
- driver: local
- networks:
- front-tier:
- driver: bridge
- back-tier:
- driver: bridge
-
-
-### Upgrading
-
-In the majority of cases, moving from version 1 to 2 is a very simple process:
-
-1. Indent the whole file by one level and put a `services:` key at the top.
-2. Add a `version: '2'` line at the top of the file.
-
-It's more complicated if you're using particular configuration features:
-
-- `dockerfile`: This now lives under the `build` key:
-
- build:
- context: .
- dockerfile: Dockerfile-alternate
-
-- `log_driver`, `log_opt`: These now live under the `logging` key:
-
- logging:
- driver: syslog
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-- `links` with environment variables: As documented in the
- [environment variables reference](link-env-deprecated.md), environment variables
- created by
- links have been deprecated for some time. In the new Docker network system,
- they have been removed. You should either connect directly to the
- appropriate hostname or set the relevant environment variable yourself,
- using the link hostname:
-
- web:
- links:
- - db
- environment:
- - DB_PORT=tcp://db:5432
-
-- `external_links`: Compose uses Docker networks when running version 2
- projects, so links behave slightly differently. In particular, two
- containers must be connected to at least one network in common in order to
- communicate, even if explicitly linked together.
-
- Either connect the external container to your app's
- [default network](networking.md), or connect both the external container and
- your service's containers to an
- [external network](networking.md#using-a-pre-existing-network).
-
-- `net`: This is now replaced by [network_mode](#network_mode):
-
- net: host -> network_mode: host
- net: bridge -> network_mode: bridge
- net: none -> network_mode: none
-
- If you're using `net: "container:[service name]"`, you must now use
- `network_mode: "service:[service name]"` instead.
-
- net: "container:web" -> network_mode: "service:web"
-
- If you're using `net: "container:[container name/id]"`, the value does not
- need to change.
-
- net: "container:cont-name" -> network_mode: "container:cont-name"
- net: "container:abc12345" -> network_mode: "container:abc12345"
-
-- `volumes` with named volumes: these must now be explicitly declared in a
- top-level `volumes` section of your Compose file. If a service mounts a
- named volume called `data`, you must declare a `data` volume in your
- top-level `volumes` section. The whole file might look like this:
-
- version: '2'
- services:
- db:
- image: postgres
- volumes:
- - data:/var/lib/postgresql/data
- volumes:
- data: {}
-
- By default, Compose creates a volume whose name is prefixed with your
- project name. If you want it to just be called `data`, declare it as
- external:
-
- volumes:
- data:
- external: true
-
-## Variable substitution
-
-Your configuration options can contain environment variables. Compose uses the
-variable values from the shell environment in which `docker-compose` is run.
-For example, suppose the shell contains `EXTERNAL_PORT=8000` and you supply
-this configuration:
-
- web:
- build: .
- ports:
- - "${EXTERNAL_PORT}:5000"
-
-When you run `docker-compose up` with this configuration, Compose looks for
-the `EXTERNAL_PORT` environment variable in the shell and substitutes its
-value in. In this example, Compose resolves the port mapping to `"8000:5000"`
-before creating the `web` container.
-
-If an environment variable is not set, Compose substitutes with an empty
-string. In the example above, if `EXTERNAL_PORT` is not set, the value for the
-port mapping is `:5000` (which is of course an invalid port mapping, and will
-result in an error when attempting to create the container).
-
-Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Extended shell-style
-features, such as `${VARIABLE-default}` and `${VARIABLE/foo/bar}`, are not
-supported.
-
-You can use a `$$` (double-dollar sign) when your configuration needs a literal
-dollar sign. This also prevents Compose from interpolating a value, so a `$$`
-allows you to refer to environment variables that you don't want processed by
-Compose.
-
- web:
- build: .
- command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
-
-If you forget and use a single dollar sign (`$`), Compose interprets the value as an environment variable and will warn you:
-
- The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string.
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
diff --git a/compose/django.md b/compose/django.md
deleted file mode 100644
index 1cf2a5675c..0000000000
--- a/compose/django.md
+++ /dev/null
@@ -1,194 +0,0 @@
-
-
-
-# Quickstart: Docker Compose and Django
-
-This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have
-[Compose installed](install.md).
-
-### Define the project components
-
-For this project, you need to create a Dockerfile, a Python dependencies file,
-and a `docker-compose.yml` file.
-
-1. Create an empty project directory.
-
- You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
-
-2. Create a new file called `Dockerfile` in your project directory.
-
- The Dockerfile defines an application's image content via one or more build
- commands that configure that image. Once built, you can run the image in a
- container. For more information on `Dockerfiles`, see the [Docker user
- guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile)
- and the [Dockerfile reference](/engine/reference/builder.md).
-
-3. Add the following content to the `Dockerfile`.
-
- FROM python:2.7
- ENV PYTHONUNBUFFERED 1
- RUN mkdir /code
- WORKDIR /code
- ADD requirements.txt /code/
- RUN pip install -r requirements.txt
- ADD . /code/
-
- This `Dockerfile` starts with a Python 2.7 base image. The base image is
- modified by adding a new `code` directory. The base image is further modified
- by installing the Python requirements defined in the `requirements.txt` file.
-
-4. Save and close the `Dockerfile`.
-
-5. Create a `requirements.txt` in your project directory.
-
- This file is used by the `RUN pip install -r requirements.txt` command in your `Dockerfile`.
-
-6. Add the required software in the file.
-
- Django
- psycopg2
-
-7. Save and close the `requirements.txt` file.
-
-8. Create a file called `docker-compose.yml` in your project directory.
-
- The `docker-compose.yml` file describes the services that make your app. In
- this example those services are a web server and database. The compose file
- also describes which Docker images these services use, how they link
- together, any volumes they might need mounted inside the containers.
- Finally, the `docker-compose.yml` file describes which ports these services
- expose. See the [`docker-compose.yml` reference](compose-file.md) for more
- information on how this file works.
-
-9. Add the following configuration to the file.
-
- version: '2'
- services:
- db:
- image: postgres
- web:
- build: .
- command: python manage.py runserver 0.0.0.0:8000
- volumes:
- - .:/code
- ports:
- - "8000:8000"
- depends_on:
- - db
-
- This file defines two services: The `db` service and the `web` service.
-
-10. Save and close the `docker-compose.yml` file.
-
-### Create a Django project
-
-In this step, you create a Django starter project by building the image from the build context defined in the previous procedure.
-
-1. Change to the root of your project directory.
-
-2. Create the Django project using the `docker-compose` command.
-
- $ docker-compose run web django-admin.py startproject composeexample .
-
-    This instructs Compose to run `django-admin.py startproject composeexample`
- in a container, using the `web` service's image and configuration. Because
- the `web` image doesn't exist yet, Compose builds it from the current
- directory, as specified by the `build: .` line in `docker-compose.yml`.
-
- Once the `web` service image is built, Compose runs it and executes the
- `django-admin.py startproject` command in the container. This command
- instructs Django to create a set of files and directories representing a
- Django project.
-
-3. After the `docker-compose` command completes, list the contents of your project.
-
- $ ls -l
- drwxr-xr-x 2 root root composeexample
- -rw-rw-r-- 1 user user docker-compose.yml
- -rw-rw-r-- 1 user user Dockerfile
- -rwxr-xr-x 1 root root manage.py
- -rw-rw-r-- 1 user user requirements.txt
-
- If you are running Docker on Linux, the files `django-admin` created are owned
- by root. This happens because the container runs as the root user. Change the
-    ownership of the new files.
-
- sudo chown -R $USER:$USER .
-
- If you are running Docker on Mac or Windows, you should already have ownership
-    of all files, including those generated by `django-admin`. List the files to
-    verify this.
-
- $ ls -l
- total 32
- -rw-r--r-- 1 user staff 145 Feb 13 23:00 Dockerfile
- drwxr-xr-x 6 user staff 204 Feb 13 23:07 composeexample
- -rw-r--r-- 1 user staff 159 Feb 13 23:02 docker-compose.yml
- -rwxr-xr-x 1 user staff 257 Feb 13 23:07 manage.py
- -rw-r--r-- 1 user staff 16 Feb 13 23:01 requirements.txt
-
-
-### Connect the database
-
-In this section, you set up the database connection for Django.
-
-1. In your project directory, edit the `composeexample/settings.py` file.
-
-2. Replace the `DATABASES = ...` with the following:
-
- DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.postgresql_psycopg2',
- 'NAME': 'postgres',
- 'USER': 'postgres',
- 'HOST': 'db',
- 'PORT': 5432,
- }
- }
-
- These settings are determined by the
- [postgres](https://hub.docker.com/_/postgres/) Docker image
- specified in `docker-compose.yml`.
-
-3. Save and close the file.
-
-4. Run the `docker-compose up` command.
-
- $ docker-compose up
- Starting composepractice_db_1...
- Starting composepractice_web_1...
- Attaching to composepractice_db_1, composepractice_web_1
- ...
- db_1 | PostgreSQL init process complete; ready for start up.
- ...
- db_1 | LOG: database system is ready to accept connections
- db_1 | LOG: autovacuum launcher started
- ..
- web_1 | Django version 1.8.4, using settings 'composeexample.settings'
- web_1 | Starting development server at http://0.0.0.0:8000/
- web_1 | Quit the server with CONTROL-C.
-
- At this point, your Django app should be running at port `8000` on your
- Docker host. If you are using a Docker Machine VM, you can use the
- `docker-machine ip MACHINE_NAME` to get the IP address.
-
- 
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/env-file.md b/compose/env-file.md
deleted file mode 100644
index be2625f889..0000000000
--- a/compose/env-file.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-# Environment file
-
-Compose supports declaring default environment variables in an environment
-file named `.env` placed in the folder the `docker-compose` command is executed from
-*(current working directory)*.
-
-Compose expects each line in an env file to be in `VAR=VAL` format. Lines
-beginning with `#` (i.e. comments) are ignored, as are blank lines.
-
-> Note: Values present in the environment at runtime will always override
-> those defined inside the `.env` file. Similarly, values passed via
-> command-line arguments take precedence as well.
-
-Those environment variables will be used for
-[variable substitution](compose-file.md#variable-substitution) in your Compose
-file, but can also be used to define the following
-[CLI variables](reference/envvars.md):
-
-- `COMPOSE_API_VERSION`
-- `COMPOSE_FILE`
-- `COMPOSE_HTTP_TIMEOUT`
-- `COMPOSE_PROJECT_NAME`
-- `DOCKER_CERT_PATH`
-- `DOCKER_HOST`
-- `DOCKER_TLS_VERIFY`
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/environment-variables.md b/compose/environment-variables.md
deleted file mode 100644
index a2e74f0a96..0000000000
--- a/compose/environment-variables.md
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-# Environment variables in Compose
-
-There are multiple parts of Compose that deal with environment variables in one sense or another. This page should help you find the information you need.
-
-
-## Substituting environment variables in Compose files
-
-It's possible to use environment variables in your shell to populate values inside a Compose file:
-
- web:
- image: "webapp:${TAG}"
-
-For more information, see the [Variable substitution](compose-file.md#variable-substitution) section in the Compose file reference.
-
-
-## Setting environment variables in containers
-
-You can set environment variables in a service's containers with the ['environment' key](compose-file.md#environment), just like with `docker run -e VARIABLE=VALUE ...`:
-
- web:
- environment:
- - DEBUG=1
-
-
-## Passing environment variables through to containers
-
-You can pass environment variables from your shell straight through to a service's containers with the ['environment' key](compose-file.md#environment) by not giving them a value, just like with `docker run -e VARIABLE ...`:
-
- web:
- environment:
- - DEBUG
-
-The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
-
-
-## The “env_file” configuration option
-
-You can pass multiple environment variables from an external file through to a service's containers with the ['env_file' option](compose-file.md#env-file), just like with `docker run --env-file=FILE ...`:
-
- web:
- env_file:
- - web-variables.env
-
-
-## Setting environment variables with 'docker-compose run'
-
-Just like with `docker run -e`, you can set environment variables on a one-off container with `docker-compose run -e`:
-
- $ docker-compose run -e DEBUG=1 web python console.py
-
-You can also pass a variable through from the shell by not giving it a value:
-
- $ docker-compose run -e DEBUG web python console.py
-
-The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
-
-
-## The “.env” file
-
-You can set default values for any environment variables referenced in the Compose file, or used to configure Compose, in an [environment file](env-file.md) named `.env`:
-
- $ cat .env
- TAG=v1.5
-
- $ cat docker-compose.yml
- version: '2.0'
- services:
- web:
- image: "webapp:${TAG}"
-
-When you run `docker-compose up`, the `web` service defined above uses the image `webapp:v1.5`. You can verify this with the [config command](reference/config.md), which prints your resolved application config to the terminal:
-
- $ docker-compose config
- version: '2.0'
- services:
- web:
- image: 'webapp:v1.5'
-
-Values in the shell take precedence over those specified in the `.env` file. If you set `TAG` to a different value in your shell, the substitution in `image` uses that instead:
-
- $ export TAG=v2.0
-
- $ docker-compose config
- version: '2.0'
- services:
- web:
- image: 'webapp:v2.0'
-
-## Configuring Compose using environment variables
-
-Several environment variables are available for you to configure the Docker Compose command-line behaviour. They begin with `COMPOSE_` or `DOCKER_`, and are documented in [CLI Environment Variables](reference/envvars.md).
-
-
-## Environment variables created by links
-
-When using the ['links' option](compose-file.md#links) in a [v1 Compose file](compose-file.md#version-1), environment variables will be created for each link. They are documented in the [Link environment variables reference](link-env-deprecated.md). Please note, however, that these variables are deprecated - you should just use the link alias as a hostname instead.
diff --git a/compose/extends.md b/compose/extends.md
deleted file mode 100644
index 6f457391f5..0000000000
--- a/compose/extends.md
+++ /dev/null
@@ -1,354 +0,0 @@
-
-
-
-# Extending services and Compose files
-
-Compose supports two methods of sharing common configuration:
-
-1. Extending an entire Compose file by
- [using multiple Compose files](#multiple-compose-files)
-2. Extending individual services with [the `extends` field](#extending-services)
-
-
-## Multiple Compose files
-
-Using multiple Compose files enables you to customize a Compose application
-for different environments or different workflows.
-
-### Understanding multiple Compose files
-
-By default, Compose reads two files, a `docker-compose.yml` and an optional
-`docker-compose.override.yml` file. By convention, the `docker-compose.yml`
-contains your base configuration. The override file, as its name implies, can
-contain configuration overrides for existing services or entirely new
-services.
-
-If a service is defined in both files, Compose merges the configurations using
-the rules described in [Adding and overriding
-configuration](#adding-and-overriding-configuration).
-
-To use multiple override files, or an override file with a different name, you
-can use the `-f` option to specify the list of files. Compose merges files in
-the order they're specified on the command line. See the [`docker-compose`
-command reference](./reference/overview.md) for more information about
-using `-f`.
-
-When you use multiple configuration files, you must make sure all paths in the
-files are relative to the base Compose file (the first Compose file specified
-with `-f`). This is required because override files need not be valid
-Compose files. Override files can contain small fragments of configuration.
-Tracking which fragment of a service is relative to which path is difficult and
-confusing, so to keep paths easier to understand, all paths must be defined
-relative to the base file.
-
-### Example use case
-
-In this section are two common use cases for multiple compose files: changing a
-Compose app for different environments, and running administrative tasks
-against a Compose app.
-
-#### Different environments
-
-A common use case for multiple files is changing a development Compose app
-for a production-like environment (which may be production, staging or CI).
-To support these differences, you can split your Compose configuration into
-a few different files:
-
-Start with a base file that defines the canonical configuration for the
-services.
-
-**docker-compose.yml**
-
- web:
- image: example/my_web_app:latest
- links:
- - db
- - cache
-
- db:
- image: postgres:latest
-
- cache:
- image: redis:latest
-
-In this example the development configuration exposes some ports to the
-host, mounts our code as a volume, and builds the web image.
-
-**docker-compose.override.yml**
-
-
- web:
- build: .
- volumes:
- - '.:/code'
- ports:
- - 8883:80
- environment:
- DEBUG: 'true'
-
- db:
- command: '-d'
- ports:
- - 5432:5432
-
- cache:
- ports:
- - 6379:6379
-
-When you run `docker-compose up` it reads the overrides automatically.
-
-Now, it would be nice to use this Compose app in a production environment. So,
-create another override file (which might be stored in a different git
-repo or managed by a different team).
-
-**docker-compose.prod.yml**
-
- web:
- ports:
- - 80:80
- environment:
- PRODUCTION: 'true'
-
- cache:
- environment:
- TTL: '500'
-
-To deploy with this production Compose file you can run
-
- docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
-
-This deploys all three services using the configuration in
-`docker-compose.yml` and `docker-compose.prod.yml` (but not the
-dev configuration in `docker-compose.override.yml`).
-
-
-See [production](production.md) for more information about Compose in
-production.
-
-#### Administrative tasks
-
-Another common use case is running adhoc or administrative tasks against one
-or more services in a Compose app. This example demonstrates running a
-database backup.
-
-Start with a **docker-compose.yml**.
-
- web:
- image: example/my_web_app:latest
- links:
- - db
-
- db:
- image: postgres:latest
-
-In a **docker-compose.admin.yml** add a new service to run the database
-export or backup.
-
- dbadmin:
- build: database_admin/
- links:
- - db
-
-To start a normal environment run `docker-compose up -d`. To run a database
-backup, include the `docker-compose.admin.yml` as well.
-
- docker-compose -f docker-compose.yml -f docker-compose.admin.yml \
- run dbadmin db-backup
-
-
-## Extending services
-
-Docker Compose's `extends` keyword enables sharing of common configurations
-among different files, or even different projects entirely. Extending services
-is useful if you have several services that reuse a common set of configuration
-options. Using `extends` you can define a common set of service options in one
-place and refer to it from anywhere.
-
-> **Note:** `links`, `volumes_from`, and `depends_on` are never shared between
-> services using `extends`. These exceptions exist to avoid
-> implicit dependencies—you always define `links` and `volumes_from`
-> locally. This ensures dependencies between services are clearly visible when
-> reading the current file. Defining these locally also ensures changes to the
-> referenced file don't result in breakage.
-
-### Understand the extends configuration
-
-When defining any service in `docker-compose.yml`, you can declare that you are
-extending another service like this:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
-
-This instructs Compose to re-use the configuration for the `webapp` service
-defined in the `common-services.yml` file. Suppose that `common-services.yml`
-looks like this:
-
- webapp:
- build: .
- ports:
- - "8000:8000"
- volumes:
- - "/data"
-
-In this case, you'll get exactly the same result as if you wrote
-`docker-compose.yml` with the same `build`, `ports` and `volumes` configuration
-values defined directly under `web`.
-
-You can go further and define (or re-define) configuration locally in
-`docker-compose.yml`:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
- environment:
- - DEBUG=1
- cpu_shares: 5
-
- important_web:
- extends: web
- cpu_shares: 10
-
-You can also write other services and link your `web` service to them:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
- environment:
- - DEBUG=1
- cpu_shares: 5
- links:
- - db
- db:
- image: postgres
-
-### Example use case
-
-Extending an individual service is useful when you have multiple services that
-have a common configuration. The example below is a Compose app with
-two services: a web application and a queue worker. Both services use the same
-codebase and share many configuration options.
-
-In a **common.yml** we define the common configuration:
-
- app:
- build: .
- environment:
- CONFIG_FILE_PATH: /code/config
- API_KEY: xxxyyy
- cpu_shares: 5
-
-In a **docker-compose.yml** we define the concrete services which use the
-common configuration:
-
- webapp:
- extends:
- file: common.yml
- service: app
- command: /code/run_web_app
- ports:
- - 8080:8080
- links:
- - queue
- - db
-
- queue_worker:
- extends:
- file: common.yml
- service: app
- command: /code/run_worker
- links:
- - queue
-
-## Adding and overriding configuration
-
-Compose copies configurations from the original service over to the local one.
-If a configuration option is defined in both the original service and the local
-service, the local value *replaces* or *extends* the original value.
-
-For single-value options like `image`, `command` or `mem_limit`, the new value
-replaces the old value.
-
- # original service
- command: python app.py
-
- # local service
- command: python otherapp.py
-
- # result
- command: python otherapp.py
-
-> **Note:** In the case of `build` and `image`, when using
-> [version 1 of the Compose file format](compose-file.md#version-1), using one
-> option in the local service causes Compose to discard the other option if it
-> was defined in the original service.
->
-> For example, if the original service defines `image: webapp` and the
-> local service defines `build: .` then the resulting service will have
-> `build: .` and no `image` option.
->
-> This is because `build` and `image` cannot be used together in a version 1
-> file.
-
-For the **multi-value options** `ports`, `expose`, `external_links`, `dns`,
-`dns_search`, and `tmpfs`, Compose concatenates both sets of values:
-
- # original service
- expose:
- - "3000"
-
- # local service
- expose:
- - "4000"
- - "5000"
-
- # result
- expose:
- - "3000"
- - "4000"
- - "5000"
-
-In the case of `environment`, `labels`, `volumes` and `devices`, Compose
-"merges" entries together with locally-defined values taking precedence:
-
- # original service
- environment:
- - FOO=original
- - BAR=original
-
- # local service
- environment:
- - BAR=local
- - BAZ=local
-
- # result
- environment:
- - FOO=original
- - BAR=local
- - BAZ=local
-
-
-
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/faq.md b/compose/faq.md
deleted file mode 100644
index 45885255f8..0000000000
--- a/compose/faq.md
+++ /dev/null
@@ -1,128 +0,0 @@
-
-
-# Frequently asked questions
-
-If you don’t see your question here, feel free to drop by `#docker-compose` on
-freenode IRC and ask the community.
-
-
-## Can I control service startup order?
-
-Yes - see [Controlling startup order](startup-order.md).
-
-
-## Why do my services take 10 seconds to recreate or stop?
-
-Compose stop attempts to stop a container by sending a `SIGTERM`. It then waits
-for a [default timeout of 10 seconds](./reference/stop.md). After the timeout,
-a `SIGKILL` is sent to the container to forcefully kill it. If you
-are waiting for this timeout, it means that your containers aren't shutting down
-when they receive the `SIGTERM` signal.
-
-There has already been a lot written about this problem of
-[processes handling signals](https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86)
-in containers.
-
-To fix this problem, try the following:
-
-* Make sure you're using the JSON form of `CMD` and `ENTRYPOINT`
-in your Dockerfile.
-
- For example use `["program", "arg1", "arg2"]` not `"program arg1 arg2"`.
- Using the string form causes Docker to run your process using `bash` which
- doesn't handle signals properly. Compose always uses the JSON form, so don't
- worry if you override the command or entrypoint in your Compose file.
-
-* If you are able, modify the application that you're running to
-add an explicit signal handler for `SIGTERM`.
-
-* Set the `stop_signal` to a signal which the application knows how to handle:
-
- web:
- build: .
- stop_signal: SIGINT
-
-* If you can't modify the application, wrap the application in a lightweight init
-system (like [s6](http://skarnet.org/software/s6/)) or a signal proxy (like
-[dumb-init](https://github.com/Yelp/dumb-init) or
-[tini](https://github.com/krallin/tini)). Either of these wrappers take care of
-handling `SIGTERM` properly.
-
-## How do I run multiple copies of a Compose file on the same host?
-
-Compose uses the project name to create unique identifiers for all of a
-project's containers and other resources. To run multiple copies of a project,
-set a custom project name using the [`-p` command line
-option](./reference/overview.md) or the [`COMPOSE_PROJECT_NAME`
-environment variable](./reference/envvars.md#compose-project-name).
-
-## What's the difference between `up`, `run`, and `start`?
-
-Typically, you want `docker-compose up`. Use `up` to start or restart all the
-services defined in a `docker-compose.yml`. In the default "attached"
-mode, you'll see all the logs from all the containers. In "detached" mode (`-d`),
-Compose exits after starting the containers, but the containers continue to run
-in the background.
-
-The `docker-compose run` command is for running "one-off" or "adhoc" tasks. It
-requires the service name you want to run and only starts containers for services
-that the running service depends on. Use `run` to run tests or perform
-an administrative task such as removing or adding data to a data volume
-container. The `run` command acts like `docker run -ti` in that it opens an
-interactive terminal to the container and returns an exit status matching the
-exit status of the process in the container.
-
-The `docker-compose start` command is useful only to restart containers
-that were previously created, but were stopped. It never creates new
-containers.
-
-## Can I use json instead of yaml for my Compose file?
-
-Yes. [Yaml is a superset of json](http://stackoverflow.com/a/1729545/444646) so
-any JSON file should be valid Yaml. To use a JSON file with Compose,
-specify the filename to use, for example:
-
-```bash
-docker-compose -f docker-compose.json up
-```
-
-## Should I include my code with `COPY`/`ADD` or a volume?
-
-You can add your code to the image using `COPY` or `ADD` directive in a
-`Dockerfile`. This is useful if you need to relocate your code along with the
-Docker image, for example when you're sending code to another environment
-(production, CI, etc).
-
-You should use a `volume` if you want to make changes to your code and see them
-reflected immediately, for example when you're developing code and your server
-supports hot code reloading or live-reload.
-
-There may be cases where you'll want to use both. You can have the image
-include the code using a `COPY`, and use a `volume` in your Compose file to
-include the code from the host during development. The volume overrides
-the directory contents of the image.
-
-## Where can I find example compose files?
-
-There are [many examples of Compose files on
-github](https://github.com/search?q=in%3Apath+docker-compose.yml+extension%3Ayml&type=Code).
-
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/gettingstarted.md b/compose/gettingstarted.md
deleted file mode 100644
index 249bff725e..0000000000
--- a/compose/gettingstarted.md
+++ /dev/null
@@ -1,191 +0,0 @@
-
-
-
-# Getting Started
-
-On this page you build a simple Python web application running on Docker Compose. The
-application uses the Flask framework and increments a value in Redis. While the
-sample uses Python, the concepts demonstrated here should be understandable even
-if you're not familiar with it.
-
-## Prerequisites
-
-Make sure you have already
-[installed both Docker Engine and Docker Compose](install.md). You
-don't need to install Python, it is provided by a Docker image.
-
-## Step 1: Setup
-
-1. Create a directory for the project:
-
- $ mkdir composetest
- $ cd composetest
-
-2. With your favorite text editor create a file called `app.py` in your project
- directory.
-
- from flask import Flask
- from redis import Redis
-
- app = Flask(__name__)
- redis = Redis(host='redis', port=6379)
-
- @app.route('/')
- def hello():
- redis.incr('hits')
- return 'Hello World! I have been seen %s times.' % redis.get('hits')
-
- if __name__ == "__main__":
- app.run(host="0.0.0.0", debug=True)
-
-3. Create another file called `requirements.txt` in your project directory and
- add the following:
-
- flask
- redis
-
-    These define the application's dependencies.
-
-## Step 2: Create a Docker image
-
-In this step, you build a new Docker image. The image contains all the
-dependencies the Python application requires, including Python itself.
-
-1. In your project directory create a file named `Dockerfile` and add the
- following:
-
- FROM python:2.7
- ADD . /code
- WORKDIR /code
- RUN pip install -r requirements.txt
- CMD python app.py
-
- This tells Docker to:
-
- * Build an image starting with the Python 2.7 image.
- * Add the current directory `.` into the path `/code` in the image.
- * Set the working directory to `/code`.
- * Install the Python dependencies.
- * Set the default command for the container to `python app.py`
-
- For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
-
-2. Build the image.
-
- $ docker build -t web .
-
- This command builds an image named `web` from the contents of the current
- directory. The command automatically locates the `Dockerfile`, `app.py`, and
- `requirements.txt` files.
-
-
-## Step 3: Define services
-
-Define a set of services using `docker-compose.yml`:
-
-1. Create a file called docker-compose.yml in your project directory and add
- the following:
-
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- depends_on:
- - redis
- redis:
- image: redis
-
-This Compose file defines two services, `web` and `redis`. The web service:
-
-* Builds from the `Dockerfile` in the current directory.
-* Forwards the exposed port 5000 on the container to port 5000 on the host machine.
-* Mounts the project directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image.
-* Declares a dependency on the `redis` service (via `depends_on`), so Redis starts before the web service.
-
-The `redis` service uses the latest public [Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker Hub registry.
-
-## Step 4: Build and run your app with Compose
-
-1. From your project directory, start up your application.
-
- $ docker-compose up
- Pulling image redis...
- Building web...
- Starting composetest_redis_1...
- Starting composetest_web_1...
- redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
- web_1 | * Running on http://0.0.0.0:5000/
- web_1 | * Restarting with stat
-
-    Compose pulls a Redis image, builds an image for your code, and starts the
- services you defined.
-
-2. Enter `http://0.0.0.0:5000/` in a browser to see the application running.
-
- If you're using Docker on Linux natively, then the web app should now be
- listening on port 5000 on your Docker daemon host. If `http://0.0.0.0:5000`
- doesn't resolve, you can also try `http://localhost:5000`.
-
- If you're using Docker Machine on a Mac, use `docker-machine ip MACHINE_VM` to get
- the IP address of your Docker host. Then, `open http://MACHINE_VM_IP:5000` in a
- browser.
-
- You should see a message in your browser saying:
-
- `Hello World! I have been seen 1 times.`
-
-3. Refresh the page.
-
- The number should increment.
-
-## Step 5: Experiment with some other commands
-
-If you want to run your services in the background, you can pass the `-d` flag
-(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to
-see what is currently running:
-
- $ docker-compose up -d
- Starting composetest_redis_1...
- Starting composetest_web_1...
- $ docker-compose ps
- Name Command State Ports
- -------------------------------------------------------------------
- composetest_redis_1 /usr/local/bin/run Up
- composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp
-
-The `docker-compose run` command allows you to run one-off commands for your
-services. For example, to see what environment variables are available to the
-`web` service:
-
- $ docker-compose run web env
-
-See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands.
-
-If you started Compose with `docker-compose up -d`, you'll probably want to stop
-your services once you've finished with them:
-
- $ docker-compose stop
-
-At this point, you have seen the basics of how Compose works.
-
-
-## Where to go next
-
-- Next, try the quick start guide for [Django](django.md),
- [Rails](rails.md), or [WordPress](wordpress.md).
-- [Explore the full list of Compose commands](./reference/index.md)
-- [Compose configuration file reference](compose-file.md)
diff --git a/compose/images/django-it-worked.png b/compose/images/django-it-worked.png
deleted file mode 100644
index 75769754b9..0000000000
Binary files a/compose/images/django-it-worked.png and /dev/null differ
diff --git a/compose/images/rails-welcome.png b/compose/images/rails-welcome.png
deleted file mode 100644
index 51512dbda6..0000000000
Binary files a/compose/images/rails-welcome.png and /dev/null differ
diff --git a/compose/images/wordpress-files.png b/compose/images/wordpress-files.png
deleted file mode 100644
index 4762935bae..0000000000
Binary files a/compose/images/wordpress-files.png and /dev/null differ
diff --git a/compose/images/wordpress-lang.png b/compose/images/wordpress-lang.png
deleted file mode 100644
index f0bd864ef0..0000000000
Binary files a/compose/images/wordpress-lang.png and /dev/null differ
diff --git a/compose/images/wordpress-welcome.png b/compose/images/wordpress-welcome.png
deleted file mode 100644
index c9ba20368c..0000000000
Binary files a/compose/images/wordpress-welcome.png and /dev/null differ
diff --git a/compose/index.md b/compose/index.md
deleted file mode 100644
index f1b710794e..0000000000
--- a/compose/index.md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-
-
-# Docker Compose
-
-Compose is a tool for defining and running multi-container Docker applications. To learn more about Compose refer to the following documentation:
-
-- [Compose Overview](overview.md)
-- [Install Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Frequently asked questions](faq.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
-- [Environment file](env-file.md)
-
-To see a detailed list of changes for past and current releases of Docker
-Compose, please refer to the
-[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
diff --git a/compose/install.md b/compose/install.md
deleted file mode 100644
index bb7f07b3d1..0000000000
--- a/compose/install.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
-
-# Install Docker Compose
-
-You can run Compose on OS X, Windows and 64-bit Linux. To install it, you'll need to install Docker first.
-
-To install Compose, do the following:
-
-1. Install Docker Engine:
-
- * Mac OS X installation
-
- * Windows installation
-
- * Ubuntu installation
-
- * other system installations
-
-2. The Docker Toolbox installation includes both Engine and Compose, so Mac and Windows users are done installing. Others should continue to the next step.
-
-3. Go to the Compose repository release page on GitHub.
-
-4. Follow the instructions from the release page and run the `curl` command,
-which the release page specifies, in your terminal.
-
- > Note: If you get a "Permission denied" error, your `/usr/local/bin` directory
- probably isn't writable and you'll need to install Compose as the superuser. Run
- `sudo -i`, then the two commands below, then `exit`.
-
- The following is an example command illustrating the format:
-
- curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-
- If you have problems installing with `curl`, see
- [Alternative Install Options](#alternative-install-options).
-
-5. Apply executable permissions to the binary:
-
- $ chmod +x /usr/local/bin/docker-compose
-
-6. Optionally, install [command completion](completion.md) for the
-`bash` and `zsh` shell.
-
-7. Test the installation.
-
- $ docker-compose --version
- docker-compose version: 1.8.0
-
-
-## Alternative install options
-
-### Install using pip
-
-Compose can be installed from [pypi](https://pypi.python.org/pypi/docker-compose)
-using `pip`. If you install using `pip` it is highly recommended that you use a
-[virtualenv](https://virtualenv.pypa.io/en/latest/) because many operating systems
-have python system packages that conflict with docker-compose dependencies. See
-the [virtualenv tutorial](http://docs.python-guide.org/en/latest/dev/virtualenvs/)
-to get started.
-
- $ pip install docker-compose
-
-> **Note:** pip version 6.0 or greater is required
-
-### Install as a container
-
-Compose can also be run inside a container, from a small bash script wrapper.
-To install compose as a container run:
-
- $ curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose
- $ chmod +x /usr/local/bin/docker-compose
-
-## Master builds
-
-If you're interested in trying out a pre-release build you can download a
-binary from https://dl.bintray.com/docker-compose/master/. Pre-release
-builds allow you to try out new features before they are released, but may
-be less stable.
-
-
-## Upgrading
-
-If you're upgrading from Compose 1.2 or earlier, you'll need to remove or migrate
-your existing containers after upgrading Compose. This is because, as of version
-1.3, Compose uses Docker labels to keep track of containers, and so they need to
-be recreated with labels added.
-
-If Compose detects containers that were created without labels, it will refuse
-to run so that you don't end up with two sets of them. If you want to keep using
-your existing containers (for example, because they have data volumes you want
-to preserve) you can use compose 1.5.x to migrate them with the following command:
-
- $ docker-compose migrate-to-labels
-
-Alternatively, if you're not worried about keeping them, you can remove them.
-Compose will just create new ones.
-
- $ docker rm -f -v myapp_web_1 myapp_db_1 ...
-
-
-## Uninstallation
-
-To uninstall Docker Compose if you installed using `curl`:
-
- $ rm /usr/local/bin/docker-compose
-
-
-To uninstall Docker Compose if you installed using `pip`:
-
- $ pip uninstall docker-compose
-
->**Note**: If you get a "Permission denied" error using either of the above
->methods, you probably do not have the proper permissions to remove
->`docker-compose`. To force the removal, prepend `sudo` to either of the above
->commands and run again.
-
-
-## Where to go next
-
-- [User guide](index.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/link-env-deprecated.md b/compose/link-env-deprecated.md
deleted file mode 100644
index b1f01b3b6a..0000000000
--- a/compose/link-env-deprecated.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-# Link environment variables reference
-
-> **Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](compose-file.md#links) for details.
->
-> Environment variables will only be populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning).
-
-Compose uses [Docker links](/engine/userguide/networking/default_network/dockerlinks.md)
-to expose services' containers to one another. Each linked container injects a set of
-environment variables, each of which begins with the uppercase name of the container.
-
-To see what environment variables are available to a service, run `docker-compose run SERVICE env`.
-
-name\_PORT
-Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432`
-
-name\_PORT\_num\_protocol
-Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432`
-
-name\_PORT\_num\_protocol\_ADDR
-Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5`
-
-name\_PORT\_num\_protocol\_PORT
-Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432`
-
-name\_PORT\_num\_protocol\_PROTO
-Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp`
-
-name\_NAME
-Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1`
-
-## Related Information
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/networking.md b/compose/networking.md
deleted file mode 100644
index 9739a08840..0000000000
--- a/compose/networking.md
+++ /dev/null
@@ -1,154 +0,0 @@
-
-
-
-# Networking in Compose
-
-> **Note:** This document only applies if you're using [version 2 of the Compose file format](compose-file.md#versioning). Networking features are not supported for version 1 (legacy) Compose files.
-
-By default Compose sets up a single
-[network](https://docs.docker.com/engine/reference/commandline/network_create/) for your app. Each
-container for a service joins the default network and is both *reachable* by
-other containers on that network, and *discoverable* by them at a hostname
-identical to the container name.
-
-> **Note:** Your app's network is given a name based on the "project name",
-> which is based on the name of the directory it lives in. You can override the
-> project name with either the [`--project-name`
-> flag](reference/overview.md) or the [`COMPOSE_PROJECT_NAME` environment
-> variable](reference/envvars.md#compose-project-name).
-
-For example, suppose your app is in a directory called `myapp`, and your `docker-compose.yml` looks like this:
-
- version: '2'
-
- services:
- web:
- build: .
- ports:
- - "8000:8000"
- db:
- image: postgres
-
-When you run `docker-compose up`, the following happens:
-
-1. A network called `myapp_default` is created.
-2. A container is created using `web`'s configuration. It joins the network
- `myapp_default` under the name `web`.
-3. A container is created using `db`'s configuration. It joins the network
- `myapp_default` under the name `db`.
-
-Each container can now look up the hostname `web` or `db` and
-get back the appropriate container's IP address. For example, `web`'s
-application code could connect to the URL `postgres://db:5432` and start
-using the Postgres database.
-
-Because `web` explicitly maps a port, it's also accessible from the outside world via port 8000 on your Docker host's network interface.
-
-## Updating containers
-
-If you make a configuration change to a service and run `docker-compose up` to update it, the old container will be removed and the new one will join the network under a different IP address but the same name. Running containers will be able to look up that name and connect to the new address, but the old address will stop working.
-
-If any containers have connections open to the old container, they will be closed. It is a container's responsibility to detect this condition, look up the name again and reconnect.
-
-## Links
-
-Links allow you to define extra aliases by which a service is reachable from another service. They are not required to enable services to communicate - by default, any service can reach any other service at that service's name. In the following example, `db` is reachable from `web` at the hostnames `db` and `database`:
-
- version: '2'
- services:
- web:
- build: .
- links:
- - "db:database"
- db:
- image: postgres
-
-See the [links reference](compose-file.md#links) for more information.
-
-## Multi-host networking
-
-When [deploying a Compose application to a Swarm cluster](swarm.md), you can make use of the built-in `overlay` driver to enable multi-host communication between containers with no changes to your Compose file or application code.
-
-Consult the [Getting started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this.
-
-## Specifying custom networks
-
-Instead of just using the default app network, you can specify your own networks with the top-level `networks` key. This lets you create more complex topologies and specify [custom network drivers](https://docs.docker.com/engine/extend/plugins_network/) and options. You can also use it to connect services to externally-created networks which aren't managed by Compose.
-
-Each service can specify what networks to connect to with the *service-level* `networks` key, which is a list of names referencing entries under the *top-level* `networks` key.
-
-Here's an example Compose file defining two custom networks. The `proxy` service is isolated from the `db` service, because they do not share a network in common - only `app` can talk to both.
-
- version: '2'
-
- services:
- proxy:
- build: ./proxy
- networks:
- - front
- app:
- build: ./app
- networks:
- - front
- - back
- db:
- image: postgres
- networks:
- - back
-
- networks:
- front:
- # Use a custom driver
- driver: custom-driver-1
- back:
- # Use a custom driver which takes special options
- driver: custom-driver-2
- driver_opts:
- foo: "1"
- bar: "2"
-
-Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file.md#ipv4-address-ipv6-address) for each attached network.
-
-For full details of the network configuration options available, see the following references:
-
-- [Top-level `networks` key](compose-file.md#network-configuration-reference)
-- [Service-level `networks` key](compose-file.md#networks)
-
-## Configuring the default network
-
-Instead of (or as well as) specifying your own networks, you can also change the settings of the app-wide default network by defining an entry under `networks` named `default`:
-
- version: '2'
-
- services:
- web:
- build: .
- ports:
- - "8000:8000"
- db:
- image: postgres
-
- networks:
- default:
- # Use a custom driver
- driver: custom-driver-1
-
-## Using a pre-existing network
-
-If you want your containers to join a pre-existing network, use the [`external` option](compose-file.md#network-configuration-reference):
-
- networks:
- default:
- external:
- name: my-pre-existing-network
-
-Instead of attempting to create a network called `[projectname]_default`, Compose will look for a network called `my-pre-existing-network` and connect your app's containers to it.
diff --git a/compose/overview.md b/compose/overview.md
deleted file mode 100644
index ef07a45be5..0000000000
--- a/compose/overview.md
+++ /dev/null
@@ -1,188 +0,0 @@
-
-
-
-# Overview of Docker Compose
-
-Compose is a tool for defining and running multi-container Docker applications.
-With Compose, you use a Compose file to configure your application's services.
-Then, using a single command, you create and start all the services
-from your configuration. To learn more about all the features of Compose
-see [the list of features](#features).
-
-Compose is great for development, testing, and staging environments, as well as
-CI workflows. You can learn more about each case in
-[Common Use Cases](#common-use-cases).
-
-Using Compose is basically a three-step process.
-
-1. Define your app's environment with a `Dockerfile` so it can be reproduced
-anywhere.
-
-2. Define the services that make up your app in `docker-compose.yml`
-so they can be run together in an isolated environment.
-
-3. Lastly, run
-`docker-compose up` and Compose will start and run your entire app.
-
-A `docker-compose.yml` looks like this:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- - logvolume01:/var/log
- links:
- - redis
- redis:
- image: redis
- volumes:
- logvolume01: {}
-
-For more information about the Compose file, see the
-[Compose file reference](compose-file.md)
-
-Compose has commands for managing the whole lifecycle of your application:
-
- * Start, stop and rebuild services
- * View the status of running services
- * Stream the log output of running services
- * Run a one-off command on a service
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Frequently asked questions](faq.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
-
-## Features
-
-The features of Compose that make it effective are:
-
-* [Multiple isolated environments on a single host](#multiple-isolated-environments-on-a-single-host)
-* [Preserve volume data when containers are created](#preserve-volume-data-when-containers-are-created)
-* [Only recreate containers that have changed](#only-recreate-containers-that-have-changed)
-* [Variables and moving a composition between environments](#variables-and-moving-a-composition-between-environments)
-
-### Multiple isolated environments on a single host
-
-Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts:
-
-* on a dev host, to create multiple copies of a single environment (e.g., you want to run a stable copy for each feature branch of a project)
-* on a CI server, to keep builds from interfering with each other, you can set
- the project name to a unique build number
-* on a shared host or dev host, to prevent different projects, which may use the
- same service names, from interfering with each other
-
-The default project name is the basename of the project directory. You can set
-a custom project name by using the
-[`-p` command line option](./reference/overview.md) or the
-[`COMPOSE_PROJECT_NAME` environment variable](./reference/envvars.md#compose-project-name).
-
-### Preserve volume data when containers are created
-
-Compose preserves all volumes used by your services. When `docker-compose up`
-runs, if it finds any containers from previous runs, it copies the volumes from
-the old container to the new container. This process ensures that any data
-you've created in volumes isn't lost.
-
-
-### Only recreate containers that have changed
-
-Compose caches the configuration used to create a container. When you
-restart a service that has not changed, Compose re-uses the existing
-containers. Re-using containers means that you can make changes to your
-environment very quickly.
-
-
-### Variables and moving a composition between environments
-
-Compose supports variables in the Compose file. You can use these variables
-to customize your composition for different environments, or different users.
-See [Variable substitution](compose-file.md#variable-substitution) for more
-details.
-
-You can extend a Compose file using the `extends` field or by creating multiple
-Compose files. See [extends](extends.md) for more details.
-
-
-## Common Use Cases
-
-Compose can be used in many different ways. Some common use cases are outlined
-below.
-
-### Development environments
-
-When you're developing software, the ability to run an application in an
-isolated environment and interact with it is crucial. The Compose command
-line tool can be used to create the environment and interact with it.
-
-The [Compose file](compose-file.md) provides a way to document and configure
-all of the application's service dependencies (databases, queues, caches,
-web service APIs, etc). Using the Compose command line tool you can create
-and start one or more containers for each dependency with a single command
-(`docker-compose up`).
-
-Together, these features provide a convenient way for developers to get
-started on a project. Compose can reduce a multi-page "developer getting
-started guide" to a single machine readable Compose file and a few commands.
-
-### Automated testing environments
-
-An important part of any Continuous Deployment or Continuous Integration process
-is the automated test suite. Automated end-to-end testing requires an
-environment in which to run tests. Compose provides a convenient way to create
-and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file.md) you can create and destroy these environments in just a few commands:
-
- $ docker-compose up -d
- $ ./run_tests
- $ docker-compose down
-
-### Single host deployments
-
-Compose has traditionally been focused on development and testing workflows,
-but with each release we're making progress on more production-oriented features. You can use Compose to deploy to a remote Docker Engine. The Docker Engine may be a single instance provisioned with
-[Docker Machine](/machine/overview.md) or an entire
-[Docker Swarm](/swarm/overview.md) cluster.
-
-For details on using production-oriented features, see
-[compose in production](production.md) in this documentation.
-
-
-## Release Notes
-
-To see a detailed list of changes for past and current releases of Docker
-Compose, please refer to the
-[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
-
-## Getting help
-
-Docker Compose is under active development. If you need help, would like to
-contribute, or simply want to talk about the project with like-minded
-individuals, we have a number of open channels for communication.
-
-* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues).
-
-* To talk about the project with people in real time: please join the
- `#docker-compose` channel on freenode IRC.
-
-* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls).
-
-For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/opensource/get-help/).
diff --git a/compose/production.md b/compose/production.md
deleted file mode 100644
index cfb8729363..0000000000
--- a/compose/production.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
-
-## Using Compose in production
-
-When you define your app with Compose in development, you can use this
-definition to run your application in different environments such as CI,
-staging, and production.
-
-The easiest way to deploy an application is to run it on a single server,
-similar to how you would run your development environment. If you want to scale
-up your application, you can run Compose apps on a Swarm cluster.
-
-### Modify your Compose file for production
-
-You'll almost certainly want to make changes to your app configuration that are
-more appropriate to a live environment. These changes may include:
-
-- Removing any volume bindings for application code, so that code stays inside
- the container and can't be changed from outside
-- Binding to different ports on the host
-- Setting environment variables differently (e.g., to decrease the verbosity of
- logging, or to enable email sending)
-- Specifying a restart policy (e.g., `restart: always`) to avoid downtime
-- Adding extra services (e.g., a log aggregator)
-
-For this reason, you'll probably want to define an additional Compose file, say
-`production.yml`, which specifies production-appropriate
-configuration. This configuration file only needs to include the changes you'd
-like to make from the original Compose file. The additional Compose file
-can be applied over the original `docker-compose.yml` to create a new configuration.
-
-Once you've got a second configuration file, tell Compose to use it with the
-`-f` option:
-
- $ docker-compose -f docker-compose.yml -f production.yml up -d
-
-See [Using multiple compose files](extends.md#different-environments) for a more
-complete example.
-
-### Deploying changes
-
-When you make changes to your app code, you'll need to rebuild your image and
-recreate your app's containers. To redeploy a service called
-`web`, you would use:
-
- $ docker-compose build web
- $ docker-compose up --no-deps -d web
-
-This will first rebuild the image for `web` and then stop, destroy, and recreate
-*just* the `web` service. The `--no-deps` flag prevents Compose from also
-recreating any services which `web` depends on.
-
-### Running Compose on a single server
-
-You can use Compose to deploy an app to a remote Docker host by setting the
-`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables
-appropriately. For tasks like this,
-[Docker Machine](/machine/overview.md) makes managing local and
-remote Docker hosts very easy, and is recommended even if you're not deploying
-remotely.
-
-Once you've set up your environment variables, all the normal `docker-compose`
-commands will work with no further configuration.
-
-### Running Compose on a Swarm cluster
-
-[Docker Swarm](/swarm/overview.md), a Docker-native clustering
-system, exposes the same API as a single Docker host, which means you can use
-Compose against a Swarm instance and run your apps across multiple hosts.
-
-Read more about the Compose/Swarm integration in the
-[integration guide](swarm.md).
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/rails.md b/compose/rails.md
deleted file mode 100644
index 267776872e..0000000000
--- a/compose/rails.md
+++ /dev/null
@@ -1,174 +0,0 @@
-
-
-## Quickstart: Docker Compose and Rails
-
-This Quickstart guide will show you how to use Docker Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md).
-
-### Define the project
-
-Start by setting up the three files you'll need to build the app. First, since
-your app is going to run inside a Docker container containing all of its
-dependencies, you'll need to define exactly what needs to be included in the
-container. This is done using a file called `Dockerfile`. To begin with, the
-Dockerfile consists of:
-
- FROM ruby:2.2.0
- RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
- RUN mkdir /myapp
- WORKDIR /myapp
- ADD Gemfile /myapp/Gemfile
- ADD Gemfile.lock /myapp/Gemfile.lock
- RUN bundle install
- ADD . /myapp
-
-That'll put your application code inside an image that will build a container
-with Ruby, Bundler and all your dependencies inside it. For more information on
-how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
-
-Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.
-
- source 'https://rubygems.org'
- gem 'rails', '4.2.0'
-
-You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`.
-
- $ touch Gemfile.lock
-
-Finally, `docker-compose.yml` is where the magic happens. This file describes
-the services that comprise your app (a database and a web app), how to get each
-one's Docker image (the database just runs on a pre-made PostgreSQL image, and
-the web app is built from the current directory), and the configuration needed
-to link them together and expose the web app's port.
-
- version: '2'
- services:
- db:
- image: postgres
- web:
- build: .
- command: bundle exec rails s -p 3000 -b '0.0.0.0'
- volumes:
- - .:/myapp
- ports:
- - "3000:3000"
- depends_on:
- - db
-
-### Build the project
-
-With those three files in place, you can now generate the Rails skeleton app
-using `docker-compose run`:
-
- $ docker-compose run web rails new . --force --database=postgresql --skip-bundle
-
-First, Compose will build the image for the `web` service using the `Dockerfile`. Then it'll run `rails new` inside a new container, using that image. Once it's done, you should have generated a fresh app:
-
- $ ls -l
- total 56
- -rw-r--r-- 1 user staff 215 Feb 13 23:33 Dockerfile
- -rw-r--r-- 1 user staff 1480 Feb 13 23:43 Gemfile
- -rw-r--r-- 1 user staff 2535 Feb 13 23:43 Gemfile.lock
- -rw-r--r-- 1 root root 478 Feb 13 23:43 README.rdoc
- -rw-r--r-- 1 root root 249 Feb 13 23:43 Rakefile
- drwxr-xr-x 8 root root 272 Feb 13 23:43 app
- drwxr-xr-x 6 root root 204 Feb 13 23:43 bin
- drwxr-xr-x 11 root root 374 Feb 13 23:43 config
- -rw-r--r-- 1 root root 153 Feb 13 23:43 config.ru
- drwxr-xr-x 3 root root 102 Feb 13 23:43 db
- -rw-r--r-- 1 user staff 161 Feb 13 23:35 docker-compose.yml
- drwxr-xr-x 4 root root 136 Feb 13 23:43 lib
- drwxr-xr-x 3 root root 102 Feb 13 23:43 log
- drwxr-xr-x 7 root root 238 Feb 13 23:43 public
- drwxr-xr-x 9 root root 306 Feb 13 23:43 test
- drwxr-xr-x 3 root root 102 Feb 13 23:43 tmp
- drwxr-xr-x 3 root root 102 Feb 13 23:43 vendor
-
-
-If you are running Docker on Linux, the files `rails new` created are owned by
-root. This happens because the container runs as the root user. Change the
-ownership of the new files.
-
- sudo chown -R $USER:$USER .
-
-If you are running Docker on Mac or Windows, you should already have ownership
-of all files, including those generated by `rails new`. List the files just to
-verify this.
-
-Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've
-got a Javascript runtime:
-
- gem 'therubyracer', platforms: :ruby
-
-Now that you've got a new `Gemfile`, you need to build the image again. (This,
-and changes to the Dockerfile itself, should be the only times you'll need to
-rebuild.)
-
- $ docker-compose build
-
-
-### Connect the database
-
-The app is now bootable, but you're not quite there yet. By default, Rails
-expects a database to be running on `localhost` - so you need to point it at the
-`db` container instead. You also need to change the database and username to
-align with the defaults set by the `postgres` image.
-
-Replace the contents of `config/database.yml` with the following:
-
- development: &default
- adapter: postgresql
- encoding: unicode
- database: postgres
- pool: 5
- username: postgres
- password:
- host: db
-
- test:
- <<: *default
- database: myapp_test
-
-You can now boot the app with:
-
- $ docker-compose up
-
-If all's well, you should see some PostgreSQL output, and then—after a few
-seconds—the familiar refrain:
-
- myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick 1.3.1
- myapp_web_1 | [2014-01-17 17:16:29] INFO ruby 2.2.0 (2014-12-25) [x86_64-linux-gnu]
- myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick::HTTPServer#start: pid=1 port=3000
-
-Finally, you need to create the database. In another terminal, run:
-
- $ docker-compose run web rake db:create
-
-That's it. Your app should now be running on port 3000 on your Docker daemon. If you're using [Docker Machine](/machine/overview.md), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
-
-
-
->**Note**: If you stop the example application and attempt to restart it, you might get the
-following error: `web_1 | A server is already running. Check
-/myapp/tmp/pids/server.pid.` One way to resolve this is to delete the file
-`tmp/pids/server.pid`, and then re-start the application with `docker-compose
-up`.
-
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/reference/build.md b/compose/reference/build.md
deleted file mode 100644
index 84aefc253f..0000000000
--- a/compose/reference/build.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-# build
-
-```
-Usage: build [options] [SERVICE...]
-
-Options:
---force-rm Always remove intermediate containers.
---no-cache Do not use cache when building the image.
---pull Always attempt to pull a newer version of the image.
-```
-
-Services are built once and then tagged as `project_service`, e.g.,
-`composetest_db`. If you change a service's Dockerfile or the contents of its
-build directory, run `docker-compose build` to rebuild it.
diff --git a/compose/reference/bundle.md b/compose/reference/bundle.md
deleted file mode 100644
index fca93a8aa6..0000000000
--- a/compose/reference/bundle.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
-# bundle
-
-```
-Usage: bundle [options]
-
-Options:
- --push-images Automatically push images for any services
- which have a `build` option specified.
-
- -o, --output PATH Path to write the bundle file to.
- Defaults to ".dab".
-```
-
-Generate a Distributed Application Bundle (DAB) from the Compose file.
-
-Images must have digests stored, which requires interaction with a
-Docker registry. If digests aren't stored for all images, you can fetch
-them with `docker-compose pull` or `docker-compose push`. To push images
-automatically when bundling, pass `--push-images`. Only services with
-a `build` option specified will have their images pushed.
diff --git a/compose/reference/config.md b/compose/reference/config.md
deleted file mode 100644
index 1a9706f4da..0000000000
--- a/compose/reference/config.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# config
-
-```
-Usage: config [options]
-
-Options:
--q, --quiet Only validate the configuration, don't print
- anything.
---services Print the service names, one per line.
-```
-
-Validate and view the compose file.
diff --git a/compose/reference/create.md b/compose/reference/create.md
deleted file mode 100644
index 5065e8bebe..0000000000
--- a/compose/reference/create.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-# create
-
-```
-Creates containers for a service.
-
-Usage: create [options] [SERVICE...]
-
-Options:
- --force-recreate Recreate containers even if their configuration and
- image haven't changed. Incompatible with --no-recreate.
- --no-recreate If containers already exist, don't recreate them.
- Incompatible with --force-recreate.
- --no-build Don't build an image, even if it's missing.
- --build Build images before creating containers.
-```
diff --git a/compose/reference/down.md b/compose/reference/down.md
deleted file mode 100644
index ffe88b4e05..0000000000
--- a/compose/reference/down.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-# down
-
-```
-Usage: down [options]
-
-Options:
- --rmi type Remove images. Type must be one of:
- 'all': Remove all images used by any service.
- 'local': Remove only images that don't have a custom tag
- set by the `image` field.
- -v, --volumes Remove named volumes declared in the `volumes` section
- of the Compose file and anonymous volumes
- attached to containers.
- --remove-orphans Remove containers for services not defined in the
- Compose file
-```
-
-Stops containers and removes containers, networks, volumes, and images
-created by `up`.
-
-By default, the only things removed are:
-
-- Containers for services defined in the Compose file
-- Networks defined in the `networks` section of the Compose file
-- The default network, if one is used
-
-Networks and volumes defined as `external` are never removed.
diff --git a/compose/reference/envvars.md b/compose/reference/envvars.md
deleted file mode 100644
index 22516debdc..0000000000
--- a/compose/reference/envvars.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-
-
-# CLI Environment Variables
-
-Several environment variables are available for you to configure the Docker Compose command-line behaviour.
-
-Variables starting with `DOCKER_` are the same as those used to configure the
-Docker command-line client. If you're using `docker-machine`, then the `eval "$(docker-machine env my-docker-vm)"` command should set them to their correct values. (In this example, `my-docker-vm` is the name of a machine you created.)
-
-> Note: Some of these variables can also be provided using an
-> [environment file](../env-file.md)
-
-## COMPOSE\_PROJECT\_NAME
-
-Sets the project name. This value is prepended along with the service name to the container name on start up. For example, if your project name is `myapp` and it includes two services `db` and `web` then compose starts containers named `myapp_db_1` and `myapp_web_1` respectively.
-
-Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME`
-defaults to the `basename` of the project directory. See also the `-p`
-[command-line option](overview.md).
-
-## COMPOSE\_FILE
-
-Specify the path to a Compose file. If not provided, Compose looks for a file named
-`docker-compose.yml` in the current directory and then each parent directory in
-succession until a file by that name is found.
-
-This variable supports multiple compose files separated by a path separator (on
-Linux and OSX the path separator is `:`, on Windows it is `;`). For example:
-`COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml`
-
-See also the `-f` [command-line option](overview.md).
-
-## COMPOSE\_API\_VERSION
-
-The Docker API only supports requests from clients which report a specific
-version. If you receive a `client and server don't have same version error` using
-`docker-compose`, you can workaround this error by setting this environment
-variable. Set the version value to match the server version.
-
-Setting this variable is intended as a workaround for situations where you need
-to run temporarily with a mismatch between the client and server version. For
-example, if you can upgrade the client but need to wait to upgrade the server.
-
-Running with this variable set and a known mismatch does prevent some Docker
-features from working properly. The exact features that fail would depend on the
-Docker client and server versions. For this reason, running with this variable
-set is only intended as a workaround and it is not officially supported.
-
-If you run into problems running with this set, resolve the mismatch through
-upgrade and remove this setting to see if your problems resolve before notifying
-support.
-
-## DOCKER\_HOST
-
-Sets the URL of the `docker` daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`.
-
-## DOCKER\_TLS\_VERIFY
-
-When set to anything other than an empty string, enables TLS communication with
-the `docker` daemon.
-
-## DOCKER\_CERT\_PATH
-
-Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
-
-## COMPOSE\_HTTP\_TIMEOUT
-
-Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers
-it failed. Defaults to 60 seconds.
-
-## COMPOSE\_TLS\_VERSION
-
-Configure which TLS version is used for TLS communication with the `docker`
-daemon. Defaults to `TLSv1`.
-Supported values are: `TLSv1`, `TLSv1_1`, `TLSv1_2`.
-
-## Related Information
-
-- [User guide](../index.md)
-- [Installing Compose](../install.md)
-- [Compose file reference](../compose-file.md)
-- [Environment file](../env-file.md)
diff --git a/compose/reference/events.md b/compose/reference/events.md
deleted file mode 100644
index 827258f249..0000000000
--- a/compose/reference/events.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-# events
-
-```
-Usage: events [options] [SERVICE...]
-
-Options:
- --json Output events as a stream of json objects
-```
-
-Stream container events for every container in the project.
-
-With the `--json` flag, a json object will be printed one per line with the
-format:
-
-```
-{
- "service": "web",
- "event": "create",
- "container": "213cf75fc39a",
- "image": "alpine:edge",
- "time": "2015-11-20T18:01:03.615550",
-}
-```
diff --git a/compose/reference/exec.md b/compose/reference/exec.md
deleted file mode 100644
index 6c0eeb04dc..0000000000
--- a/compose/reference/exec.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-# exec
-
-```
-Usage: exec [options] SERVICE COMMAND [ARGS...]
-
-Options:
--d Detached mode: Run command in the background.
---privileged Give extended privileges to the process.
---user USER Run the command as this user.
--T Disable pseudo-tty allocation. By default `docker-compose exec`
- allocates a TTY.
---index=index index of the container if there are multiple
- instances of a service [default: 1]
-```
-
-This is equivalent of `docker exec`. With this subcommand you can run arbitrary
-commands in your services. Commands are by default allocating a TTY, so you can
-do e.g. `docker-compose exec web sh` to get an interactive prompt.
diff --git a/compose/reference/help.md b/compose/reference/help.md
deleted file mode 100644
index 613708ed2f..0000000000
--- a/compose/reference/help.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# help
-
-```
-Usage: help COMMAND
-```
-
-Displays help and usage instructions for a command.
diff --git a/compose/reference/index.md b/compose/reference/index.md
deleted file mode 100644
index 2ac3676af0..0000000000
--- a/compose/reference/index.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-## Compose command-line reference
-
-The following pages describe the usage information for the [docker-compose](overview.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line.
-
-* [docker-compose](overview.md)
-* [build](build.md)
-* [config](config.md)
-* [create](create.md)
-* [down](down.md)
-* [events](events.md)
-* [help](help.md)
-* [kill](kill.md)
-* [logs](logs.md)
-* [pause](pause.md)
-* [port](port.md)
-* [ps](ps.md)
-* [pull](pull.md)
-* [restart](restart.md)
-* [rm](rm.md)
-* [run](run.md)
-* [scale](scale.md)
-* [start](start.md)
-* [stop](stop.md)
-* [unpause](unpause.md)
-* [up](up.md)
-
-## Where to go next
-
-* [CLI environment variables](envvars.md)
-* [docker-compose Command](overview.md)
diff --git a/compose/reference/kill.md b/compose/reference/kill.md
deleted file mode 100644
index dc4bf23a1b..0000000000
--- a/compose/reference/kill.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-# kill
-
-```
-Usage: kill [options] [SERVICE...]
-
-Options:
--s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL.
-```
-
-Forces running containers to stop by sending a `SIGKILL` signal. Optionally the
-signal can be passed, for example:
-
- $ docker-compose kill -s SIGINT
diff --git a/compose/reference/logs.md b/compose/reference/logs.md
deleted file mode 100644
index 745d24f7fe..0000000000
--- a/compose/reference/logs.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-# logs
-
-```
-Usage: logs [options] [SERVICE...]
-
-Options:
---no-color Produce monochrome output.
--f, --follow Follow log output
--t, --timestamps Show timestamps
---tail Number of lines to show from the end of the logs
- for each container.
-```
-
-Displays log output from services.
diff --git a/compose/reference/overview.md b/compose/reference/overview.md
deleted file mode 100644
index d59fa56575..0000000000
--- a/compose/reference/overview.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-
-
-# Overview of docker-compose CLI
-
-This page provides the usage information for the `docker-compose` Command.
-You can also see this information by running `docker-compose --help` from the
-command line.
-
-```
-Define and run multi-container applications with Docker.
-
-Usage:
- docker-compose [-f=...] [options] [COMMAND] [ARGS...]
- docker-compose -h|--help
-
-Options:
- -f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
- -p, --project-name NAME Specify an alternate project name (default: directory name)
- --verbose Show more output
- -v, --version Print version and exit
- -H, --host HOST Daemon socket to connect to
-
- --tls Use TLS; implied by --tlsverify
- --tlscacert CA_PATH Trust certs signed only by this CA
- --tlscert CLIENT_CERT_PATH Path to TLS certificate file
- --tlskey TLS_KEY_PATH Path to TLS key file
- --tlsverify Use TLS and verify the remote
- --skip-hostname-check Don't check the daemon's hostname against the name specified
- in the client certificate (for example if your docker host
- is an IP address)
-
-Commands:
- build Build or rebuild services
- config Validate and view the compose file
- create Create services
- down Stop and remove containers, networks, images, and volumes
- events Receive real time events from containers
- help Get help on a command
- kill Kill containers
- logs View output from containers
- pause Pause services
- port Print the public port for a port binding
- ps List containers
- pull Pulls service images
- restart Restart services
- rm Remove stopped containers
- run Run a one-off command
- scale Set number of containers for a service
- start Start services
- stop Stop services
- unpause Unpause services
- up Create and start containers
- version Show the Docker-Compose version information
-
-```
-
-The Docker Compose binary. You use this command to build and manage multiple
-services in Docker containers.
-
-Use the `-f` flag to specify the location of a Compose configuration file. You
-can supply multiple `-f` configuration files. When you supply multiple files,
-Compose combines them into a single configuration. Compose builds the
-configuration in the order you supply the files. Subsequent files override and
-add to their successors.
-
-For example, consider this command line:
-
-```
-$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
-```
-
-The `docker-compose.yml` file might specify a `webapp` service.
-
-```
-webapp:
- image: examples/web
- ports:
- - "8000:8000"
- volumes:
- - "/data"
-```
-
-If the `docker-compose.admin.yml` also specifies this same service, any matching
-fields will override the previous file. New values add to the `webapp` service
-configuration.
-
-```
-webapp:
- build: .
- environment:
- - DEBUG=1
-```
-
-Use a `-f` with `-` (dash) as the filename to read the configuration from
-stdin. When stdin is used all paths in the configuration are
-relative to the current working directory.
-
-The `-f` flag is optional. If you don't provide this flag on the command line,
-Compose traverses the working directory and its parent directories looking for a
-`docker-compose.yml` and a `docker-compose.override.yml` file. You must
-supply at least the `docker-compose.yml` file. If both files are present on the
-same directory level, Compose combines the two files into a single configuration.
-The configuration in the `docker-compose.override.yml` file is applied over and
-in addition to the values in the `docker-compose.yml` file.
-
-See also the `COMPOSE_FILE` [environment variable](envvars.md#compose-file).
-
-Each configuration has a project name. If you supply a `-p` flag, you can
-specify a project name. If you don't specify the flag, Compose uses the current
-directory name. See also the `COMPOSE_PROJECT_NAME` [environment variable](
-envvars.md#compose-project-name)
-
-
-## Where to go next
-
-* [CLI environment variables](envvars.md)
diff --git a/compose/reference/pause.md b/compose/reference/pause.md
deleted file mode 100644
index a0ffab0359..0000000000
--- a/compose/reference/pause.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# pause
-
-```
-Usage: pause [SERVICE...]
-```
-
-Pauses running containers of a service. They can be unpaused with `docker-compose unpause`.
diff --git a/compose/reference/port.md b/compose/reference/port.md
deleted file mode 100644
index c946a97d39..0000000000
--- a/compose/reference/port.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# port
-
-```
-Usage: port [options] SERVICE PRIVATE_PORT
-
-Options:
---protocol=proto tcp or udp [default: tcp]
---index=index index of the container if there are multiple
- instances of a service [default: 1]
-```
-
-Prints the public port for a port binding.
diff --git a/compose/reference/ps.md b/compose/reference/ps.md
deleted file mode 100644
index 546d68e76c..0000000000
--- a/compose/reference/ps.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-# ps
-
-```
-Usage: ps [options] [SERVICE...]
-
-Options:
--q Only display IDs
-```
-
-Lists containers.
diff --git a/compose/reference/pull.md b/compose/reference/pull.md
deleted file mode 100644
index 5ec184b72c..0000000000
--- a/compose/reference/pull.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-# pull
-
-```
-Usage: pull [options] [SERVICE...]
-
-Options:
---ignore-pull-failures Pull what it can and ignores images with pull failures.
-```
-
-Pulls service images.
diff --git a/compose/reference/push.md b/compose/reference/push.md
deleted file mode 100644
index bdc3112e83..0000000000
--- a/compose/reference/push.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-# push
-
-```
-Usage: push [options] [SERVICE...]
-
-Options:
- --ignore-push-failures Push what it can and ignores images with push failures.
-```
-
-Pushes images for services.
diff --git a/compose/reference/restart.md b/compose/reference/restart.md
deleted file mode 100644
index bbd4a68b0f..0000000000
--- a/compose/reference/restart.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-# restart
-
-```
-Usage: restart [options] [SERVICE...]
-
-Options:
--t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10)
-```
-
-Restarts services.
diff --git a/compose/reference/rm.md b/compose/reference/rm.md
deleted file mode 100644
index 8285a4ae52..0000000000
--- a/compose/reference/rm.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-# rm
-
-```
-Usage: rm [options] [SERVICE...]
-
-Options:
- -f, --force Don't ask to confirm removal
- -v Remove any anonymous volumes attached to containers
- -a, --all Also remove one-off containers created by
- docker-compose run
-```
-
-Removes stopped service containers.
-
-By default, anonymous volumes attached to containers will not be removed. You
-can override this with `-v`. To list all volumes, use `docker volume ls`.
-
-Any data which is not in a volume will be lost.
diff --git a/compose/reference/run.md b/compose/reference/run.md
deleted file mode 100644
index 863544246d..0000000000
--- a/compose/reference/run.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-
-# run
-
-```
-Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
-
-Options:
--d Detached mode: Run container in the background, print
- new container name.
---name NAME Assign a name to the container
---entrypoint CMD Override the entrypoint of the image.
--e KEY=VAL Set an environment variable (can be used multiple times)
--u, --user="" Run as specified username or uid
---no-deps Don't start linked services.
---rm Remove container after run. Ignored in detached mode.
--p, --publish=[] Publish a container's port(s) to the host
---service-ports Run command with the service's ports enabled and mapped to the host.
--T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.
--w, --workdir="" Working directory inside the container
-```
-
-Runs a one-time command against a service. For example, the following command starts the `web` service and runs `bash` as its command.
-
- $ docker-compose run web bash
-
-Commands you use with `run` start in new containers with the same configuration as defined by the service's configuration. This means the container has the same volumes and links as defined in the configuration file. There are two differences, though.
-
-First, the command passed by `run` overrides the command defined in the service configuration. For example, if the `web` service configuration is started with `bash`, then `docker-compose run web python app.py` overrides it with `python app.py`.
-
-The second difference is the `docker-compose run` command does not create any of the ports specified in the service configuration. This prevents the port collisions with already open ports. If you *do want* the service's ports created and mapped to the host, specify the `--service-ports` flag:
-
- $ docker-compose run --service-ports web python manage.py shell
-
-Alternatively manual port mapping can be specified. Same as when running Docker's `run` command - using `--publish` or `-p` options:
-
- $ docker-compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
-
-If you start a service configured with links, the `run` command first checks to see if the linked service is running and starts the service if it is stopped. Once all the linked services are running, the `run` executes the command you passed it. So, for example, you could run:
-
- $ docker-compose run db psql -h db -U docker
-
-This would open up an interactive PostgreSQL shell for the linked `db` container.
-
-If you do not want the `run` command to start linked containers, specify the `--no-deps` flag:
-
- $ docker-compose run --no-deps web python manage.py shell
diff --git a/compose/reference/scale.md b/compose/reference/scale.md
deleted file mode 100644
index 75140ee9e5..0000000000
--- a/compose/reference/scale.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-# scale
-
-```
-Usage: scale [SERVICE=NUM...]
-```
-
-Sets the number of containers to run for a service.
-
-Numbers are specified as arguments in the form `service=num`. For example:
-
- $ docker-compose scale web=2 worker=3
diff --git a/compose/reference/start.md b/compose/reference/start.md
deleted file mode 100644
index f0bdd5a97c..0000000000
--- a/compose/reference/start.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# start
-
-```
-Usage: start [SERVICE...]
-```
-
-Starts existing containers for a service.
diff --git a/compose/reference/stop.md b/compose/reference/stop.md
deleted file mode 100644
index ec7e6688a5..0000000000
--- a/compose/reference/stop.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-# stop
-
-```
-Usage: stop [options] [SERVICE...]
-
-Options:
--t, --timeout TIMEOUT Specify a shutdown timeout in seconds (default: 10).
-```
-
-Stops running containers without removing them. They can be started again with
-`docker-compose start`.
diff --git a/compose/reference/unpause.md b/compose/reference/unpause.md
deleted file mode 100644
index 846b229e3c..0000000000
--- a/compose/reference/unpause.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# unpause
-
-```
-Usage: unpause [SERVICE...]
-```
-
-Unpauses paused containers of a service.
diff --git a/compose/reference/up.md b/compose/reference/up.md
deleted file mode 100644
index 3951f87925..0000000000
--- a/compose/reference/up.md
+++ /dev/null
@@ -1,55 +0,0 @@
-
-
-# up
-
-```
-Usage: up [options] [SERVICE...]
-
-Options:
- -d Detached mode: Run containers in the background,
- print new container names.
- Incompatible with --abort-on-container-exit.
- --no-color Produce monochrome output.
- --no-deps Don't start linked services.
- --force-recreate Recreate containers even if their configuration
- and image haven't changed.
- Incompatible with --no-recreate.
- --no-recreate If containers already exist, don't recreate them.
- Incompatible with --force-recreate.
- --no-build Don't build an image, even if it's missing.
- --build Build images before starting containers.
- --abort-on-container-exit Stops all containers if any container was stopped.
- Incompatible with -d.
- -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
- when attached or when containers are already
- running. (default: 10)
- --remove-orphans Remove containers for services not defined in
- the Compose file
-
-```
-
-Builds, (re)creates, starts, and attaches to containers for a service.
-
-Unless they are already running, this command also starts any linked services.
-
-The `docker-compose up` command aggregates the output of each container. When
-the command exits, all containers are stopped. Running `docker-compose up -d`
-starts the containers in the background and leaves them running.
-
-If there are existing containers for a service, and the service's configuration
-or image was changed after the container's creation, `docker-compose up` picks
-up the changes by stopping and recreating the containers (preserving mounted
-volumes). To prevent Compose from picking up changes, use the `--no-recreate`
-flag.
-
-If you want to force Compose to stop and recreate all containers, use the
-`--force-recreate` flag.
diff --git a/compose/startup-order.md b/compose/startup-order.md
deleted file mode 100644
index c67e18295a..0000000000
--- a/compose/startup-order.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
-# Controlling startup order in Compose
-
-You can control the order of service startup with the
-[depends_on](compose-file.md#depends-on) option. Compose always starts
-containers in dependency order, where dependencies are determined by
-`depends_on`, `links`, `volumes_from` and `network_mode: "service:..."`.
-
-However, Compose will not wait until a container is "ready" (whatever that means
-for your particular application) - only until it's running. There's a good
-reason for this.
-
-The problem of waiting for a database (for example) to be ready is really just
-a subset of a much larger problem of distributed systems. In production, your
-database could become unavailable or move hosts at any time. Your application
-needs to be resilient to these types of failures.
-
-To handle this, your application should attempt to re-establish a connection to
-the database after a failure. If the application retries the connection,
-it should eventually be able to connect to the database.
-
-The best solution is to perform this check in your application code, both at
-startup and whenever a connection is lost for any reason. However, if you don't
-need this level of resilience, you can work around the problem with a wrapper
-script:
-
-- Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it)
- or [dockerize](https://github.com/jwilder/dockerize). These are small
- wrapper scripts which you can include in your application's image and will
- poll a given host and port until it's accepting TCP connections.
-
- Supposing your application's image has a `CMD` set in its Dockerfile, you
- can wrap it by setting the entrypoint in `docker-compose.yml`:
-
- version: "2"
- services:
- web:
- build: .
- ports:
- - "80:8000"
- depends_on:
- - "db"
- entrypoint: ./wait-for-it.sh db:5432
- db:
- image: postgres
-
-- Write your own wrapper script to perform a more application-specific health
- check. For example, you might want to wait until Postgres is definitely
- ready to accept commands:
-
- #!/bin/bash
-
- set -e
-
- host="$1"
- shift
- cmd="$@"
-
- until psql -h "$host" -U "postgres" -c '\l'; do
- >&2 echo "Postgres is unavailable - sleeping"
- sleep 1
- done
-
- >&2 echo "Postgres is up - executing command"
- exec $cmd
-
- You can use this as a wrapper script as in the previous example, by setting
- `entrypoint: ./wait-for-postgres.sh db`.
-
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/compose/swarm.md b/compose/swarm.md
deleted file mode 100644
index bbab690879..0000000000
--- a/compose/swarm.md
+++ /dev/null
@@ -1,181 +0,0 @@
-
-
-
-# Using Compose with Swarm
-
-Docker Compose and [Docker Swarm](/swarm/overview.md) aim to have full integration, meaning
-you can point a Compose app at a Swarm cluster and have it all just work as if
-you were using a single Docker host.
-
-The actual extent of integration depends on which version of the [Compose file
-format](compose-file.md#versioning) you are using:
-
-1. If you're using version 1 along with `links`, your app will work, but Swarm
- will schedule all containers on one host, because links between containers
- do not work across hosts with the old networking system.
-
-2. If you're using version 2, your app should work with no changes:
-
- - subject to the [limitations](#limitations) described below,
-
- - as long as the Swarm cluster is configured to use the [overlay driver](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network),
- or a custom driver which supports multi-host networking.
-
-Read [Get started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to
-set up a Swarm cluster with [Docker Machine](/machine/overview.md) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as:
-
- $ eval "$(docker-machine env --swarm )"
- $ docker-compose up
-
-
-## Limitations
-
-### Building images
-
-Swarm can build an image from a Dockerfile just like a single-host Docker
-instance can, but the resulting image will only live on a single node and won't
-be distributed to other nodes.
-
-If you want to use Compose to scale the service in question to multiple nodes,
-you'll have to build it yourself, push it to a registry (e.g. the Docker Hub)
-and reference it from `docker-compose.yml`:
-
- $ docker build -t myusername/web .
- $ docker push myusername/web
-
- $ cat docker-compose.yml
- web:
- image: myusername/web
-
- $ docker-compose up -d
- $ docker-compose scale web=3
-
-### Multiple dependencies
-
-If a service has multiple dependencies of the type which force co-scheduling
-(see [Automatic scheduling](#automatic-scheduling) below), it's possible that
-Swarm will schedule the dependencies on different nodes, making the dependent
-service impossible to schedule. For example, here `foo` needs to be co-scheduled
-with `bar` and `baz`:
-
- version: "2"
- services:
- foo:
- image: foo
- volumes_from: ["bar"]
- network_mode: "service:baz"
- bar:
- image: bar
- baz:
- image: baz
-
-The problem is that Swarm might first schedule `bar` and `baz` on different
-nodes (since they're not dependent on one another), making it impossible to
-pick an appropriate node for `foo`.
-
-To work around this, use [manual scheduling](#manual-scheduling) to ensure that
-all three services end up on the same node:
-
- version: "2"
- services:
- foo:
- image: foo
- volumes_from: ["bar"]
- network_mode: "service:baz"
- environment:
- - "constraint:node==node-1"
- bar:
- image: bar
- environment:
- - "constraint:node==node-1"
- baz:
- image: baz
- environment:
- - "constraint:node==node-1"
-
-### Host ports and recreating containers
-
-If a service maps a port from the host, e.g. `80:8000`, then you may get an
-error like this when running `docker-compose up` on it after the first time:
-
- docker: Error response from daemon: unable to find a node that satisfies
- container==6ab2dfe36615ae786ef3fc35d641a260e3ea9663d6e69c5b70ce0ca6cb373c02.
-
-The usual cause of this error is that the container has a volume (defined either
-in its image or in the Compose file) without an explicit mapping, and so in
-order to preserve its data, Compose has directed Swarm to schedule the new
-container on the same node as the old container. This results in a port clash.
-
-There are two viable workarounds for this problem:
-
-- Specify a named volume, and use a volume driver which is capable of mounting
- the volume into the container regardless of what node it's scheduled on.
-
- Compose does not give Swarm any specific scheduling instructions if a
- service uses only named volumes.
-
- version: "2"
-
- services:
- web:
- build: .
- ports:
- - "80:8000"
- volumes:
- - web-logs:/var/log/web
-
- volumes:
- web-logs:
- driver: custom-volume-driver
-
-- Remove the old container before creating the new one. You will lose any data
- in the volume.
-
- $ docker-compose stop web
- $ docker-compose rm -f web
- $ docker-compose up web
-
-
-## Scheduling containers
-
-### Automatic scheduling
-
-Some configuration options will result in containers being automatically
-scheduled on the same Swarm node to ensure that they work correctly. These are:
-
-- `network_mode: "service:..."` and `network_mode: "container:..."` (and
- `net: "container:..."` in the version 1 file format).
-
-- `volumes_from`
-
-- `links`
-
-### Manual scheduling
-
-Swarm offers a rich set of scheduling and affinity hints, enabling you to
-control where containers are located. They are specified via container
-environment variables, so you can use Compose's `environment` option to set
-them.
-
- # Schedule containers on a specific node
- environment:
- - "constraint:node==node-1"
-
- # Schedule containers on a node that has the 'storage' label set to 'ssd'
- environment:
- - "constraint:storage==ssd"
-
- # Schedule containers where the 'redis' image is already pulled
- environment:
- - "affinity:image==redis"
-
-For the full set of available filters and expressions, see the [Swarm
-documentation](/swarm/scheduler/filter.md).
diff --git a/compose/wordpress.md b/compose/wordpress.md
deleted file mode 100644
index b39a8bbbe6..0000000000
--- a/compose/wordpress.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-
-
-# Quickstart: Docker Compose and WordPress
-
-You can use Docker Compose to easily run WordPress in an isolated environment built
-with Docker containers. This quick-start guide demonstrates how to use Compose to set up and run WordPress. Before starting, you'll need to have
-[Compose installed](install.md).
-
-### Define the project
-
-1. Create an empty project directory.
-
- You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
-
- This project directory will contain a `docker-compose.yaml` file which will be complete in itself for a good starter wordpress project.
-
-2. Change directories into your project directory.
-
- For example, if you named your directory `my_wordpress`:
-
- $ cd my-wordpress/
-
-3. Create a `docker-compose.yml` file that will start your `Wordpress` blog and a separate `MySQL` instance with a volume mount for data persistence:
-
- version: '2'
- services:
- db:
- image: mysql:5.7
- volumes:
- - "./.data/db:/var/lib/mysql"
- restart: always
- environment:
- MYSQL_ROOT_PASSWORD: wordpress
- MYSQL_DATABASE: wordpress
- MYSQL_USER: wordpress
- MYSQL_PASSWORD: wordpress
-
- wordpress:
- depends_on:
- - db
- image: wordpress:latest
- links:
- - db
- ports:
- - "8000:80"
- restart: always
- environment:
- WORDPRESS_DB_HOST: db:3306
- WORDPRESS_DB_PASSWORD: wordpress
-
- **NOTE**: The folder `./.data/db` will be automatically created in the project directory
- alongside the `docker-compose.yml` which will persist any updates made by wordpress to the
- database.
-
-### Build the project
-
-Now, run `docker-compose up -d` from your project directory.
-
-This pulls the needed images, and starts the wordpress and database containers, as shown in the example below.
-
- $ docker-compose up -d
- Creating network "my_wordpress_default" with the default driver
- Pulling db (mysql:5.7)...
- 5.7: Pulling from library/mysql
- efd26ecc9548: Pull complete
- a3ed95caeb02: Pull complete
- ...
- Digest: sha256:34a0aca88e85f2efa5edff1cea77cf5d3147ad93545dbec99cfe705b03c520de
- Status: Downloaded newer image for mysql:5.7
- Pulling wordpress (wordpress:latest)...
- latest: Pulling from library/wordpress
- efd26ecc9548: Already exists
- a3ed95caeb02: Pull complete
- 589a9d9a7c64: Pull complete
- ...
- Digest: sha256:ed28506ae44d5def89075fd5c01456610cd6c64006addfe5210b8c675881aff6
- Status: Downloaded newer image for wordpress:latest
- Creating my_wordpress_db_1
- Creating my_wordpress_wordpress_1
-
-### Bring up WordPress in a web browser
-
-If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
-
-At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator.
-
-**NOTE**: The Wordpress site will not be immediately available on port `8000` because the containers are still being initialized and may take a couple of minutes before the first load.
-
-
-
-
-
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/containers/README.md b/containers/README.md
deleted file mode 100644
index 6cb24e12b8..0000000000
--- a/containers/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# Containers
-
-These are the "accessory" containers with which Hub 2.0 is run.
-
-## dnsmasq
-
-dnsmasq is used to fake the `Origin` header in CORS requests. This is
-necessary because the browser automatically sends `Origin: localhost`
-(users can't modify it) and we need it to be in the `*.docker.com`
-space, since staging is set up to handle single dot subdomains.
-
-We've chosen `bagels.docker.com` as the development domain (something
-that is unlikely to ever be deployed in production so that we won't
-have to change the name in the future).
-
-### prerequisites
-
-```bash
-cd $PROJECT
-make dns
-```
-
-This runs `$PROJECT/containers/configure_system_dns.sh`, which will
-add `bagels.docker.com` to your host system's `/etc/resolver/`. This
-makes it so that `bagels.docker.com` will resolver to `boot2docker ip`.
-
-### run
-
-```bash
-cd $PROJECT/containers/dnsmasq
-docker build -t bagelteam/dnsmasq
-docker run -itp 53:53/udp bagelteam/dnsmasq
-```
-
-## HAProxy
-
-HAProxy is a load balancer used to terminate SSL.
-
-Currently Out-of-Order.
-
-```bash
-docker run -itp 80:80 -p 443:433 bagelteam/haproxy
-```
-
-HAProxy will load balance `bagels.docker.com` across a single
-container (hah), and more importantly, take care of SSL Offloading at
-the load balancer. The image has it's own SSL certificates.
diff --git a/containers/dnsmasq/Dockerfile b/containers/dnsmasq/Dockerfile
deleted file mode 100644
index 40c00e83d5..0000000000
--- a/containers/dnsmasq/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM debian:jessie
-
-MAINTAINER Chris Biscardi
-
-RUN apt-get update && apt-get install -y dnsmasq
-
-EXPOSE 53/udp
-
-ADD ./run /opt/run
-
-CMD "/opt/run"
-# docker run -d -p 53:53/udp --name docker-dnsmasq dnsmasq --address=/dev.docker.io/172.16.200.100
diff --git a/containers/dnsmasq/configure_system_dns.sh b/containers/dnsmasq/configure_system_dns.sh
deleted file mode 100755
index d84112198a..0000000000
--- a/containers/dnsmasq/configure_system_dns.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-# Docker Bridge IP - https://docs.docker.com/articles/networking/
-# $DOCKER_HOST will be the IP of the boot2docker or docker-machine
-# instance *currently sourced in your shell*. This means something
-# like $(docker-machine env dev) or $(boot2docker shellinit)
-if [[ $DOCKER_HOST =~ ([0-9]{1,3}[\.]){3}[0-9]{1,3} ]]; then
- DAEMON_IPV4=$BASH_REMATCH
- echo $DAEMON_IPV4
-else
- echo "unable to parse string $DOCKER_HOST"
-fi
-
-set_dev_resolver() {
- echo "Bagels need your permission to configure system DNS."
- sudo mkdir -p /etc/resolver
- echo "nameserver $DAEMON_IPV4" | sudo tee /etc/resolver/bagels.docker.com
-}
-
-if [ ! -f /etc/resolver/bagels.docker.com ]; then
- set_dev_resolver
-elif [ "$(cat /etc/resolver/bagels.docker.com)" != "nameserver $DAEMON_IPV4" ]; then
- set_dev_resolver
-fi
diff --git a/containers/dnsmasq/run b/containers/dnsmasq/run
deleted file mode 100755
index b2b60ad240..0000000000
--- a/containers/dnsmasq/run
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-# match the ip address from a DOCKER_HOST which is set by boot2docker
-# and docker-machine
-if [[ $DOCKER_HOST =~ ([0-9]{1,3}[\.]){3}[0-9]{1,3} ]]; then
- strresult=$BASH_REMATCH
- echo $strresult
-else
- echo "unable to parse string $DOCKER_HOST"
-fi
-
-/usr/sbin/dnsmasq -q --no-daemon --address=/bagels.docker.com/$strresult
-#$strresult
diff --git a/containers/haproxy/Dockerfile b/containers/haproxy/Dockerfile
deleted file mode 100644
index 034260c7e2..0000000000
--- a/containers/haproxy/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM fish/haproxy
-
-ADD . /haproxy
-
-EXPOSE 80 443
-
-# Check is haproxy.cfg is valid before we start
-# CMD "(haproxy -c -f /haproxy/haproxy.cfg || ( echo 'Bad haproxy config'; exit; )) && /usr/sbin/haproxy -f /haproxy/haproxy.cfg & && wait $!"
-
-ENTRYPOINT ["/haproxy/run"]
\ No newline at end of file
diff --git a/containers/haproxy/haproxy.cfg b/containers/haproxy/haproxy.cfg
deleted file mode 100644
index b9d1c5014a..0000000000
--- a/containers/haproxy/haproxy.cfg
+++ /dev/null
@@ -1,41 +0,0 @@
-global
- chroot /var/lib/haproxy
- user haproxy
- group haproxy
-
-defaults
- log global
- mode http
- option httplog
- option dontlognull
- timeout connect 5000
- timeout client 50000
- timeout server 50000
- errorfile 400 /etc/haproxy/errors/400.http
- errorfile 403 /etc/haproxy/errors/403.http
- errorfile 408 /etc/haproxy/errors/408.http
- errorfile 500 /etc/haproxy/errors/500.http
- errorfile 502 /etc/haproxy/errors/502.http
- errorfile 503 /etc/haproxy/errors/503.http
- errorfile 504 /etc/haproxy/errors/504.http
- stats enable
- stats auth haproxy:hapass
-
-frontend https
- bind :443 ssl crt /haproxy/keys/bagels.docker.com/bagels.docker.pem
- acl is-ssl dst_port 443
-
- http-request set-header X-Real-IP %ci
-
- reqadd X-Forwarded-Proto:\ https if is-ssl
- reqadd X-Forwarded-Port:\ 443 if is-ssl
- rspadd Strict-Transport-Security:\ max-age=31536000 if is-ssl
-
- acl is_hub_dev hdr(host) -i bagels.docker.com
-
- use_backend hub_dev if is_hub_dev
-
-backend hub_dev
- balance leastconn
- option httpclose
- server docker-1 {DOCKER_HOST}:7001 check
\ No newline at end of file
diff --git a/containers/haproxy/keys/bagels.docker.com/bagels.docker.crt b/containers/haproxy/keys/bagels.docker.com/bagels.docker.crt
deleted file mode 100644
index 44765408fb..0000000000
--- a/containers/haproxy/keys/bagels.docker.com/bagels.docker.crt
+++ /dev/null
@@ -1,14 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICNTCCAZ4CCQDY33gN8y9BQzANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoT
-BkRvY2tlcjEaMBgGA1UEAxMRYmFnZWxzLmRvY2tlci5jb20wHhcNMTUwMTIxMDM1
-NTEzWhcNMTYwMTIxMDM1NTEzWjBfMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
-FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEaMBgGA1UE
-AxMRYmFnZWxzLmRvY2tlci5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGB
-AMluBCvOUrdFkFGCmpKBPduoZgYE/hNKnmX3Cqrn+FsodOBiin1lOs7+XX3EY078
-u5QIULNZ3j/LUSuxgHBS8RcVc3ljCkvwRURwVy6FWunahdTULLEq+qOByv6Hq2/W
-itlzT2Rw6Tu29IThb7Mtxb1B6LoAorkWX/YEXankpVPnAgMBAAEwDQYJKoZIhvcN
-AQEFBQADgYEAPdqZ2jLxOuZ52wucJN1DoOBUCWnCM5bfBHOU3wBqSPA/mT2Bw5Fo
-evqqd+mRWizgmSkDM9NpO9cj9tpeidTrHsTutkqjQttIeNAtZm82sSWH7ul1N1du
-4aDDKwAk4j9BYPUmYQFaSRKNtE/OpGVPxxK/ZBS8YeVT0knzTr/a9to=
------END CERTIFICATE-----
diff --git a/containers/haproxy/keys/bagels.docker.com/bagels.docker.csr b/containers/haproxy/keys/bagels.docker.com/bagels.docker.csr
deleted file mode 100644
index b9d96abb7e..0000000000
--- a/containers/haproxy/keys/bagels.docker.com/bagels.docker.csr
+++ /dev/null
@@ -1,11 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBnzCCAQgCAQAwXzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQKEwZEb2NrZXIxGjAYBgNVBAMTEWJhZ2Vs
-cy5kb2NrZXIuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDJbgQrzlK3
-RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9ZTrO/l19xGNO/LuUCFCzWd4/
-y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1CyxKvqjgcr+h6tv1orZc09kcOk7
-tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQABoAAwDQYJKoZIhvcNAQEFBQAD
-gYEAlAQKhy4j7wenWqnKzfpp/o0cbzQAcve76XSwfWrzONFDZidhQlwAKBdYbYN3
-4ITqNw4MPSCMBkMMCQFFFHM/+NqlAmYYbJHv8uDxKel/7IsxIEPRun0b6k/+wL2e
-2nyJJrMwesVrzvDwfB+8eoUOZFJIiX6htpxU4vgq9xMgMAg=
------END CERTIFICATE REQUEST-----
diff --git a/containers/haproxy/keys/bagels.docker.com/bagels.docker.key b/containers/haproxy/keys/bagels.docker.com/bagels.docker.key
deleted file mode 100644
index 6659dc144c..0000000000
--- a/containers/haproxy/keys/bagels.docker.com/bagels.docker.key
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQDJbgQrzlK3RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9
-ZTrO/l19xGNO/LuUCFCzWd4/y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1Cyx
-Kvqjgcr+h6tv1orZc09kcOk7tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQAB
-AoGAcA+Qqn5Cbkt5Gp+6Nr9IFqf8+mRUpY7hyIBDowkiljRPsXWg7loe+YFxqcJU
-LWFVSenGW8Enb/5AzjoV5md+UAiERbF13SzrEx7J7riwb1ljHe82RqUbyfpnnDWT
-aSZ/ce+9LYoYggFVfH7DloT8NNsQzTDi2/g+66dXi8fcIIECQQDqykIcF20sz8Ar
-H3StlgITEorJiZRpvbzuQ6G7XoC1XOI1/0+1NbIHm3lv4XfvaRSdKv0mnPaZmi3h
-PWZC/zj3AkEA26BE6iEGO3eJ39l1zRpB1jS/VrEAa7pzGeEFeL+k/5XEWqsgHOtQ
-qIbPtCyKcN5mtCYEg6GEK/pqNALWL6ZwkQJATT9CRO/IMaggd4+f2cSy5geBthEX
-zTppwJJr0bOj8QegPVfEp8AE1M/oQlESHqiZ6aPNKjkWQS8izSpgTMafvQJAILDT
-cTInNlTNvfcldLkS0aqaTHIeSOrA1TpMUTPdgHmvd3t/VS6lm+AtLHlwxeokyW3b
-QCibftxQUJuXfBI/MQJAAt2m8P0V+U/MFjNhYUd2jwJIFFh7AVYeSH26NxzQMgO0
-YNQAaRKxwuhDrxyVwezryzyBcVWKdfhCtgOK6U5mFw==
------END RSA PRIVATE KEY-----
diff --git a/containers/haproxy/keys/bagels.docker.com/bagels.docker.pem b/containers/haproxy/keys/bagels.docker.com/bagels.docker.pem
deleted file mode 100644
index 753429af60..0000000000
--- a/containers/haproxy/keys/bagels.docker.com/bagels.docker.pem
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICNTCCAZ4CCQDY33gN8y9BQzANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoT
-BkRvY2tlcjEaMBgGA1UEAxMRYmFnZWxzLmRvY2tlci5jb20wHhcNMTUwMTIxMDM1
-NTEzWhcNMTYwMTIxMDM1NTEzWjBfMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
-FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEaMBgGA1UE
-AxMRYmFnZWxzLmRvY2tlci5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGB
-AMluBCvOUrdFkFGCmpKBPduoZgYE/hNKnmX3Cqrn+FsodOBiin1lOs7+XX3EY078
-u5QIULNZ3j/LUSuxgHBS8RcVc3ljCkvwRURwVy6FWunahdTULLEq+qOByv6Hq2/W
-itlzT2Rw6Tu29IThb7Mtxb1B6LoAorkWX/YEXankpVPnAgMBAAEwDQYJKoZIhvcN
-AQEFBQADgYEAPdqZ2jLxOuZ52wucJN1DoOBUCWnCM5bfBHOU3wBqSPA/mT2Bw5Fo
-evqqd+mRWizgmSkDM9NpO9cj9tpeidTrHsTutkqjQttIeNAtZm82sSWH7ul1N1du
-4aDDKwAk4j9BYPUmYQFaSRKNtE/OpGVPxxK/ZBS8YeVT0knzTr/a9to=
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQDJbgQrzlK3RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9
-ZTrO/l19xGNO/LuUCFCzWd4/y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1Cyx
-Kvqjgcr+h6tv1orZc09kcOk7tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQAB
-AoGAcA+Qqn5Cbkt5Gp+6Nr9IFqf8+mRUpY7hyIBDowkiljRPsXWg7loe+YFxqcJU
-LWFVSenGW8Enb/5AzjoV5md+UAiERbF13SzrEx7J7riwb1ljHe82RqUbyfpnnDWT
-aSZ/ce+9LYoYggFVfH7DloT8NNsQzTDi2/g+66dXi8fcIIECQQDqykIcF20sz8Ar
-H3StlgITEorJiZRpvbzuQ6G7XoC1XOI1/0+1NbIHm3lv4XfvaRSdKv0mnPaZmi3h
-PWZC/zj3AkEA26BE6iEGO3eJ39l1zRpB1jS/VrEAa7pzGeEFeL+k/5XEWqsgHOtQ
-qIbPtCyKcN5mtCYEg6GEK/pqNALWL6ZwkQJATT9CRO/IMaggd4+f2cSy5geBthEX
-zTppwJJr0bOj8QegPVfEp8AE1M/oQlESHqiZ6aPNKjkWQS8izSpgTMafvQJAILDT
-cTInNlTNvfcldLkS0aqaTHIeSOrA1TpMUTPdgHmvd3t/VS6lm+AtLHlwxeokyW3b
-QCibftxQUJuXfBI/MQJAAt2m8P0V+U/MFjNhYUd2jwJIFFh7AVYeSH26NxzQMgO0
-YNQAaRKxwuhDrxyVwezryzyBcVWKdfhCtgOK6U5mFw==
------END RSA PRIVATE KEY-----
diff --git a/containers/haproxy/run b/containers/haproxy/run
deleted file mode 100755
index b26edf0b91..0000000000
--- a/containers/haproxy/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-# match the ip address from a DOCKER_HOST which is set by boot2docker
-# and docker-machine
-if [[ $DOCKER_HOST =~ ([0-9]{1,3}[\.]){3}[0-9]{1,3} ]]; then
- strresult=$BASH_REMATCH
- echo $strresult
-else
- echo "unable to parse string $DOCKER_HOST"
-fi
-
-sed -i s/{DOCKER_HOST}/"$strresult"/g /haproxy/haproxy.cfg
-
-# Check is haproxy.cfg is valid before we start
-haproxy -c -f /haproxy/haproxy.cfg || ( echo 'Bad haproxy config'; exit; )
-
-/usr/sbin/haproxy -f /haproxy/haproxy.cfg &
-
-wait $!
diff --git a/containers/prod/build b/containers/prod/build
deleted file mode 100755
index af29f19d61..0000000000
--- a/containers/prod/build
+++ /dev/null
@@ -1 +0,0 @@
-docker build -t bagel/haproxy_beta ./haproxy
\ No newline at end of file
diff --git a/containers/prod/docker-compose.yml b/containers/prod/docker-compose.yml
deleted file mode 100644
index 838d385d12..0000000000
--- a/containers/prod/docker-compose.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-haproxy:
- build: ./haproxy
- ports:
- - "80:80"
- - "443:443"
-hub:
- build: bagelteam/hubtest
- volumes:
- - .:/opt/hub
- ports:
- - "7001:3000"
- environment:
- ENV: production
\ No newline at end of file
diff --git a/containers/prod/haproxy/Dockerfile b/containers/prod/haproxy/Dockerfile
deleted file mode 100644
index 12a913f9de..0000000000
--- a/containers/prod/haproxy/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM fish/haproxy
-
-ADD ./haproxy.cfg /haproxy/haproxy.cfg
-ADD ./run /haproxy/run
-
-EXPOSE 80 443
-
-ENTRYPOINT ["/haproxy/run"]
-
diff --git a/containers/prod/haproxy/haproxy.cfg b/containers/prod/haproxy/haproxy.cfg
deleted file mode 100644
index 72a144b20e..0000000000
--- a/containers/prod/haproxy/haproxy.cfg
+++ /dev/null
@@ -1,46 +0,0 @@
-global
- chroot /var/lib/haproxy
- user haproxy
- group haproxy
-
-defaults
- log global
- mode http
- option httplog
- option dontlognull
- timeout connect 5000
- timeout client 50000
- timeout server 50000
- errorfile 400 /etc/haproxy/errors/400.http
- errorfile 403 /etc/haproxy/errors/403.http
- errorfile 408 /etc/haproxy/errors/408.http
- errorfile 500 /etc/haproxy/errors/500.http
- errorfile 502 /etc/haproxy/errors/502.http
- errorfile 503 /etc/haproxy/errors/503.http
- errorfile 504 /etc/haproxy/errors/504.http
- stats enable
- stats auth haproxy:hapass
-
-userlist Bagels
- user betalist insecure-password {BETA_PASSWORD}
-
-frontend https
- bind :443 ssl crt /haproxy/keys/hub-beta.docker.com/hub-beta.docker.pem
- acl is-ssl dst_port 443
- acl Auth_Bagels http_auth(Bagels)
- http-request auth realm HubBeta if !Auth_Bagels
-
- http-request set-header X-Real-IP %ci
-
- reqadd X-Forwarded-Proto:\ https if is-ssl
- reqadd X-Forwarded-Port:\ 443 if is-ssl
- rspadd Strict-Transport-Security:\ max-age=31536000 if is-ssl
-
- acl is_hub_dev hdr(host) -i hub-beta.docker.com
-
- use_backend hub_dev if is_hub_dev
-
-backend hub_dev
- balance leastconn
- option httpclose
- server docker-1 172.17.42.1:7001 check
diff --git a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.crt b/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.crt
deleted file mode 100644
index 44765408fb..0000000000
--- a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.crt
+++ /dev/null
@@ -1,14 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICNTCCAZ4CCQDY33gN8y9BQzANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoT
-BkRvY2tlcjEaMBgGA1UEAxMRYmFnZWxzLmRvY2tlci5jb20wHhcNMTUwMTIxMDM1
-NTEzWhcNMTYwMTIxMDM1NTEzWjBfMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
-FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEaMBgGA1UE
-AxMRYmFnZWxzLmRvY2tlci5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGB
-AMluBCvOUrdFkFGCmpKBPduoZgYE/hNKnmX3Cqrn+FsodOBiin1lOs7+XX3EY078
-u5QIULNZ3j/LUSuxgHBS8RcVc3ljCkvwRURwVy6FWunahdTULLEq+qOByv6Hq2/W
-itlzT2Rw6Tu29IThb7Mtxb1B6LoAorkWX/YEXankpVPnAgMBAAEwDQYJKoZIhvcN
-AQEFBQADgYEAPdqZ2jLxOuZ52wucJN1DoOBUCWnCM5bfBHOU3wBqSPA/mT2Bw5Fo
-evqqd+mRWizgmSkDM9NpO9cj9tpeidTrHsTutkqjQttIeNAtZm82sSWH7ul1N1du
-4aDDKwAk4j9BYPUmYQFaSRKNtE/OpGVPxxK/ZBS8YeVT0knzTr/a9to=
------END CERTIFICATE-----
diff --git a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.csr b/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.csr
deleted file mode 100644
index b9d96abb7e..0000000000
--- a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.csr
+++ /dev/null
@@ -1,11 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBnzCCAQgCAQAwXzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQH
-Ew1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQKEwZEb2NrZXIxGjAYBgNVBAMTEWJhZ2Vs
-cy5kb2NrZXIuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDJbgQrzlK3
-RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9ZTrO/l19xGNO/LuUCFCzWd4/
-y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1CyxKvqjgcr+h6tv1orZc09kcOk7
-tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQABoAAwDQYJKoZIhvcNAQEFBQAD
-gYEAlAQKhy4j7wenWqnKzfpp/o0cbzQAcve76XSwfWrzONFDZidhQlwAKBdYbYN3
-4ITqNw4MPSCMBkMMCQFFFHM/+NqlAmYYbJHv8uDxKel/7IsxIEPRun0b6k/+wL2e
-2nyJJrMwesVrzvDwfB+8eoUOZFJIiX6htpxU4vgq9xMgMAg=
------END CERTIFICATE REQUEST-----
diff --git a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.key b/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.key
deleted file mode 100644
index 6659dc144c..0000000000
--- a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.key
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQDJbgQrzlK3RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9
-ZTrO/l19xGNO/LuUCFCzWd4/y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1Cyx
-Kvqjgcr+h6tv1orZc09kcOk7tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQAB
-AoGAcA+Qqn5Cbkt5Gp+6Nr9IFqf8+mRUpY7hyIBDowkiljRPsXWg7loe+YFxqcJU
-LWFVSenGW8Enb/5AzjoV5md+UAiERbF13SzrEx7J7riwb1ljHe82RqUbyfpnnDWT
-aSZ/ce+9LYoYggFVfH7DloT8NNsQzTDi2/g+66dXi8fcIIECQQDqykIcF20sz8Ar
-H3StlgITEorJiZRpvbzuQ6G7XoC1XOI1/0+1NbIHm3lv4XfvaRSdKv0mnPaZmi3h
-PWZC/zj3AkEA26BE6iEGO3eJ39l1zRpB1jS/VrEAa7pzGeEFeL+k/5XEWqsgHOtQ
-qIbPtCyKcN5mtCYEg6GEK/pqNALWL6ZwkQJATT9CRO/IMaggd4+f2cSy5geBthEX
-zTppwJJr0bOj8QegPVfEp8AE1M/oQlESHqiZ6aPNKjkWQS8izSpgTMafvQJAILDT
-cTInNlTNvfcldLkS0aqaTHIeSOrA1TpMUTPdgHmvd3t/VS6lm+AtLHlwxeokyW3b
-QCibftxQUJuXfBI/MQJAAt2m8P0V+U/MFjNhYUd2jwJIFFh7AVYeSH26NxzQMgO0
-YNQAaRKxwuhDrxyVwezryzyBcVWKdfhCtgOK6U5mFw==
------END RSA PRIVATE KEY-----
diff --git a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.pem b/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.pem
deleted file mode 100644
index 753429af60..0000000000
--- a/containers/prod/haproxy/keys/bagels.docker.com/bagels.docker.pem
+++ /dev/null
@@ -1,29 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICNTCCAZ4CCQDY33gN8y9BQzANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoT
-BkRvY2tlcjEaMBgGA1UEAxMRYmFnZWxzLmRvY2tlci5jb20wHhcNMTUwMTIxMDM1
-NTEzWhcNMTYwMTIxMDM1NTEzWjBfMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
-FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEaMBgGA1UE
-AxMRYmFnZWxzLmRvY2tlci5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGB
-AMluBCvOUrdFkFGCmpKBPduoZgYE/hNKnmX3Cqrn+FsodOBiin1lOs7+XX3EY078
-u5QIULNZ3j/LUSuxgHBS8RcVc3ljCkvwRURwVy6FWunahdTULLEq+qOByv6Hq2/W
-itlzT2Rw6Tu29IThb7Mtxb1B6LoAorkWX/YEXankpVPnAgMBAAEwDQYJKoZIhvcN
-AQEFBQADgYEAPdqZ2jLxOuZ52wucJN1DoOBUCWnCM5bfBHOU3wBqSPA/mT2Bw5Fo
-evqqd+mRWizgmSkDM9NpO9cj9tpeidTrHsTutkqjQttIeNAtZm82sSWH7ul1N1du
-4aDDKwAk4j9BYPUmYQFaSRKNtE/OpGVPxxK/ZBS8YeVT0knzTr/a9to=
------END CERTIFICATE-----
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQDJbgQrzlK3RZBRgpqSgT3bqGYGBP4TSp5l9wqq5/hbKHTgYop9
-ZTrO/l19xGNO/LuUCFCzWd4/y1ErsYBwUvEXFXN5YwpL8EVEcFcuhVrp2oXU1Cyx
-Kvqjgcr+h6tv1orZc09kcOk7tvSE4W+zLcW9Qei6AKK5Fl/2BF2p5KVT5wIDAQAB
-AoGAcA+Qqn5Cbkt5Gp+6Nr9IFqf8+mRUpY7hyIBDowkiljRPsXWg7loe+YFxqcJU
-LWFVSenGW8Enb/5AzjoV5md+UAiERbF13SzrEx7J7riwb1ljHe82RqUbyfpnnDWT
-aSZ/ce+9LYoYggFVfH7DloT8NNsQzTDi2/g+66dXi8fcIIECQQDqykIcF20sz8Ar
-H3StlgITEorJiZRpvbzuQ6G7XoC1XOI1/0+1NbIHm3lv4XfvaRSdKv0mnPaZmi3h
-PWZC/zj3AkEA26BE6iEGO3eJ39l1zRpB1jS/VrEAa7pzGeEFeL+k/5XEWqsgHOtQ
-qIbPtCyKcN5mtCYEg6GEK/pqNALWL6ZwkQJATT9CRO/IMaggd4+f2cSy5geBthEX
-zTppwJJr0bOj8QegPVfEp8AE1M/oQlESHqiZ6aPNKjkWQS8izSpgTMafvQJAILDT
-cTInNlTNvfcldLkS0aqaTHIeSOrA1TpMUTPdgHmvd3t/VS6lm+AtLHlwxeokyW3b
-QCibftxQUJuXfBI/MQJAAt2m8P0V+U/MFjNhYUd2jwJIFFh7AVYeSH26NxzQMgO0
-YNQAaRKxwuhDrxyVwezryzyBcVWKdfhCtgOK6U5mFw==
------END RSA PRIVATE KEY-----
diff --git a/containers/prod/haproxy/run b/containers/prod/haproxy/run
deleted file mode 100755
index 145832a4d5..0000000000
--- a/containers/prod/haproxy/run
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-sed -i s/{BETA_PASSWORD}/"$BETA_PASSWORD"/g /haproxy/haproxy.cfg
-
-# Check is haproxy.cfg is valid before we start
-haproxy -c -f /haproxy/haproxy.cfg || ( echo 'Bad haproxy config'; exit; )
-
-/usr/sbin/haproxy -f /haproxy/haproxy.cfg &
-
-wait $!
diff --git a/deployment/deploy.sh b/deployment/deploy.sh
deleted file mode 100755
index 9eded10710..0000000000
--- a/deployment/deploy.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/bin/sh
-
-DOCKER_CMD=docker
-
-alias AWS_HUB_PROD='aws ec2 describe-instances --filters "Name=tag:aws:cloudformation:stack-name,Values=us-east-1*" "Name=tag:secondary-role,Values=hub" "Name=instance-state-name,Values=running" --output=json'
-alias AWS_HUB_STAGE='aws ec2 describe-instances --filters "Name=tag:aws:cloudformation:stack-name,Values=stage-us-east-1*" "Name=tag:secondary-role,Values=hub" "Name=instance-state-name,Values=running" --output=json'
-alias AWS_IP="jq -r '.Reservations[].Instances[].PrivateIpAddress'"
-
-HUB_GATEWAY="https://hub.docker.com"
-HUB_SERVICE_NAME="hub-web-v2"
-
-DEFAULT_IMAGE_PROD="bagel/hub-prod"
-DEFAULT_IMAGE_STAGE="bagel/hub-stage"
-
-NEW_RELIC_APP_NAME="hub.docker.com(aws-node)"
-NEW_RELIC_LICENSE_KEY="582e3891446a63a3f99b4d32f9585ec74af1d8d7"
-
-NO_COLOR="\033[0m"
-RED="\033[0;31m"
-GREEN="\033[0;32m"
-YELLOW="\033[0;33m"
-
-MESSAGE_MISSING_OR_INVALID_ARGS="${RED}Missing or invalid arguments${NO_COLOR}"
-
-# $1: prod or stage
-getAWSHosts() {
- if [ $1 == "prod" ]; then
- echo $(AWS_HUB_PROD | ( AWS_IP ; echo ) | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/:2376 /g')
- elif [ $1 == "stage" ]; then
- echo $(AWS_HUB_STAGE | ( AWS_IP ; echo ) | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/:2376 /g')
- fi
-}
-
-# $1: Exit code
-printUsageAndExit() {
- echo
- echo "Usage: deploy.sh [prod|stage|-h ] [IMAGE]"
- echo
- echo " prod A predefined list of hosts for production"
- echo " stage A predefined list of hosts for staging"
- echo " -h A single host address"
- echo
- exit $1
-}
-
-# $1: Image argument
-parseImageArg() {
- if [ -z "$1" ]; then
- echo $MESSAGE_MISSING_OR_INVALID_ARGS
- printUsageAndExit 1
- fi
- IMAGE=$1
-}
-
-parseArgs() {
- if [ $1 == "-h" ]; then
- parseImageArg $3
- HOSTS=$2
- else
- if [ $1 == "prod" ]; then
- if [ -z "$2" ]; then
- IMAGE=$DEFAULT_IMAGE_PROD
- else
- parseImageArg $2
- fi
- HOSTS=$( getAWSHosts "prod" )
- elif [ $1 == "stage" ]; then
- if [ -z "$2" ]; then
- IMAGE=$DEFAULT_IMAGE_STAGE
- else
- parseImageArg $2
- fi
- HOSTS=$( getAWSHosts "stage" )
- else
- echo
- echo $MESSAGE_MISSING_OR_INVALID_ARGS
- printUsageAndExit 1
- fi
- fi
-}
-
-# $1: Host IP
-# $2: Image
-# $3: Container name
-# $4: Container port
-runContainer() {
- $DOCKER_CMD --tlsverify=false -H tcp://$1 run \
- -de ENV=production \
- -e HUB_API_BASE_URL=$HUB_GATEWAY \
- -e REGISTRY_API_BASE_URL=$HUB_GATEWAY \
- -e SERVICE_NAME=$HUB_SERVICE_NAME \
- -e SERVICE_80_NAME=$HUB_SERVICE_NAME \
- -e NEW_RELIC_LICENSE_KEY=$NEW_RELIC_LICENSE_KEY \
- -e NEW_RELIC_APP_NAME=$NEW_RELIC_APP_NAME \
- -e PORT=80 \
- -p $4:80 \
- --restart "unless-stopped" \
- --name $3 \
- $2
-}
-
-# $1: Host IP
-# $2: Container name
-removeContainer() {
- $DOCKER_CMD --tlsverify=false -H tcp://$1 stop $2
- $DOCKER_CMD --tlsverify=false -H tcp://$1 rm $2
-}
-
-# $1: Host IP
-# $2: Image name
-pullImage() {
- $DOCKER_CMD --tlsverify=false -H tcp://$1 pull $2
-}
-
-# $1: Host IP
-# $2: Image
-deployHost() {
- echo
- echo "Starting to deploy ${YELLOW}$IMAGE${NO_COLOR} to ${YELLOW}$1${NO_COLOR}"
-
- pullImage $1 $2
-
- removeContainer $1 "hub_2_0"
- runContainer $1 $2 "hub_2_0" 6600
-
- removeContainer $1 "hub_2_1"
- runContainer $1 $2 "hub_2_1" 6601
-
- removeContainer $1 "hub_2_2"
- runContainer $1 $2 "hub_2_2" 6602
-}
-
-# Prerequisites:
-# 1- AWS
-type aws >/dev/null 2>&1 || { echo >&2 "AWS client is required. Make sure 'aws' command is available:\nhttp://docs.aws.amazon.com/cli/latest/userguide/installing.html"; exit 1; }
-# 2- JQ
-type jq >/dev/null 2>&1 || { echo >&2 "jq JSON processor is required. Make sure 'jq' command is available:\nbrew install jq"; exit 1; }
-
-# Case for no paremeters specified
-if [ -z "$1" ]
- then
- echo
- echo $MESSAGE_MISSING_OR_INVALID_ARGS
- printUsageAndExit 1
-fi
-
-parseArgs "$@"
-
-echo
-echo "Image: ${YELLOW}$IMAGE ${NO_COLOR}"
-echo "Hosts: ${YELLOW}$HOSTS${NO_COLOR}"
-echo
-read -p "Do you want to proceed? [Y/n]" -s -n 1 KEY
-echo
-if [[ ! $KEY =~ ^[Yy]$ ]]; then
- exit 1
-fi
-
-# Run deployment for each host
-for HUB_HOST in $HOSTS
-do
- deployHost $HUB_HOST $IMAGE
- echo
- echo "Sleeping for 10 seconds to let the containers boot up..."
- echo
- sleep 10
-done
-
-echo
-echo "${GREEN}All done!${NO_COLOR}"
-echo
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index c1257d728f..0000000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-dnsmasq:
- build: ./containers/dnsmasq
- ports:
- - "53:53/udp"
- environment:
- - DOCKER_HOST
-haproxy:
- build: ./containers/haproxy
- environment:
- - DOCKER_HOST
- ports:
- - "80:80"
- - "443:443"
-hub:
- build: .
- command: node --harmony ./server.js
- working_dir: /opt/hub/app/.build
- volumes:
- - .:/opt/hub
- - ./private-deps/docker-ux:/opt/node_modules/docker-ux
- - ./private-deps/hub-js-sdk:/opt/node_modules/hub-js-sdk
- ports:
- - "7001:3000"
- environment:
- DEBUG: "hub:*"
- HUB_API_BASE_URL: "https://hub-beta-stage.docker.com"
- REGISTRY_API_BASE_URL: "https://hub-beta-stage.docker.com"
- ENV: development
diff --git a/docker-for-mac/docker-toolbox.md b/docker-for-mac/docker-toolbox.md
deleted file mode 100644
index 1be3f5bc83..0000000000
--- a/docker-for-mac/docker-toolbox.md
+++ /dev/null
@@ -1,145 +0,0 @@
-
-
-# Docker for Mac vs. Docker Toolbox
-
-If you already have an installation of Docker Toolbox, please read these topics first to learn how Docker for Mac and Docker Toolbox differ, and how they can coexist.
-
-
-## The Docker Toolbox environment
-
-Docker Toolbox installs `docker`, `docker-compose` and `docker-machine` in `/usr/local/bin` on your Mac. It also installs VirtualBox. At installation time, Toolbox uses `docker-machine` to provision a VirtualBox VM called `default`, running the `boot2docker` Linux distribution, with [Docker Engine](https://docs.docker.com/engine/) with certificates located on your Mac at `$HOME/.docker/machine/machines/default`.
-
-Before you use `docker` or `docker-compose` on your Mac, you typically use the command `eval $(docker-machine env default)` to set environment variables so that `docker` or `docker-compose` know how to talk to Docker Engine running on VirtualBox.
-
-This setup is shown in the following diagram.
-
-
-
-
-## The Docker for Mac environment
-
-Docker for Mac is a Mac native application, that you install in `/Applications`. At installation time, it creates symlinks in `/usr/local/bin` for `docker` and `docker-compose`, to the version of the commands inside the Mac application bundle, in `/Applications/Docker.app/Contents/Resources/bin`.
-
-Here are some key points to know about Docker for Mac before you get started:
-
-* Docker for Mac does not use VirtualBox, but rather HyperKit, a lightweight OS X virtualization solution built on top of Hypervisor.framework in OS X 10.10 Yosemite and higher.
-
-* Installing Docker for Mac does not affect machines you created with Docker Machine. The install offers to copy containers and images from your local `default` machine (if one exists) to the new Docker for Mac HyperKit VM. If chosen, content from `default` is copied to the new Docker for Mac HyperKit VM, and your original `default` machine is kept as is.
-
-* The Docker for Mac application does not use `docker-machine` to provision that VM; but rather creates and manages it directly.
-
-* At installation time, Docker for Mac provisions an HyperKit VM based on Alpine Linux, running Docker Engine. It exposes the docker API on a socket in `/var/tmp/docker.sock`. Since this is the default location where `docker` will look if no environment variables are set, you can start using `docker` and `docker-compose` without setting any environment variables.
-
-This setup is shown in the following diagram.
-
-
-
-With Docker for Mac, you get only one VM, and you don't manage it. It is managed by the Docker for Mac application, which includes autoupdate to update the client and server versions of Docker.
-
-If you need several VMs and want to manage the version of the Docker client or server you are using, you can continue to use `docker-machine`, on the same machine, as described in [Docker Toolbox and Docker for Mac coexistence](#docker-toolbox-and-docker-for-mac-coexistence).
-
-
-## Setting up to run Docker for Mac
-
-1. Check whether Toolbox DOCKER environment variables are set:
-
- $ env | grep DOCKER
- DOCKER_HOST=tcp://192.168.99.100:2376
- DOCKER_MACHINE_NAME=default
- DOCKER_TLS_VERIFY=1
- DOCKER_CERT_PATH=/Users/victoriabialas/.docker/machine/machines/default
-
- If this command returns no output, you are ready to use Docker for Mac.
-
- If it returns output (as shown in the example), you need to unset the `DOCKER` environment variables to make the client talk to the Docker for Mac Engine (next step).
-
-2. Run the `unset` command on the following `DOCKER` environment variables to unset them in the current shell.
-
- unset DOCKER_TLS_VERIFY
- unset DOCKER_CERT_PATH
- unset DOCKER_MACHINE_NAME
- unset DOCKER_HOST
-
- Now, this command should return no output.
-
- $ env | grep DOCKER
-
- If you are using a Bash shell, you can use `unset ${!DOCKER_*}` to unset all DOCKER environment variables at once. (This will not work in other shells such as `.zsh`; you will need to unset each variable individually.)
-
->**Note**: If you have a shell script as part of your profile that sets these `DOCKER` environment variables automatically each time you open a command window, then you will need to unset these each time you want to use Docker for Mac.
-
-> **Warning**: If you install Docker for Mac on a machine where Docker Toolbox is installed, it will replace the `docker` and `docker-compose` command lines in `/usr/local/bin` with symlinks to its own versions.
-
-
-## Docker Toolbox and Docker for Mac coexistence
-
-You can use Docker for Mac and Docker Toolbox together on the same machine. When you want to use Docker for Mac, make sure all DOCKER environment variables are unset. You can do this in bash with `unset ${!DOCKER_*}`. When you want to use one of the VirtualBox VMs you have set with `docker-machine`, just run a `eval $(docker-machine env default)` (or the name of the machine you want to target). This will switch the current command shell to talk to the specified Toolbox machine.
-
-This setup is represented in the following diagram.
-
-
-
-
-## Using different versions of Docker tools
-
-The coexistence setup works as is as long as your VirtualBox VMs provisioned with `docker-machine` run the same version of Docker Engine as Docker for Mac. If you need to use VMs running older versions of Docker Engine, you can use a tool like Docker Version Manager to manage several versions of docker client.
-
-
-### Checking component versions
-
-Ideally, the Docker CLI client and Docker Engine should be the same version. Mismatches between client and server, and among host machines you might have created with Docker Machine can cause problems (client can't talk to the server or host machines).
-
-If you already have Docker Toolbox installed, and then install Docker for Mac, you might get a newer version of the Docker client. Run `docker version` in a command shell to see client and server versions. In this example, the client installed with Docker for Mac is `Version: 1.11.1` and the server (which was installed earlier with Toolbox) is Version: 1.11.0.
-
- $ docker version
- Client:
- Version: 1.11.1
- ...
-
- Server:
- Version: 1.11.0
- ...
-
-Also, if you created machines with Docker Machine (installed with Toolbox) then upgraded or installed Docker for Mac, you might have machines running different versions of Engine. Run `docker-machine ls` to view version information for the machines you created. In this example, the DOCKER column shows that each machine is running a different version of server.
-
- $ docker-machine ls
- NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
- aws-sandbox - amazonec2 Running tcp://52.90.113.128:2376 v1.10.0
- default * virtualbox Running tcp://192.168.99.100:2376 v1.10.1
- docker-sandbox - digitalocean Running tcp://104.131.43.236:2376 v1.10.0
-
-You might also run into a similar situation with Docker Universal Control Plan (UCP).
-
-There are a few ways to address this problem and keep using your older machines. One solution is to use a version manager like DVM.
-
-## How do I uninstall Docker Toolbox?
-
-You might decide that you do not need Toolbox now that you have Docker for Mac,
-and want to uninstall it. For details on how to perform a clean uninstall of
-Toolbox on the Mac, see [How to uninstall
-Toolbox](/toolbox/toolbox_install_mac.md#how-to-uninstall-toolbox) in the
-Toolbox Mac topics.
-
-
-
diff --git a/docker-for-mac/examples.md b/docker-for-mac/examples.md
deleted file mode 100644
index c784dd7042..0000000000
--- a/docker-for-mac/examples.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-# Example Applications
-
-Upcoming releases will include example applications especially tailored for Docker for Mac and Docker for Windows.
-
-Examples will highlight develop, build, and run workfows in several languages, including Node.js, Python, Ruby, and Java.
-
-For now, if you want get started experimenting with the Beta apps and Docker Compose (which is installed automatically with Docker Desktop Editions), have a look at these example applications in the Compose documentation. You should be able to run these with Docker for Mac and Docker for Windows.
-
-Quickstart: Compose and Django
-
-Quickstart: Compose and Rails
-
-Quickstart: Compose and WordPress
-
-See also [learn by example](/engine/tutorials/index.md) tutorials on building images, runnning containers, networking, managing data, and storing images on Docker Hub.
-
-
-
diff --git a/docker-for-mac/faqs.md b/docker-for-mac/faqs.md
deleted file mode 100644
index 191cddc350..0000000000
--- a/docker-for-mac/faqs.md
+++ /dev/null
@@ -1,156 +0,0 @@
-
-
-# Frequently Asked Questions (FAQs)
-
-**Looking for popular FAQs on Docker for Mac?** Check out the [Docker Knowledge Hub](http://success.docker.com/) for knowledge base articles, FAQs, technical support for various subscription levels, and more.
-
-### Stable and beta channels
-
-**Q: How do I get the stable or beta version of Docker for Mac?**
-
-A: Use the download links for the channels given in the topic [Download Docker for Mac](index.md#download-docker-for-mac).
-
-This topic also has more information about the two channels.
-
-**Q: What is the difference between the stable and beta versions of Docker for Mac?**
-
-A: Two different download channels are available for Docker for Mac:
-
-* The stable channel provides a general availability release-ready installer for a fully baked and tested, more reliable app. The stable version of Docker for Mac comes with the latest released version of Docker Engine. The release schedule is synched with Docker Engine releases and hotfixes.
-
-* The beta channel provides an installer with new features we are working on, but is not necessarily fully tested. It comes with the experimental version of Docker Engine. Bugs, crashes and issues are more likely to occur with the beta app, but you get a chance to preview new functionality, experiment, and provide feedback as the apps evolve. Releases are typically more frequent than for stable, often one or more per month.
-
-**Q: Can I switch back and forth between stable and beta versions of Docker for Mac?**
-
-A: Yes, you can switch between versions to try out the betas to see what's new, then go back to stable for other work. However, **you can have only one app installed at a time**. Switching back and forth between stable and beta apps can de-stabilize your development environment, particularly in cases where you switch from a newer (beta) channel to older (stable).
-
-For example, containers created with a newer beta version of Docker for Mac may not work after you switch back to stable because they may have been created leveraging beta features that aren't in stable yet. Just keep this in mind as you create and work with beta containers, perhaps in the spirit of a playground space where you are prepared to troubleshoot or start over.
-
-To safely switch between beta and stable versions be sure to save images and export the containers you need, then uninstall the current version before installing another. The workflow is described in more detail below.
-
-Do the following each time:
-
-1. Use `docker save` to save any images you want to keep. (See [save](/engine/reference/commandline/save.md) in the Docker Engine command line reference.)
-
-2. Use `docker export` to export containers you want to keep. (See [export](/engine/reference/commandline/export.md) in the Docker Engine command line reference.)
-
-3. Uninstall the current app (whether stable or beta).
-
-4. Install a different version of the app (stable or beta).
-
-### What is Docker.app?
-
-`Docker.app` is Docker for Mac, a bundle of Docker client, and Docker
-Engine. `Docker.app` uses the OS X
-Hypervisor.framework (part of MacOS X 10.10 Yosemite and higher)
-to run containers, meaning that _**no separate VirtualBox is required**_.
-
-### What kind of feedback are we looking for?
-
-Everything is fair game. We'd like your impressions on the download-install process, startup, functionality available, the GUI, usefulness of the app,
-command line integration, and so on. Tell us about problems, what you like, or functionality you'd like to see added.
-
-We are especially interested in getting feedback on the new swarm mode described in [Docker Swarm](/engine/swarm/index.md). A good place to start is the [tutorial](/engine/swarm/swarm-tutorial/index.md).
-
-### What if I have problems or questions?
-
-You can find the list of frequent issues in
-[Logs and Troubleshooting](troubleshoot.md).
-
-If you do not find a solution in Troubleshooting, browse issues on [Docker for Mac issues on GitHub](https://github.com/docker/for-mac/issues) or create a new one. You can also create new issues based on diagnostics. To learn more, see [Diagnose problems, send feedback, and create GitHub issues](troubleshoot.md#diagnose-problems-send-feedback-and-create-github-issues).
-
-[Docker for Mac forum](https://forums.docker.com/c/docker-for-mac) provides discussion threads as well, and you can create discussion topics there, but we recommend using the GitHub issues over the forums for better tracking and response.
-
-### Can I use Docker for Mac with new swarm mode?
-
-Yes, you can use Docker for Mac to test single-node features of [swarm mode](/engine/swarm/index.md) introduced with Docker Engine 1.12, including
-initializing a swarm with a single node, creating services, and scaling
-services. Docker “Moby” on Hyperkit will serve as the single swarm node. You can
-also use Docker Machine, which comes with Docker for Mac, to create and
-experiment a multi-node swarm. Check out the tutorial at [Get started with swarm mode](/engine/swarm/swarm-tutorial/index.md).
-
-### How do I connect to the remote Docker Engine API?
-
-You might need to provide the location of the remote API for Docker clients and development tools.
-
-On Docker for Mac, clients can connect to the Docker Engine through a Unix socket: `unix:///var/run/docker.sock`.
-
-See also [Docker Remote API](/engine/reference/api/docker_remote_api.md) and Docker for Mac forums topic [Using pycharm Docker plugin..](https://forums.docker.com/t/using-pycharm-docker-plugin-with-docker-beta/8617).
-
-If you are working with applications like [Apache Maven](https://maven.apache.org/) that expect settings for `DOCKER_HOST` and `DOCKER_CERT_PATH` environment variables, specify these to connect to Docker instances through Unix sockets. For example:
-
- export DOCKER_HOST=unix:///var/run/docker.sock
-
-### How do I connect from a container to a service on the host?
-
-The Mac has a changing IP address (or none if you have no network access). Our current recommendation is to attach an unused IP to the `lo0` interface on the Mac so that containers can connect to this address.
-
-For a full explanation and examples, see [I want to connect from a container to a service on the host](networking.md#i-want-to-connect-from-a-container-to-a-service-on-the-host) under [Known Limitations, Use Cases, and Workarounds](networking.md#known-limitations-use-cases-and-workarounds) in the Networking topic.
-
-### How do I to connect to a container from the Mac?
-
-Our current recommendation is to publish a port, or to connect from another container. Note that this is what you have to do even on Linux if the container is on an overlay network, not a bridge network, as these are not routed.
-
-For a full explanation and examples, see [I want to connect to a container from the Mac](networking.md#i-want-to-connect-to-a-container-from-the-mac) under [Known Limitations, Use Cases, and Workarounds](networking.md#known-limitations-use-cases-and-workarounds) in the Networking topic.
-
-### What are system requirements for Docker for Mac?
-
-Note that you need a Mac that supports hardware virtualization, which is most non ancient ones; i.e., use OS X `10.10.3+` or `10.11` (OS X Yosemite or OS X El Capitan). See also "What to know before you install" in [Getting Started](index.md).
-
-
-### Do I need to uninstall Docker Toolbox to use Docker for Mac?
-
-No, you can use these side by side. Docker Toolbox leverages a Docker daemon installed using `docker-machine` in a machine called `default`. Running `eval $(docker-machine env default)` in a shell sets DOCKER environment variables locally to connect to the default machine using Engine from Toolbox. To check whether Toolbox DOCKER environment variables are set, run `env | grep DOCKER`.
-
-To make the client talk to the Docker for Mac Engine, run the command `unset ${!DOCKER_*}` to unset all DOCKER environment variables in the current shell. (Now, `env | grep DOCKER` should return no output.) You can have multiple command line shells open, some set to talk to Engine from Toolbox and others set to talk to Docker for Mac. The same applies to `docker-compose`.
-
-### How do I uninstall Docker Toolbox?
-
-You might decide that you do not need Toolbox now that you have Docker for Mac,
-and want to uninstall it. For details on how to perform a clean uninstall of
-Toolbox on the Mac, see [How to uninstall
-Toolbox](/toolbox/toolbox_install_mac.md#how-to-uninstall-toolbox) in the
-Toolbox Mac topics.
-
-### What is HyperKit?
-
-HyperKit is a hypervisor built on top of the Hypervisor.framework in OS X 10.10 Yosemite and higher. It runs entirely in userspace and has no other dependencies.
-
-We use HyperKit to eliminate the need for other VM products, such as Oracle Virtualbox or VMWare Fusion.
-
-### What is the benefit of HyperKit?
-
-It is thinner than VirtualBox and VMWare fusion, and the version we include is tailor made for Docker workloads on the Mac.
-
-### Why is com.docker.vmnetd running after I quit the app?
-
-The privileged helper process `com.docker.vmnetd` is started by `launchd` and runs in the background. The process will not
-consume any resources unless Docker.app connects to it, so it's safe to ignore.
-
-### Can I pass through a USB device to a container?
-
- Unfortunately it is not possible to pass through a USB device (or a serial port) to a container. For use cases requiring this, we recommend the use of [Docker Toolbox](/toolbox/overview.md).
-
-
-
diff --git a/docker-for-mac/images/About.png b/docker-for-mac/images/About.png
deleted file mode 100644
index 4fc78daf7b..0000000000
Binary files a/docker-for-mac/images/About.png and /dev/null differ
diff --git a/docker-for-mac/images/changelog-placeholder.png b/docker-for-mac/images/changelog-placeholder.png
deleted file mode 100644
index 87717b49cd..0000000000
Binary files a/docker-for-mac/images/changelog-placeholder.png and /dev/null differ
diff --git a/docker-for-mac/images/chat.png b/docker-for-mac/images/chat.png
deleted file mode 100644
index 597db5aae9..0000000000
Binary files a/docker-for-mac/images/chat.png and /dev/null differ
diff --git a/docker-for-mac/images/console_logs.png b/docker-for-mac/images/console_logs.png
deleted file mode 100644
index 951eac2820..0000000000
Binary files a/docker-for-mac/images/console_logs.png and /dev/null differ
diff --git a/docker-for-mac/images/console_logs_search.png b/docker-for-mac/images/console_logs_search.png
deleted file mode 100644
index bd3583b3cb..0000000000
Binary files a/docker-for-mac/images/console_logs_search.png and /dev/null differ
diff --git a/docker-for-mac/images/diagnose-d4mac-issues-template.png b/docker-for-mac/images/diagnose-d4mac-issues-template.png
deleted file mode 100644
index 328328e516..0000000000
Binary files a/docker-for-mac/images/diagnose-d4mac-issues-template.png and /dev/null differ
diff --git a/docker-for-mac/images/diagnose-id-forums.png b/docker-for-mac/images/diagnose-id-forums.png
deleted file mode 100644
index ae902907fb..0000000000
Binary files a/docker-for-mac/images/diagnose-id-forums.png and /dev/null differ
diff --git a/docker-for-mac/images/diagnose-issue.png b/docker-for-mac/images/diagnose-issue.png
deleted file mode 100644
index 4dd243a2c2..0000000000
Binary files a/docker-for-mac/images/diagnose-issue.png and /dev/null differ
diff --git a/docker-for-mac/images/diagnose.png b/docker-for-mac/images/diagnose.png
deleted file mode 100644
index 8c3a5945ca..0000000000
Binary files a/docker-for-mac/images/diagnose.png and /dev/null differ
diff --git a/docker-for-mac/images/diagnostic-forums-topic.png b/docker-for-mac/images/diagnostic-forums-topic.png
deleted file mode 100644
index 09973b0c96..0000000000
Binary files a/docker-for-mac/images/diagnostic-forums-topic.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app-drag-old.png b/docker-for-mac/images/docker-app-drag-old.png
deleted file mode 100644
index b05124d3b1..0000000000
Binary files a/docker-for-mac/images/docker-app-drag-old.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app-drag.png b/docker-for-mac/images/docker-app-drag.png
deleted file mode 100644
index b4744e47d8..0000000000
Binary files a/docker-for-mac/images/docker-app-drag.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app-in-apps-no-annotation.png b/docker-for-mac/images/docker-app-in-apps-no-annotation.png
deleted file mode 100644
index c1ace536af..0000000000
Binary files a/docker-for-mac/images/docker-app-in-apps-no-annotation.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app-in-apps.png b/docker-for-mac/images/docker-app-in-apps.png
deleted file mode 100644
index 2290320ea4..0000000000
Binary files a/docker-for-mac/images/docker-app-in-apps.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app-log.png b/docker-for-mac/images/docker-app-log.png
deleted file mode 100644
index 6aa36e7aef..0000000000
Binary files a/docker-for-mac/images/docker-app-log.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-app.png b/docker-for-mac/images/docker-app.png
deleted file mode 100644
index 530fccb9ca..0000000000
Binary files a/docker-for-mac/images/docker-app.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-for-mac-and-toolbox.png b/docker-for-mac/images/docker-for-mac-and-toolbox.png
deleted file mode 100644
index 1bbf09bd0a..0000000000
Binary files a/docker-for-mac/images/docker-for-mac-and-toolbox.png and /dev/null differ
diff --git a/docker-for-mac/images/docker-for-mac-install.png b/docker-for-mac/images/docker-for-mac-install.png
deleted file mode 100644
index 6a51c8b858..0000000000
Binary files a/docker-for-mac/images/docker-for-mac-install.png and /dev/null differ
diff --git a/docker-for-mac/images/download.png b/docker-for-mac/images/download.png
deleted file mode 100644
index bfa896d89e..0000000000
Binary files a/docker-for-mac/images/download.png and /dev/null differ
diff --git a/docker-for-mac/images/hello-world-nginx.png b/docker-for-mac/images/hello-world-nginx.png
deleted file mode 100644
index bf6cb20496..0000000000
Binary files a/docker-for-mac/images/hello-world-nginx.png and /dev/null differ
diff --git a/docker-for-mac/images/hello-world.png b/docker-for-mac/images/hello-world.png
deleted file mode 100644
index ededac6bbd..0000000000
Binary files a/docker-for-mac/images/hello-world.png and /dev/null differ
diff --git a/docker-for-mac/images/hockeyapp-docker.png b/docker-for-mac/images/hockeyapp-docker.png
deleted file mode 100644
index d07f85b97c..0000000000
Binary files a/docker-for-mac/images/hockeyapp-docker.png and /dev/null differ
diff --git a/docker-for-mac/images/log-files-finder.png b/docker-for-mac/images/log-files-finder.png
deleted file mode 100644
index a006274086..0000000000
Binary files a/docker-for-mac/images/log-files-finder.png and /dev/null differ
diff --git a/docker-for-mac/images/logs.png b/docker-for-mac/images/logs.png
deleted file mode 100644
index d16ff51520..0000000000
Binary files a/docker-for-mac/images/logs.png and /dev/null differ
diff --git a/docker-for-mac/images/mac-activity-monitor-docker-app.png b/docker-for-mac/images/mac-activity-monitor-docker-app.png
deleted file mode 100644
index 4510767cea..0000000000
Binary files a/docker-for-mac/images/mac-activity-monitor-docker-app.png and /dev/null differ
diff --git a/docker-for-mac/images/mac-install-success-docker-ps.png b/docker-for-mac/images/mac-install-success-docker-ps.png
deleted file mode 100644
index 62e8b9aad1..0000000000
Binary files a/docker-for-mac/images/mac-install-success-docker-ps.png and /dev/null differ
diff --git a/docker-for-mac/images/mac-install-success-docker-wait.png b/docker-for-mac/images/mac-install-success-docker-wait.png
deleted file mode 100644
index a6bb45f816..0000000000
Binary files a/docker-for-mac/images/mac-install-success-docker-wait.png and /dev/null differ
diff --git a/docker-for-mac/images/menu.png b/docker-for-mac/images/menu.png
deleted file mode 100644
index 659e329309..0000000000
Binary files a/docker-for-mac/images/menu.png and /dev/null differ
diff --git a/docker-for-mac/images/privacy.png b/docker-for-mac/images/privacy.png
deleted file mode 100644
index 7f0a52aaa7..0000000000
Binary files a/docker-for-mac/images/privacy.png and /dev/null differ
diff --git a/docker-for-mac/images/proxy-settings.png b/docker-for-mac/images/proxy-settings.png
deleted file mode 100644
index 7411df9afc..0000000000
Binary files a/docker-for-mac/images/proxy-settings.png and /dev/null differ
diff --git a/docker-for-mac/images/remove-app-instances.png b/docker-for-mac/images/remove-app-instances.png
deleted file mode 100644
index 5ba3588c4e..0000000000
Binary files a/docker-for-mac/images/remove-app-instances.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-advanced.png b/docker-for-mac/images/settings-advanced.png
deleted file mode 100644
index 9d735f9555..0000000000
Binary files a/docker-for-mac/images/settings-advanced.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-diagnose-id.png b/docker-for-mac/images/settings-diagnose-id.png
deleted file mode 100644
index f149356cd3..0000000000
Binary files a/docker-for-mac/images/settings-diagnose-id.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-diagnose.png b/docker-for-mac/images/settings-diagnose.png
deleted file mode 100644
index 28547df813..0000000000
Binary files a/docker-for-mac/images/settings-diagnose.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-diagnostic-results-only.png b/docker-for-mac/images/settings-diagnostic-results-only.png
deleted file mode 100644
index 82f1fd3fda..0000000000
Binary files a/docker-for-mac/images/settings-diagnostic-results-only.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-file-share-choose.png b/docker-for-mac/images/settings-file-share-choose.png
deleted file mode 100644
index 248407e8e7..0000000000
Binary files a/docker-for-mac/images/settings-file-share-choose.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-file-share.png b/docker-for-mac/images/settings-file-share.png
deleted file mode 100644
index 69b26c3802..0000000000
Binary files a/docker-for-mac/images/settings-file-share.png and /dev/null differ
diff --git a/docker-for-mac/images/settings-uninstall.png b/docker-for-mac/images/settings-uninstall.png
deleted file mode 100644
index bfb65af22c..0000000000
Binary files a/docker-for-mac/images/settings-uninstall.png and /dev/null differ
diff --git a/docker-for-mac/images/settings.png b/docker-for-mac/images/settings.png
deleted file mode 100644
index a9f9e95f67..0000000000
Binary files a/docker-for-mac/images/settings.png and /dev/null differ
diff --git a/docker-for-mac/images/startup-help.png b/docker-for-mac/images/startup-help.png
deleted file mode 100644
index 0db4aca3db..0000000000
Binary files a/docker-for-mac/images/startup-help.png and /dev/null differ
diff --git a/docker-for-mac/images/toolbox-install.png b/docker-for-mac/images/toolbox-install.png
deleted file mode 100644
index fa10bc28b9..0000000000
Binary files a/docker-for-mac/images/toolbox-install.png and /dev/null differ
diff --git a/docker-for-mac/images/whale-in-menu-bar.png b/docker-for-mac/images/whale-in-menu-bar.png
deleted file mode 100644
index 780201e127..0000000000
Binary files a/docker-for-mac/images/whale-in-menu-bar.png and /dev/null differ
diff --git a/docker-for-mac/images/whale-x.png b/docker-for-mac/images/whale-x.png
deleted file mode 100644
index c99e8d5898..0000000000
Binary files a/docker-for-mac/images/whale-x.png and /dev/null differ
diff --git a/docker-for-mac/images/whale.png b/docker-for-mac/images/whale.png
deleted file mode 100644
index 894c379349..0000000000
Binary files a/docker-for-mac/images/whale.png and /dev/null differ
diff --git a/docker-for-mac/index.md b/docker-for-mac/index.md
deleted file mode 100644
index 41049349f8..0000000000
--- a/docker-for-mac/index.md
+++ /dev/null
@@ -1,293 +0,0 @@
-
-
-# Getting Started with Docker for Mac
-
-Welcome to Docker for Mac!
-
-Please read through these topics on how to get started. To **give us feedback** on your experience with the app and report bugs or problems, log in to our [Docker for Mac forum](https://forums.docker.com/c/docker-for-mac).
-
->**Already have Docker for Mac?** If you already have Docker for Mac installed, and are ready to get started, skip over to the [Getting Started with Docker](/engine/getstarted/index.md) tutorial.
-
-
-## Download Docker for Mac
-
-If you have not already done so, please install Docker for Mac. You can download installers from the stable or beta channel.
-
-For more about stable and beta channels, see the [FAQs](faqs.md#stable-and-beta-channels).
-
-
-
-
Stable channel
-
Beta channel
-
-
-
This installer is fully baked and tested, and comes with the latest GA version of Docker Engine.
This is the best channel to use if you want a reliable platform to work with.
These releases follow a version schedule with a longer lead time than the betas, synched with Docker Engine releases and hotfixes.
-
-
This installer offers cutting edge features and comes with the experimental version of Docker Engine, which is described in the Docker Experimental Features README on GitHub.
This is the best channel to use if you want to experiment with features we are working on as they become available, and can weather some instability and bugs. This channel is a continuation of the beta program, where you can provide feedback as the apps evolve. Releases are typically more frequent than for stable, often one or more per month.
-
->**Important Notes**:
->
->* Docker for Mac requires OS X 10.10.3 Yosemite or newer running on a 2010 or newer Mac, with Intel's hardware support for MMU virtualization. Please see [What to know before you install](#what-to-know-before-you-install) for a full list of prerequisites.
->
->* You can switch between beta and stable versions, but _you must have only one app installed at a time_. Also, you will need to save images and export containers you want to keep before uninstalling the current version before installing another. For more about this, see the [FAQs about beta and stable channels](faqs.md#stable-and-beta-channels).
-
-
-## What to know before you install
-
-* **README FIRST for Docker Toolbox and Docker Machine users**: If you are already running Docker on your machine, first read [Docker for Mac vs. Docker Toolbox](docker-toolbox.md) to understand the impact of this installation on your existing setup, how to set your environment for Docker for Mac, and how the two products can coexist.
-
-* **Relationship to Docker Machine**: Installing Docker for Mac does not affect machines you created with Docker Machine. You'll get the option to copy containers and images from your local `default` machine (if one exists) to the new Docker for Mac HyperKit VM.
-
-* **System Requirements**: Docker for Mac will launch only if all these requirements are met.
-
- - Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT)
-
- - OS X 10.10.3 Yosemite or newer
-
- - At least 4GB of RAM
-
- - VirtualBox prior to version 4.3.30 must NOT be installed (it is incompatible with Docker for Mac)
-
- > **Note**: If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle Virtual Box instead of HyperKit.
-
-* **What the install includes**: The installation provides [Docker Engine](https://docs.docker.com/engine/userguide/intro/), Docker CLI client, [Docker Compose](https://docs.docker.com/compose/overview/), and [Docker Machine](https://docs.docker.com/machine/overview/).
-
-
-## Step 1. Install and Run Docker for Mac
-
-1. Double-click `Docker.dmg` to open the installer, then drag Moby the whale to the Applications folder.
-
- 
-
- You will be asked to authorize `Docker.app` with your system password during the install process. Privileged access is needed to install networking components and links to the Docker apps.
-
-2. Double-click `Docker.app` to start Docker.
-
- 
-
- The whale in the top status bar indicates that Docker is running, and accessible from a terminal.
-
- 
-
- If you just installed the app, you also get a success message with suggested next steps and a link to this documentation. Click the whale () in the status bar to dismiss this popup.
-
- 
-
-3. Click the whale () to get Preferences, and other options.
-
- 
-
-4. Select **About Docker** to verify that you have the latest version.
-
- Congratulations! You are up and running with Docker for Mac.
-
-
-## Step 2. Check versions of Docker Engine, Compose, and Machine
-
-Run these commands to test if your versions of `docker`, `docker-compose`, and `docker-machine` are up-to-date and compatible with `Docker.app`.
-
-```shell
- $ docker --version
- Docker version 1.12.0, build 8eab29e
-
- $ docker-compose --version
- docker-compose version 1.8.0, build f3628c7
-
- $ docker-machine --version
- docker-machine version 0.8.0, build b85aac1
-```
-
->**Note**: The above is an example. Your output will differ if you are running different (e.g., newer) versions.
-
-
-## Step 3. Explore the application and run examples
-
-1. Open a command-line terminal, and run some Docker commands to verify that Docker is working as expected.
-
- Some good commands to try are `docker version` to check that you have the latest release installed, and `docker ps` and `docker run hello-world` to verify that Docker is running.
-
-2. For something more adventurous, start a Dockerized web server.
-
- ```shell
- docker run -d -p 80:80 --name webserver nginx
- ```
-
- If the image is not found locally, Docker will pull it from Docker Hub.
-
- In a web browser, go to `http://localhost/` to bring up the home page. (Since you specified the default HTTP port, it isn't necessary to append `:80` at the end of the URL.)
-
- 
-
- >**Note:** Early beta releases used `docker` as the hostname to build the URL. Now, ports are exposed on the private IP addresses of the VM and forwarded to `localhost` with no other host name set. See also, [Release Notes](release-notes.md) for Beta 9.
- >
-
-3. Run `docker ps` while your web server is running to see details on the webserver container.
-
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 56f433965490 nginx "nginx -g 'daemon off" About a minute ago Up About a minute 0.0.0.0:80->80/tcp, 443/tcp webserver
-
-4. Stop or remove containers and images.
-
- The `nginx` webserver will continue to run in the container on that port until you stop and/or remove the container. If you want to stop the webserver, type: `docker stop webserver` and start it again with `docker start webserver`.
-
- To stop and remove the running container with a single command, type: `docker rm -f webserver`. This will remove the container, but not the `nginx` image. You can list local images with `docker images`. You might want to keep some images around so that you don't have to pull them again from Docker Hub. To remove an image you no longer need, use `docker rmi |`. For example, `docker rmi nginx`.
-
-**Want more example applications?** - For more example walkthroughs that include setting up services and databases in Docker Compose, see [Example Applications](examples.md).
-
-## Preferences
-
-Choose --> **Preferences** from the menu bar. You can set the following runtime options.
-
-#### General
-
-
-
-* Docker for Mac is set to **automatically start** when you log in. Uncheck the login autostart option if you don't want Docker to start when you open your session.
-
-* Docker for Mac is set to **check for updates** automatically and notify you when an update is available. If an update is found, click **OK** to accept and install it (or cancel to keep the current version). If you disable the check for updates, you can still find out about updates manually by choosing -> **Check for Updates**
-
-* Check **Exclude VM from Time Machine backups** to prevent Time Machine from backing up the Docker for Mac virtual machine.
-
-* **CPUs** - By default, Docker for Mac is set to use 2 processors. You can increase processing power for the app by setting this to a higher number, or lower it to have Docker for Mac use fewer computing resources.
-
-* **Memory** - By default, Docker for Mac is set to use `2` GB runtime memory, allocated from the total available memory on your Mac. You can increase the RAM on the app to get faster performance by setting this number higher (for example to `3`) or lower (to `1`) if you want Docker for Mac to use less memory.
-
-#### Advanced
-
-
-
-* **Adding registries** - As an alternative to using [Docker Hub](https://hub.docker.com/) to store your public or private images or [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/), you can use Docker to set up your own insecure [registry](https://docs.docker.com/registry/introduction/). Add URLs for insecure registries and registry mirrors on which to host your images.
-
-* **HTTP proxy settings** - Docker for Mac will detect HTTP/HTTPS Proxy Settings and automatically propagate these to Docker and to your containers.
-For example, if you set your proxy settings to `http://proxy.example.com`, Docker will use this proxy when pulling containers.
-
-
-#### File sharing
-
-You can decide which directories on your Mac to share with containers.
-
-* **Add a Directory** - Click `+` and navigate to the directory you want to add.
-
- 
-
-* Click **Apply & Restart** to make the directory available to
- containers using Docker's bind mount (`-v`) feature.
-
-There are some limitations on the directories that can be shared:
-
-* They cannot be a subdirectory of an already shared directory.
-
-* They cannot already exist inside of Docker.
-
-See [Namespaces](osxfs.md#namespaces) in the topic on [osxfs file system sharing](osxfs.md) for more information.
-
-#### Privacy
-
-You can set Docker for Mac to auto-send diagnostics, crash reports, and usage data. This information can help Docker improve the application and get more context for troubleshooting problems.
-
-Uncheck any of the options to opt out and prevent auto-send of data. Docker may prompt for more information in some cases, even with auto-send enabled.
-
-
-
-Also, you can enable or disable these auto-reporting settings with one click on the information popup when you first start Docker.
-
-
-
-## Uninstall or reset
-Choose --> **Preferences** from the menu bar, then click **Uninstall / Reset** on the Preferences dialog.
-
-
-
-* **Uninstall** - Choose this option to remove Docker for Mac from your system.
-
-* **Reset to factory defaults** - Choose this option to reset all options on Docker for Mac to its initial state, the same as when it was first installed.
-
-You can uninstall Docker for Mac from the command line with this command: `<path to Docker app> --uninstall`. If Docker is installed in the default location, the following command will provide a clean uninstall.
-
-```shell
-$ /Applications/Docker.app/Contents/MacOS/Docker --uninstall
-Docker is running, exiting...
-Docker uninstalled successfully. You can move the Docker application to the trash.
-```
-
-You might want to use the command-line uninstall if, for example, you find that the app is non-functional, and you cannot uninstall it from the menu.
-
-
-## Installing bash completion
-
-If you are using [bash completion](https://www.debian-administration.org/article/316/An_introduction_to_bash_completion_part_1), such as [homebrew bash-completion on Mac](http://davidalger.com/development/bash-completion-on-os-x-with-brew/), bash completion scripts for
-- docker
-- docker-machine
-- docker-compose
-may be found inside Docker.app, in the Contents/Resources/etc folder.
-
-To activate bash completion, these files need to be copied or symlinked to
-your bash_completion.d directory. For example, if you use Homebrew:
-
-```
-cd /usr/local/etc/bash_completion.d
-ln -s /Applications/Docker.app/Contents/Resources/etc/docker.bash-completion
-ln -s /Applications/Docker.app/Contents/Resources/etc/docker-machine.bash-completion
-ln -s /Applications/Docker.app/Contents/Resources/etc/docker-compose.bash-completion
-```
-
-## Where to go next
-
-* Try out the [Getting Started with Docker](/engine/getstarted/index.md) tutorial.
-
-* Dig in deeper with [learn by example](/engine/tutorials/index.md) tutorials on building images, running containers, networking, managing data, and storing images on Docker Hub.
-
-* See [Example Applications](examples.md) for example applications that include setting up services and databases in Docker Compose.
-
-* Interested in trying out the new [swarm mode](/engine/swarm/index.md) on Docker Engine v1.12?
-
- See [Get started with swarm mode](/engine/swarm/swarm-tutorial/index.md), a tutorial which includes specifics on how to leverage your Docker for Mac installation to run single and multi-node swarms.
-
- Also, try out the Swarm examples in [docker labs](https://github.com/docker/labs/tree/master/swarm-mode/beginner-tutorial). Run the `bash script` and follow the accompanying [Docker Swarm Tutorial](https://github.com/docker/labs/blob/master/swarm-mode/beginner-tutorial/README.md). The script uses Docker Machine to create a multi-node swarm, then walks you through various Swarm tasks and commands.
-
-* For a summary of Docker command line interface (CLI) commands, see [Docker CLI Reference Guide](/engine/reference/index.md).
-
-* Check out the blog posts on Docker for Mac and Docker for Windows public betas, and earlier posts on the initial private beta.
-
-* Please give feedback on your experience with the app and report bugs and problems by logging into our [Docker for Mac forum](https://forums.docker.com/c/docker-for-mac).
-
-
-
-
diff --git a/docker-for-mac/menu.md b/docker-for-mac/menu.md
deleted file mode 100644
index fa706b6da0..0000000000
--- a/docker-for-mac/menu.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-# Docker for Mac
diff --git a/docker-for-mac/multi-arch.md b/docker-for-mac/multi-arch.md
deleted file mode 100644
index 6bd76ff4ef..0000000000
--- a/docker-for-mac/multi-arch.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-# Leveraging Multi-CPU Architecture Support
-
-Docker for Mac provides `binfmt_misc` multi architecture support, so you can run containers for different Linux architectures, such as `arm`, `mips`, `ppc64le` and even `s390x`.
-
-This should just work without any configuration, but the containers you run need to have the appropriate `qemu` binary inside the container before you can do this. (See QEMU for more information.)
-
-So, you can run a container that already has this set up, like the resin arm builds:
-
-```
-$ docker run resin/armv7hf-debian uname -a
-
-Linux 7ed2fca7a3f0 4.1.12 #1 SMP Tue Jan 12 10:51:00 UTC 2016 armv7l GNU/Linux
-
-$ docker run justincormack/ppc64le-debian uname -a
-
-Linux edd13885f316 4.1.12 #1 SMP Tue Jan 12 10:51:00 UTC 2016 ppc64le GNU/Linux
-
-```
-
-Running containers pre-configured with `qemu` has the advantage that you can use these to do builds `FROM`, so you can build new Multi-CPU architecture packages.
-
-Alternatively, you can bind mount in the `qemu` static binaries to any cross-architecture package, such as the semi-official ones using a script like this one https://github.com/justincormack/cross-docker. (See the README at the given link for details on how to use the script.)
-
-
-
diff --git a/docker-for-mac/networking.md b/docker-for-mac/networking.md
deleted file mode 100644
index 8a7b579f15..0000000000
--- a/docker-for-mac/networking.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
-# Networking
-
-Docker for Mac provides several networking features to make it easier to use.
-
-## Features
-
-### VPN Passthrough
-
-Docker for Mac's networking can work when attached to a VPN.
-To do this, Docker for Mac intercepts traffic from the `HyperKit` and injects it into OSX as if it originated from the Docker application.
-
-### Port Mapping
-
-When you run a container with the `-p` argument, for example:
-```
-$ docker run -p 80:80 -d nginx
-```
-Docker for Mac will make the container port available at `localhost`.
-
-### HTTP/HTTPS Proxy Support
-
-Docker for Mac will detect HTTP/HTTPS Proxy Settings from OSX and automatically propagate these to Docker and to your containers.
-For example, if you set your proxy settings to `http://proxy.example.com` in OSX, Docker will use this proxy when pulling containers.
-
-
-
-When you start a container, you will see that your proxy settings propagate into the containers. For example:
-
-```
-$ docker run -it alpine env
-PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-HOSTNAME=b7edf988b2b5
-TERM=xterm
-HOME=/root
-HTTP_PROXY=http://proxy.example.com:3128
-http_proxy=http://proxy.example.com:3128
-no_proxy=*.local, 169.254/16
-```
-
-You can see from the above output that the `HTTP_PROXY`, `http_proxy` and `no_proxy` environment variables are set.
-When your proxy configuration changes, Docker restarts automatically to pick up the new settings.
-If you have containers that you wish to keep running across restarts, you should consider using [restart policies](https://docs.docker.com/engine/reference/run/#restart-policies-restart)
-
-## Known Limitations, Use Cases, and Workarounds
-
-Following is a summary of current limitations on the Docker for Mac networking stack, along with some ideas for workarounds.
-
-### There is no docker0 bridge on OSX
-
-Because of the way networking is implemented in Docker for Mac, you cannot see a `docker0` interface in OSX.
-This interface is actually within `HyperKit`.
-
-### I cannot ping my containers
-
-Unfortunately, due to limitations in OSX, we're unable to route traffic to containers, and from containers back to the host.
-
-### Per-container IP addressing is not possible
-
-The docker (Linux) bridge network is not reachable from the OSX host.
-
-### Use cases and workarounds
-
-There are two scenarios that the above limitations will affect:
-
-#### I want to connect from a container to a service on the host
-
-The Mac has a changing IP address (or none if you have no network access). Our current recommendation is to attach an unused IP to the `lo0` interface on the Mac; for example: `sudo ifconfig lo0 alias 10.200.10.1/24`, and make sure that your service is listening on this address or `0.0.0.0` (ie not `127.0.0.1`). Then containers can connect to this address.
-
-#### I want to connect to a container from the Mac
-
-Port forwarding works for `localhost`; `--publish`, `-p`, or `-P` all work. Ports exposed from Linux are forwarded to the Mac.
-
-Our current recommendation is to publish a port, or to connect from another container. Note that this is what you have to do even on Linux if the container is on an overlay network, not a bridge network, as these are not routed.
-
-The command to run the `nginx` webserver shown in [Getting Started](index.md#explore) is an example of this.
-
-```shell
-docker run -d -p 80:80 --name webserver nginx
-```
-
-To clarify the syntax, the following two commands both expose port `80` on the container to port `8000` on the host:
-
- docker run --publish 8000:80 --name webserver nginx
- docker run -p 8000:80 --name webserver nginx
-
-To expose all ports, use the `-P` flag. For example, the following command starts a container (in detached mode) and the `-P` exposes all ports on the container to random ports on the host.
-
- docker run -d -P --name webserver nginx
-
-See the [run command](/engine/reference/commandline/run.md) for more details on publish options used with `docker run`.
-
-#### A view into implementation
-
-We understand that these workarounds are not ideal, but there are several problems. In particular, there is a bug in OSX that is only fixed in 10.12 and is not being backported as far as we can tell, which means that we could not support this in all supported OSX versions. In addition, this network setup would require root access which we are trying to avoid entirely in Docker for Mac (we currently have a very small root helper that we are trying to remove).
-
-
-
-
diff --git a/docker-for-mac/opensource.md b/docker-for-mac/opensource.md
deleted file mode 100644
index a21338693d..0000000000
--- a/docker-for-mac/opensource.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-# Open Source Components and Licensing
-
-Docker Desktop Editions are built using open source software. For
-details on the licensing, choose -->
-**About Docker** from within the application, then click
-**Acknowledgements**.
-
-Docker Desktop Editions distribute some components that are licensed under the GNU General Public License. You can download the source for these components [here](https://download.docker.com/opensource/License.tar.gz).
-
-The sources for `qemu-img` can be obtained [here](http://wiki.qemu-project.org/download/qemu-2.4.1.tar.bz2).
-The sources for the `gettext` and `glib` libraries that `qemu-img` requires were obtained from [Homebrew](https://brew.sh) and may be retrieved using `brew install --build-from-source gettext glib`.
-
-
-
diff --git a/docker-for-mac/osxfs.md b/docker-for-mac/osxfs.md
deleted file mode 100644
index 6d17758967..0000000000
--- a/docker-for-mac/osxfs.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-
-# File system sharing (osxfs)
-
-`osxfs` is a new shared file system solution, exclusive to Docker for
-Mac. `osxfs` provides a close-to-native
-user experience for bind mounting OS X file system trees into Docker
-containers. To this end, `osxfs` features a number of unique
-capabilities as well as differences from a classical Linux file system.
-
-- [Case sensitivity](#case-sensitivity)
-- [Access control](#access-control)
-- [Namespaces](#namespaces)
-- [Ownership](#ownership)
-- [File system events](#file-system-events)
-- [Mounts](#mounts)
-- [Symlinks](#symlinks)
-- [File types](#file-types)
-- [Extended attributes](#extended-attributes)
-- [Technology](#technology)
-
-### Case sensitivity
-
-With Docker for Mac, file systems are shared from OS X into containers
-in the same way as they operate in OS X. As a result, if a file system
-on OS X is case-insensitive that behavior is shared by any bind mount
-from OS X into a container. The default OS X file system is HFS+ and,
-during installation, it is installed as case-insensitive by default. To
-get case-sensitive behavior from your bind mounts, you must either
-create and format a ramdisk or external volume as HFS+ with
-case-sensitivity or reformat your OS root partition with HFS+ with
-case-sensitivity. We do not recommend reformatting your root partition
-as some Mac software dubiously relies on case-insensitivity to function.
-
-
-### Access control
-
-`osxfs`, and therefore Docker, can access only those file system
-resources that the Docker for Mac user has access to. `osxfs` does
-not run as `root`. If the OS X user is an administrator, `osxfs` inherits
-those administrator privileges. We are still evaluating which privileges
-to drop in the file system process to balance security and
-ease-of-use. `osxfs` performs no additional permissions checks and
-enforces no extra access control on accesses made through it. All
-processes in containers can access the same objects in the same way as
-the Docker user who started the containers.
-
-### Namespaces
-
-Much of the OS X file system that is accessible to the user is also
-available to containers using the `-v` bind mount syntax. By default,
-you can share files in `/Users`, `/Volumes`, `/private`, and `/tmp`
-directly. To add or remove directory trees that are exported to Docker,
-use the **File sharing** tab in Docker preferences -> **Preferences** -> **File
-sharing**. (See [Preferences](index.md#preferences).) All other paths
-used in `-v` bind mounts are sourced from the Moby Linux VM running the
-Docker containers, so arguments such as `-v
-/var/run/docker.sock:/var/run/docker.sock` should work as expected. If
-an OS X path is not shared and does not exist in the VM, an attempt to
-bind mount it will fail rather than create it in the VM. Paths that
-already exist in the VM and contain files are reserved by Docker and
-cannot be exported from OS X.
-
-### Ownership
-
-Initially, any containerized process that requests ownership metadata of
-an object is told that its `uid` and `gid` own the object. When any
-containerized process changes the ownership of a shared file system
-object, e.g. with `chown`, the new ownership information is persisted in
-the `com.docker.owner` extended attribute of the object. Subsequent
-requests for ownership metadata will return the previously set
-values. Ownership-based permissions are only enforced at the OS X file
-system level with all accessing processes behaving as the user running
-Docker. If the user does not have permission to read extended attributes
-on an object, e.g. when that object's permissions are `0000`, ownership
-will be reported as the accessing process until the extended attribute
-is again readable.
-
-### File system events
-
-Most `inotify` events are supported in bind mounts, and likely `dnotify`
-and `fanotify` (though they have not been tested) are also supported.
-This means that file system events from OS X are sent into containers
-and trigger any listening processes there.
-
-The following are **supported file system events**:
-
-* Creation
-* Modification
-* Attribute changes
-* Deletion
-* Directory changes
-
-The following are **partially supported file system events**:
-
-* Move events trigger `IN_DELETE` on the source of the rename and
- `IN_MODIFY` on the destination of the rename
-
-The following are **unsupported file system events**:
-
-* Open
-* Access
-* Close events
-* Unmount events (see Mounts)
-
-Some events may be delivered multiple times. Events are not delivered for bind mounts from symlinks (notably `/tmp` will not deliver inotify events but
-`/private/tmp` will). These limitations do not apply to events between
-containers, only to those events originating in OS X.
-
-
-### Mounts
-
-The OS X mount structure is not visible in the shared volume, but volume
-contents are visible. Volume contents appear in the same file system as the
-rest of the shared file system. Mounting/unmounting OS X volumes that
-are also bind mounted into containers may result in unexpected behavior
-in those containers. Unmount events are not supported. Mount export
-support is planned but is still under development.
-
-### Symlinks
-
-Symlinks are shared unmodified. This may cause issues when symlinks
-contain paths that rely on the default case-insensitivity of the
-default OS X file system, HFS+.
-
-### File types
-
-Symlinks, hardlinks, socket files, named pipes, regular files, and
-directories are supported. Socket files and named pipes only transmit
-between containers and between OS X processes -- no transmission across
-the hypervisor is supported, yet. Character and block device files are
-not supported.
-
-### Extended attributes
-
-Extended attributes are not yet supported.
-
-### Technology
-
-`osxfs` does not use OSXFUSE. `osxfs` does not run under, inside, or
-between OS X userspace processes and the OS X kernel.
-
-
-
diff --git a/docker-for-mac/release-notes.md b/docker-for-mac/release-notes.md
deleted file mode 100644
index 770049de4d..0000000000
--- a/docker-for-mac/release-notes.md
+++ /dev/null
@@ -1,1159 +0,0 @@
-
-
-# Docker for Mac Release Notes
-
-Here are the main improvements and issues per release, starting with the current release. The documentation is always updated for each release.
-
-For system requirements, please see the Getting Started topic on [What to know before you install](index.md#what-to-know-before-you-install).
-
-Release notes for _stable_ and _beta_ releases are listed below. You can learn about both kinds of releases, and download stable and beta product installers at [Download Docker for Mac](index.md#download-docker-for-mac).
-
-* [Stable Release Notes](#stable-release-notes)
-* [Beta Release Notes](#beta-release-notes)
-
-## Stable Release Notes
-
-### Docker for Mac 1.12.1, 2016-09-16 (stable)
-
-**New**
-
-* Support for OSX 10.12 Sierra
-
-**Upgrades**
-
-* Docker 1.12.1
-* Docker machine 0.8.1
-* Linux kernel 4.4.20
-* aufs 20160905
-
-**Bug fixes and minor changes**
-
-**General**
-
- * Fixed communications glitch when UI talks to com.docker.vmnetd
- Fixes https://github.com/docker/for-mac/issues/90
-
- * `docker-diagnose`: display and record the time the diagnosis was captured
-
- * Don't compute the container folder in `com.docker.vmnetd`
- Fixes https://github.com/docker/for-mac/issues/47
-
- * Warn the user if BlueStacks is installed (potential kernel panic)
-
- * Automatic update interval changed from 1 hour to 24 hours
-
- * Include Zsh completions
-
- * UI Fixes
-
-**Networking**
-
-* VPNKit supports search domains
-
-* slirp: support up to 8 external DNS servers
-
-* slirp: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected
-
-* Entries from `/etc/hosts` should now resolve from within containers
-
-* Allow ports to be bound on host addresses other than `0.0.0.0` and `127.0.0.1`
- Fixes issue reported in https://github.com/docker/for-mac/issues/68
-
-* Use Mac System Configuration database to detect DNS
-
-**Filesharing (OSXFS)**
-
-* Fixed thread leak
-
-* Fixed a malfunction of new directories that have the same name as an old directory that is still open
-
-* Rename events now trigger DELETE and/or MODIFY `inotify` events (saving with TextEdit works now)
-
-* Fixed an issue that caused `inotify` failure and crashes
-
-* Fixed a directory file descriptor leak
-
-* Fixed socket `chowns`
-
-**Moby**
-
-* Use default `sysfs` settings, transparent huge pages disabled
-
-* `cgroup` mount to support `systemd` in containers
-
-* Increase Moby `fs.file-max` to 524288
-
-* Fixed Moby Diagnostics and Update Kernel
-
-**HyperKit**
-
-* HyperKit updated with `dtrace` support and lock fixes
-
-### Docker for Mac 2016-08-11 1.12.0-a (stable)
-
-This bug fix release contains osxfs improvements. The fixed issues may have
-been seen as failures with apt-get and npm in containers, missed inotify
-events or unexpected unmounts.
-
-* Bug fixes
- - osxfs: fixed an issue causing access to children of renamed
- directories to fail (symptoms: npm failures, apt-get failures)
- - osxfs: fixed an issue causing some ATTRIB and CREATE inotify
- events to fail delivery and other inotify events to stop
- - osxfs: fixed an issue causing all inotify events to stop when an
- ancestor directory of a mounted directory was mounted
- - osxfs: fixed an issue causing volumes mounted under other mounts
- to spontaneously unmount
-
-### Docker for Mac 1.12.0-a, 2016-08-03 (stable)
-
-This bug fix release contains osxfs improvements. The fixed issues may have
-been seen as failures with apt-get and npm in containers, missed `inotify`
-events or unexpected unmounts.
-
-**Hotfixes**
-
-* osxfs: fixed an issue causing access to children of renamed directories to fail (symptoms: npm failures, apt-get failures) (docker/for-mac)
-
-* osxfs: fixed an issue causing some ATTRIB and CREATE `inotify` events to fail delivery and other `inotify` events to stop
-
-* osxfs: fixed an issue causing all `inotify` events to stop when an ancestor directory of a mounted directory was mounted
-
-* osxfs: fixed an issue causing volumes mounted under other mounts to spontaneously unmount
-
-### Docker for Mac 1.12.0, 2016-07-28 (stable)
-
-* First stable release
-
-**Components**
-
-* Docker 1.12.0
-* Docker Machine 0.8.0
-* Docker Compose 1.8.0
-
-## Beta Release Notes
-
-### Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27)
-
-**Upgrades**
-
-* Docker 1.12.2-rc1
-* Docker Machine 0.8.2
-* Docker compose 1.8.1
-* Kernel vsock driver v7
-* Kernel 4.4.21
-* aufs 20160912
-
-**Bug fixes and minor changes**
-
-* Fix an issue where some windows did not claim focus correctly
-* Add UI when switching channel to prevent user losing containers and settings
-* Check disk capacity before Toolbox import
-* Import certificates in `etc/ssl/certs/ca-certificates.crt`
-* DNS: reduce the number of UDP sockets consumed on the host
-* VPNkit: improve the connection-limiting code to avoid running out of sockets on the host
-* UDP: handle datagrams bigger than 2035, up to the configured macOS kernel limit
-* UDP: make the forwarding more robust; drop packets and continue rather than stopping
-* disk: make the "flush" behaviour configurable for database-like workloads. This works around a performance regression in `v1.12.1`.
-
-### Beta 26 Release Notes (2016-09-14 1.12.1-beta26)
-
-**New**
-
-* Improved support for macOS 10.12 Sierra
-
-**Upgrades**
-
-* Linux kernel 4.4.20
-* aufs 20160905
-
-**Bug fixes and minor changes**
-
-* Fixed communications glitch when UI talks to `com.docker.vmnetd`. Fixes https://github.com/docker/for-mac/issues/90
-
-* UI fix for macOS 10.12
-
-* Windows open on top of full screen app are available in all spaces
-
-* Reporting a bug, while not previously logged into GitHub now works
-
-* When a diagnostic upload fails, the error is properly reported
-
-* `docker-diagnose` displays and records the time the diagnosis was captured
-
-* Ports are allowed to bind to host addresses other than `0.0.0.0` and `127.0.0.1`. Fixes issue reported in https://github.com/docker/for-mac/issues/68.
-
-* We no longer compute the container folder in `com.docker.vmnetd`. Fixes https://github.com/docker/for-mac/issues/47.
-
-**Known Issues**
-
-* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The
-issue is being investigated. The workaround is to restart Docker.app.
-
-* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and
-traversals of large directories are currently slow. Additionally, containers
-that perform large numbers of directory operations, such as repeated scans of
-large directory trees, may suffer from poor performance. More information is
-available in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting.
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`.
-
-### Beta 25 Release Notes (2016-09-07 1.12.1-beta25)
-
-**Upgrades**
-
-* Experimental support for OSX 10.12 Sierra (beta)
-
-**Bug fixes and minor changes**
-
-* VPNKit supports search domains
-* Entries from `/etc/hosts` should now resolve from within containers
-* osxfs: fix thread leak
-
-**Known issues**
-
-* Several problems have been reported on macOS 10.12 Sierra and are being
-investigated. This includes failure to launch the app and being unable to
-upgrade to a new version.
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The
-issue is being investigated. The workaround is to restart Docker.app
-
-* There are a number of issues with the performance of directories bind-mounted
-with `osxfs`. In particular, writes of small blocks and traversals of large
-directories are currently slow. Additionally, containers that perform large
-numbers of directory operations, such as repeated scans of large directory
-trees, may suffer from poor performance. More information is available in [Known
-Issues](troubleshoot.md#known-issues) in Troubleshooting.
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app.
-
-### Beta 24 Release Notes (2016-08-23 1.12.1-beta24)
-
-**Upgrades**
-
-* Docker 1.12.1
-* Docker Machine 0.8.1
-* Linux kernel 4.4.19
-* aufs 20160822
-
-**Bug fixes and minor changes**
-
-* osxfs: fixed a malfunction of new directories that have the same name as an old directory that is still open
-
-* osxfs: rename events now trigger DELETE and/or MODIFY `inotify` events (saving with TextEdit works now)
-
-* slirp: support up to 8 external DNS servers
-
-* slirp: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected
-
-* The app warns user if BlueStacks is installed (potential kernel panic)
-
-**Known issues**
-
-* Several problems have been reported on macOS 10.12 Sierra and are being investigated. This includes failure to launch the app and being unable to
-upgrade to a new version.
-
-* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`.
-
-* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large
-directories are currently slow. Additionally, containers that perform large
-numbers of directory operations, such as repeated scans of large directory
-trees, may suffer from poor performance. For more information and workarounds, see the bullet on [performance of bind-mounted directories](troubleshoot.md#bind-mounted-dirs) in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting.
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart `Docker.app`.
-
-### Beta 23 Release Notes (2016-08-16 1.12.1-rc1-beta23)
-
-**Upgrades**
-
-* Docker 1.12.1-rc1
-* Linux kernel 4.4.17
-* aufs 20160808
-
-**Bug fixes and minor changes**
-
-* Moby: use default sysfs settings, transparent huge pages disabled
-* Moby: cgroup mount to support systemd in containers
-* osxfs: fixed an issue that caused `inotify` failure and crashes
-* osxfs: fixed a directory fd leak
-* Zsh completions
-
-**Known issues**
-
-* Docker for Mac is not supported on OSX 10.12 Sierra
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app
-
-* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see the bullet on [performance of bind-mounted directories](troubleshoot.md#bind-mounted-dirs) in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting.
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app
-
-### Beta 22 Release Notes (2016-08-11 1.12.0-beta22)
-
-**Upgrades**
-
-* Linux kernel to 4.4.16
-
-**Bug fixes and minor changes**
-
-* Increase Moby fs.file-max to 524288
-* Use Mac System Configuration database to detect DNS
-* HyperKit updated with dtrace support and lock fixes
-* Fix Moby Diagnostics and Update Kernel
-* UI Fixes
-* osxfs: fix socket chowns
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app
-
-* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. More information is available in [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app
-
-### Beta 21.1 Release Notes (2016-08-03 1.12.0-beta21.1)
-
-This bug fix release contains osxfs improvements. The fixed issues may have
-been seen as failures with apt-get and npm in containers, missed `inotify`
-events or unexpected unmounts.
-
-**Hotfixes**
-
-* osxfs: fixed an issue causing access to children of renamed directories to fail (symptoms: npm failures, apt-get failures) (docker/for-mac)
-
-* osxfs: fixed an issue causing some ATTRIB and CREATE `inotify` events to fail delivery and other `inotify` events to stop
-
-* osxfs: fixed an issue causing all `inotify` events to stop when an ancestor directory of a mounted directory was mounted
-
-* osxfs: fixed an issue causing volumes mounted under other mounts to spontaneously unmount (docker/docker#24503)
-
-#### Docker for Mac 1.12.0 (2016-07-28 1.12.0-beta21)
-
-**New**
-
-* Docker for Mac is now available from 2 channels: **stable** and **beta**. New features and bug fixes will go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [Getting Started](index.md#download-docker-for-mac).
-
-**Upgrades**
-
-* Docker 1.12.0 with experimental features
-* Docker Machine 0.8.0
-* Docker Compose 1.8.0
-
-**Bug fixes and minor changes**
-
-* Check for updates, auto-update and diagnose can be run by non-admin users
-* osxfs: fixed an issue causing occasional incorrect short reads
-* osxfs: fixed an issue causing occasional EIO errors
-* osxfs: fixed an issue causing `inotify` creation events to fail
-* osxfs: increased the `fs.inotify.max_user_watches` limit in Moby to 524288
-* The UI shows documentation link for sharing volumes
-* Clearer error message when running with outdated Virtualbox version
-* Added link to sources for qemu-img
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app
-
-* There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks, and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see [Known Issues](troubleshoot.md#known-issues) in [Logs and Troubleshooting](troubleshoot.md).
-
-* Under some unhandled error conditions, `inotify` event delivery can fail and become permanently disabled. The workaround is to restart Docker.app
-
-### Beta 20 Release Notes (2016-07-19 1.12.0-rc4-beta20)
-
-**Bug fixes and minor changes**
-
-* Fixed `docker.sock` permission issues
-* Don't check for update when the settings panel opens
-* Removed obsolete DNS workaround
-* Use the secondary DNS server in more circumstances
-* Limit the number of concurrent port forwards to avoid running out of resources
-* Store the database as a "bare" git repo to avoid corruption problems
-
-**Known issues**
-
-* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker for Mac (`Docker.app`).
-
-### Beta 19 Release Notes (2016-07-14 1.12.0-rc4-beta19)
-
-**New**
-
-* Added privacy tab in settings
-* Allow the definition of HTTP proxy overrides in the UI
-
-**Upgrades**
-
-* Docker 1.12.0 RC4
-* Docker Compose 1.8.0 RC2
-* Docker Machine 0.8.0 RC2
-* Linux kernel 4.4.15
-
-**Bug fixes and minor changes**
-
-* Filesystem sharing permissions can only be configured in the UI (no more `/Mac` in moby)
-* `com.docker.osx.xhyve.hyperkit`: increased max number of fds to 10240
-* Improved Moby syslog facilities
-* Improved file-sharing tab
-* `com.docker.slirp`: included the DNS TCP fallback fix, required when UDP responses are truncated
-* `docker build/events/logs/stats... ` won't leak when interrupted with Ctrl-C
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 18.1 Release Notes (2016-07-07 1.12.0-rc3-beta18.1)
-
->**Note**: Docker 1.12.0 RC3 release introduces a backward incompatible change from RC2. You can fix this by [recreating or updating your containers](troubleshoot.md#recreate-or-update-your-containers-after-beta-18-upgrade) as described in Troubleshooting.
-
-**Hotfix**
-
-* Fixed issue resulting in error "Hijack is incompatible with use of CloseNotifier", reverts previous fix for `Ctrl-C` during build.
-
-**New**
-
-* New host/container file sharing UI
-* `/Mac` bind mount prefix is deprecated and will be removed soon
-
-**Upgrades**
-
-* Docker 1.12.0 RC3
-
-**Bug fixes and minor changes**
-
-* VPNKit: Improved scalability as number of network connections increases
-* The docker API proxy was failing to deal with some 1.12 features (e.g. health check)
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 18 Release Notes (2016-07-06 1.12.0-rc3-beta18)
-
-**New**
-
-* New host/container file sharing UI
-* `/Mac` bind mount prefix is deprecated and will be removed soon
-
-**Upgrades**
-
-* Docker 1.12.0 RC3
-
-**Bug fixes and minor changes**
-
-* VPNKit: Improved scalability as number of network connections increases
-* Interrupting a `docker build` with Ctrl-C will actually stop the build
-* The docker API proxy was failing to deal with some 1.12 features (e.g. health check)
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 17 Release Notes (2016-06-29 1.12.0-rc2-beta17)
-
-**Upgrades**
-
-* Linux kernel 4.4.14, aufs 20160627
-
-**Bug fixes and minor changes**
-
-* Documentation moved to https://docs.docker.com/docker-for-mac/
-* Allow non-admin users to launch the app for the first time (using admin creds)
-* Prompt non-admin users for admin password when needed in Preferences
-* Fixed download links, documentation links
-* Fixed "failure: No error" message in diagnostic panel
-* Improved diagnostics for networking and logs for the service port openers
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 16 Release Notes (2016-06-17 1.12.0-rc2-beta16)
-
-**Upgrades**
-
-* Docker 1.12.0 RC2
-* docker-compose 1.8.0 RC1
-* docker-machine 0.8.0 RC1
-* notary 0.3
-* Alpine 3.4
-
-**Bug fixes and minor changes**
-
-* VPNKit: Fixed a regressed error message when a port is in use
-* Fixed UI crashing with `NSInternalInconsistencyException` / fixed leak
-* HyperKit API: Improved error reporting
-* osxfs: fix sporadic EBADF due to fd access/release races (#3683)
-
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 15 Release Notes (2016-06-10 1.11.2-beta15)
-
-**New**
-
-* Registry mirror and insecure registries can now be configured from Preferences
-* VM can now be restarted from Preferences
-* `sysctl.conf` can be edited from Preferences
-
-**Upgrades**
-
-* Docker 1.11.2
-* Linux 4.4.12, `aufs` 20160530
-
-**Bug fixes and minor changes**
-
-* Timekeeping in Moby VM improved
-* Number of concurrent TCP/UDP connections increased in VPNKit
-* Hyperkit: `vsock` stability improvements
-* Fixed crash when user is admin
-
-**Known issues**
-
-* See [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md)
-
-### Beta 14 Release Notes (2016-06-02 1.11.1-beta14)
-
-**New**
-
-* New settings menu item, **Diagnose & Feedback**, is available to run diagnostics and upload logs to Docker.
-
-**Known issues**
-
-* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode with OSX 10.10. The issue is being investigated. The workaround is to restart `Docker.app`.
-
-**Bug fixes and minor changes**
-
-* `osxfs`: now support `statfs`
-* **Preferences**: updated toolbar icons
-* Fall back to secondary DNS server if primary fails.
-* Added a link to the documentation from menu.
-
-### Beta 13.1 Release Notes (2016-05-28 1.11.1-beta13.1)
-
-**Hotfixes**
-
-* `osxfs`:
- - Fixed sporadic EBADF errors and End_of_file crashes due to a race corrupting node table invariants
- - Fixed a crash after accessing a sibling of a file moved to another directory caused by a node table invariant violation
-* Fixed issue where Proxy settings were applied on network change, causing docker daemon to restart too often
-* Fixed issue where log file sizes doubled on docker daemon restart
-
-### Beta 13 Release Notes (2016-05-25 1.11.1-beta13)
-
-**New**
-
-* `osxfs`: Enabled 10ms dcache for 3x speedup on a `go list ./...` test against docker/machine. Workloads heavy in file system path resolution (common among dynamic languages and build systems) will have those resolutions performed in amortized constant time rather than time linear in the depth of the path so speedups of 2-10x will be common.
-
-* Support multiple users on the same machine, non-admin users can use the app as long as `vmnetd` has been installed. Currently, only one user can be logged in at the same time.
-
-* Basic support for using system HTTP/HTTPS proxy in docker daemon
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app.
-
-**Bug fixes and minor changes**
-
-* `osxfs`:
- - setting `atime` and `mtime` of nodes is now supported
- - Fixed major regression in Beta 12 with ENOENT, ENOTEMPY, and other spurious errors after a directory rename. This manifested as `npm install` failure and other directory traversal issues.
- - Fixed temporary file ENOENT errors
 - Fixed in-place editing file truncation error (e.g. `perl -i`)
-* improved time synchronisation after sleep
-
-### Beta 12 Release (2016-05-17 1.11.1-beta12)
-
-**Upgrades**
-
-* FUSE 7.23 for [osxfs](osxfs.md)
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app.
-
-**Bug fixes and minor changes**
-
-* UI improvements
-* Fixed a problem in [osxfs](osxfs.md) where `mkdir` returned EBUSY but directory was created.
-
-### Beta 11 Release (2016-05-10 1.11.1-beta11)
-
-**New**
-
-The `osxfs` file system now persists ownership changes in an extended attribute. (See the topic on [ownership](osxfs.md#ownership) in [Sharing the OS X file system with Docker containers](osxfs.md).)
-
-**Upgrades**
-
-* docker-compose 1.7.1 (see changelog)
-* Linux kernel 4.4.9
-
-**Bug fixes and minor changes**
-
-* Desktop notifications after successful update
-* No "update available" popup during install process
-* Fixed repeated bind of privileged ports
-* `osxfs`: Fixed the block count reported by stat
-* Moby (Backend) fixes:
- - Fixed `vsock` half closed issue
- - Added NFS support
- - Hostname is now Moby, not Docker
- - Fixes to disk formatting scripts
- - Linux kernel upgrade to 4.4.9
-
-## Beta 10 Release (2016-05-03 1.11.0-beta10)
-
-**New**
-
-* Token validation is now done over an actual SSL tunnel (HTTPS). (This should fix issues with antivirus applications.)
-
-**Upgrades**
-
-* Docker 1.11.1
-
-**Bug fixes and minor changes**
-
-* UCP now starts again
-* Include debugging symbols in HyperKit
-* vsock stability improvements
-* Addressed glitches in **Preferences** panel
-* Fixed issues impacting the “whale menu”
-* Fixed uninstall process
-* HyperKit vcpu state machine improvements, may improve suspend/resume
-
-
-### Beta 9 Release (2016-04-26 1.11.0-beta9)
-
-**New**
-
-* New Preferences window - memory and vCPUs now adjustable
-* `localhost` is now used for port forwarding by default. `docker.local` will no longer work as of Beta 9.
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app.
-
-**Bug fixes and minor changes**
-
-* Fix loopback device naming
-* Improved docker socket download and osxfs sequential write by 20%
-* `com.docker.osxfs`
- - improved sequential read throughput by up to 20%
- - improved `readdir` performance by up to 6x
- - log all fatal exceptions
-* More reliable DNS forwarding over UDP and TCP
-* UDP ports can be proxied over vsock
-* Fixed EADDRINUSE (manifesting as errno 526) when ports are re-used
-* Send ICMP when asked to not fragment and we can’t guarantee it
-* Fixed parsing of UDP datagrams with IP socket options
-* Drop abnormally large ethernet frames
-* Improved HyperKit logging
-* Record VM start and stop events
-
-### Beta 8 Release (2016-04-20 1.11.0-beta8)
-
-**New**
-
-* Networking mode switched to VPN compatible by default, and as part of this change the overall experience has been improved:
- - `docker.local` now works in VPN compatibility mode
- - exposing ports on the Mac is available in both networking modes
- - port forwarding of privileged ports now works in both networking modes
- - traffic to external DNS servers is no longer dropped in VPN mode
-
-
-* `osxfs` now uses `AF_VSOCK` for transport giving ~1.8x speedup for large sequential read/write workloads but increasing latency by ~1.3x. `osxfs` performance engineering work continues.
-
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`
-
-**Bug fixes and minor changes**
-
-* Apple System Log now used for most logs instead of direct filesystem logging
-* `docker_proxy` fixes
-* Merged HyperKit upstream patches
-* Improved error reporting in `nat` network mode
-* `osxfs` `transfused` client now logs over `AF_VSOCK`
-* Fixed a `com.docker.osx.HyperKit.linux` supervisor deadlock if processes exit during a controlled shutdown
-* Fixed VPN mode malformed DNS query bug preventing some resolutions
-
-
-### Beta 7 Release (2016-04-12 1.11.0-beta7)
-
-**New**
-
-* Docs are updated per the Beta 7 release
-* Use AF_VSOCK for docker socket transport
-
-**Upgrades**
-
-* docker 1.11.0-rc5
-* docker-machine 0.7.0-rc3
-* docker-compose 1.7.0rc2
-
-
-**Known issues**
-
-* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app
-
-* If VPN mode is enabled and then disabled and then re-enabled again, `docker ps` will block for 90s
-
-**Bug fixes and minor changes**
-
-* Logging improvements
-* Improve process management
-
-## Beta 6 Release (2016-04-05 1.11.0-beta6)
-
-**New**
-
-* Docs are updated per the Beta 6 release
-* Added uninstall option in user interface
-
-**Upgrades**
-
-* docker 1.11.0-rc5
-* docker-machine 0.7.0-rc3
-* docker-compose 1.7.0rc2
-
-**Known issues**
-
-* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode.
-The issue is being investigated. The workaround is to restart
-`Docker.app`.
-
-* If VPN mode is enabled, then disabled and re-enabled again,
-`docker ps` will block for 90 seconds.
-
-**Bug fixes and minor changes**
-
-* Fixed osxfs multiple same directory bind mounts stopping inotify
-* Fixed osxfs `setattr` on mode 0 files (`sed` failures)
-* Fixed osxfs blocking all operations during `readdir`
-* Fixed osxfs mishandled errors which crashed the file system and VM
-* Removed outdated `lofs`/`9p` support
-* Added more debugging info to logs uploaded by `pinata diagnose`
-* Improved diagnostics from within the virtual machine
-* VirtualBox version check now also works without VBoxManage in path
-* VPN mode now uses same IP range as NAT mode
-* Tokens are now verified on port 443
-* Removed outdated uninstall scripts
-* Increased default ulimits
-* Port forwarding with `-p` and `-P` should work in VPN mode
-* Fixed a memory leak in `com.docker.db`
-* Fixed a race condition on startup between Docker and networking which can
-lead to `Docker.app` not starting on reboot
-
-### Beta 5 Release (2016-03-29 1.10.3-beta5)
-
-**New**
-
-- Docs are updated per the Beta 5 release!
-
-**Known issues**
-
-- There is a race on startup between docker and networking which can lead to Docker.app not starting on reboot. The workaround is to restart the application manually.
-
-- Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app.
-
-- In VPN mode, the `-p` option needs to be explicitly of the form `-p <host port>:<container port>`. `-p <port>` and `-P` will not work yet.
-
-**Bug fixes and minor changes**
-
-- Updated DMG background image
-- Show correct VM memory in Preferences
-- Feedback opens forum, not email
-- Fixed RAM amount error message
-- Fixed wording of CPU error dialog
-- Removed status from Preferences
-- Check for incompatible versions of Virtualbox
-
-### Beta 4 Release (2016-03-22 1.10.3-beta4)
-
-**New Features and Upgrades**
-
-
-
-
-
Component
-
Description
-
-
-
File System/Sharing
-
Support `inotify` events so that file system events on the
- Mac will trigger file system activations inside Linux containers
-
-
-
Docker Machine
-
Install Docker Machine as a part of Docker for Mac install in `/usr/local`
-
-
-
Getting Started and About
-
- Added animated popover window to help first-time users get started - Added a Beta icon to About box
-
-
-
-**Known issues**
-
-
-
-
-
Component
-
Description
-
-
-
Starting Docker
-
There is a race on startup between Docker and networking that can lead to `Docker.app` not starting on reboot.
The workaround is to restart the application manually.
-
-
-
-
OS X version support
-
`Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode.
- The issue is being investigated.
The workaround is to restart
- `Docker.app`.
-
-
-
-
VPN/Hostnet
-
In VPN mode, the `-p` option needs to be explicitly of the form
- `-p <host port>:<container port>`. `-p <port>` and `-P` will not
- work yet.
-
-
-
-
-**Bug fixes and minor changes**
-
-
-
-
-
Component
-
Description
-
-
-
Hostnet/VPN mode
-
Fixed Moby DNS resolver failures by proxying the "Recursion Available" flag.
-
-
-
-
IP addresses
-
`docker ps` shows IP address rather than `docker.local`.
-
-
-
-
OS X version support
-
- Re-enabled support for OS X Yosemite version 10.10 - Ensured binaries are built for 10.10 rather than 10.11.
-
-
-
-
Application startup
-
- Fixed "Notification Center"-related crash on startup - Fixed watchdog crash on startup
-
-
-
-### Beta 3 Release (2016-03-15 1.10.3-beta3)
-
-**New Features and Upgrades**
-
-
-
-
-
Component
-
Description
-
-
-
File System
-
Improved file sharing write speed in OSXFS
-
-
-
User space networking
-
Renamed `bridged` mode to `nat` mode
-
-
-
Debugging
-
Docker runs in debug mode by default for new installs
-
-
-
-
Docker Engine
-
Upgraded to 1.10.3
-
-
-
-**Bug fixes and minor changes**
-
-
-
-
-
Component
-
Description
-
-
-
GUI
-
Auto update automatically checks for new versions again
-
-
-
-
File System
-
- Fixed OSXFS chmod on sockets
 - Fixed OSXFS EINVAL from `open` using O_NOFOLLOW
-
-
-
Hypervisor
-
Hypervisor stability fixes, resynced with upstream repository
-
-
-
Hostnet/VPN mode
-
- Fixed get/set VPN mode in Preferences (GUI)
- - Added more verbose logging on errors in `nat` mode
- - Show correct forwarding details in `docker ps/inspect/port` in `nat` mode
-
-
-
-
Tokens
-
New lines ignored in token entry field
-
-
-
Feedback
-
Feedback mail has app version in subject field
-
-
-
Licensing
-
Clarified open source licenses
-
-
-
Crash reporting and error handling
-
- Fixed HockeyApp crash reporting
- - Fatal GUI errors now correctly terminate the app again
- - Fix proxy panics on EOF when decoding JSON
- - Fix long delay/crash when switching from `hostnet` to `nat` mode
-
-
-
-
Logging
-
- Moby logs included in diagnose upload
- - App version included in logs on startup
-
-
-
-
-
-### Beta 2 Release (2016-03-08 1.10.2-beta2)
-
-**New Features and Upgrades**
-
-
-
-
-
Component
-
Description
-
-
-
GUI
-
Add VPN mode/`hostnet` to Preferences
-
-
-
-
Add disable Time Machine backups of VM disk image to Preferences
-
-
-
CLI
-
Added `pinata` configuration tool for experimental Preferences
-
-
-
File System
-
Add guest-to-guest FIFO and socket file support
-
-
-
Notary
-
Upgraded to version 0.2
-
-
-
-**Bug fixes**
-
-
-
-
-
Component
-
Description
-
-
-
File System
-
Fixed data corruption bug during cp (use of sendfile/splice)
-
-
-
GUI
-
Fixed About box to contain correct version string
-
-
-
Hostnet/VPN mode
-
- Stability fixes and tests - Fixed DNS issues when changing networks
diff --git a/docker-for-mac/troubleshoot.md b/docker-for-mac/troubleshoot.md
deleted file mode 100644
index 874478122c..0000000000
--- a/docker-for-mac/troubleshoot.md
+++ /dev/null
@@ -1,332 +0,0 @@
-
-
-# Logs and Troubleshooting
-
-Here is information about how to diagnose and troubleshoot problems, send logs and communicate with the Docker for Mac team, use our forums and Knowledge Hub, browse and log issues on GitHub, and find workarounds for known problems.
-
-## Docker Knowledge Hub
-
-**Looking for help with Docker for Mac?** Check out the [Docker Knowledge Hub](http://success.docker.com/) for knowledge base articles, FAQs, and technical support for various subscription levels.
-
-## Diagnose problems, send feedback, and create GitHub issues
-
-If you encounter problems for which you do not find solutions in this documentation, [Docker for Mac issues on GitHub](https://github.com/docker/for-mac/issues) already filed by other users, or on the [Docker for Mac forum](https://forums.docker.com/c/docker-for-mac), we can help you troubleshoot the log data.
-
-Choose --> **Diagnose & Feedback** from the menu bar.
-
-
-
-You can choose to run diagnostics only, or diagnose and send the results to the Docker Team:
-
-* **Diagnose Only** - Runs diagnostics, and shows results locally. (Results are not sent to Docker, and no ID is generated.)
-
-
-
-* **Diagnose & Upload** - Runs diagnostics, shows results, and auto-uploads the diagnostic results to Docker. A diagnostic ID is auto-generated. You can refer to this ID when communicating with the Docker Team. Optionally, you can open an issue on GitHub using the uploaded results and ID as a basis.
-
-
-
-If you click **Open Issues**, this opens [Docker for Mac issues on GitHub](https://github.com/docker/for-mac/issues/) in your web browser in a “create new issue” template prepopulated with the following:
-
-* ID and summary of the diagnostic you just ran
-
-* System and version details
-
-* Sections where you can fill in a description of expected and actual behavior, and steps to reproduce the issue
-
-
-
-You can also create a new issue directly on GitHub at https://github.com/docker/for-mac/issues. (The README for the repository is [here](https://github.com/docker/for-mac).)
-
-Click [New Issue](https://github.com/docker/for-mac/issues/new) on that page (or right here ☺) to get a "create new issue" template prepopulated with sections for the ID and summary of your diagnostics, system and version details, description of expected and actual behavior, and steps to reproduce the issue.
-
-
-
-
-## Checking the logs
-
-In addition to using the diagnose and feedback option to submit logs, you can browse the logs yourself.
-
-#### Use the command line to view logs
-
-To view Docker for Mac logs at the command line, type this command in a terminal window or your favorite shell.
-
- $ syslog -k Sender Docker
-
-Alternatively, you can send the output of this command to a file. The following command redirects the log output to a file called `my_docker_logs.txt`.
-
- $ syslog -k Sender Docker > ~/Desktop/my_docker_logs.txt
-
-#### Use the Mac Console for log queries
-
-Macs provide a built-in log viewer. You can use the Mac Console System Log Query to check Docker app logs.
-
-The Console lives on your Mac hard drive in `Applications` > `Utilities`. You can bring it up quickly by just searching for it with Spotlight Search.
-
-To find all Docker app log messages, do the following.
-
-1. From the Console menu, choose **File** > **New System Log Query...**
-
- 
-
- * Name your search (for example `Docker`)
- * Set the **Sender** to **Docker**
-
-2. Click **OK** to run the log query.
-
- 
-
-You can use the Console Log Query to search logs, filter the results in various ways, and create reports.
-
-For example, you could construct a search for log messages sent by Docker that contain the word `hypervisor` then filter the results by time (earlier, later, now).
-
-The diagnostics and usage information to the left of the results provide auto-generated reports on packages.
-
-
-## Troubleshooting
-
-#### Recreate or update your containers after Beta 18 upgrade
-
-Docker 1.12.0 RC3 release introduces a backward incompatible change from RC2 to RC3. (For more information, see https://github.com/docker/docker/issues/24343#issuecomment-230623542.)
-
-You may get the following error when you try to start a container created with pre-Beta 18 Docker for Mac applications.
-
- Error response from daemon: Unknown runtime specified default
-
-You can fix this by either [recreating](#recreate-your-containers) or [updating](#update-your-containers) your containers.
-
-If you get the error message shown above, we recommend recreating them.
-
-##### Recreate your containers
-
-To recreate your containers, use Docker Compose.
-
- docker-compose down && docker-compose up
-
-##### Update your containers
-
-To fix existing containers, follow these steps.
-
-1. Run this command.
-
- $ docker run --rm -v /var/lib/docker:/docker cpuguy83/docker112rc3-runtimefix:rc3
-
- Unable to find image 'cpuguy83/docker112rc3-runtimefix:rc3' locally
- rc3: Pulling from cpuguy83/docker112rc3-runtimefix
- 91e7f9981d55: Pull complete
- Digest: sha256:96abed3f7a7a574774400ff20c6808aac37d37d787d1164d332675392675005c
- Status: Downloaded newer image for cpuguy83/docker112rc3-runtimefix:rc3
- proccessed 1648f773f92e8a4aad508a45088ca9137c3103457b48be1afb3fd8b4369e5140
- skipping container '433ba7ead89ba645efe9b5fff578e674aabba95d6dcb3910c9ad7f1a5c6b4538': already fixed
- proccessed 43df7f2ac8fc912046dfc48cf5d599018af8f60fee50eb7b09c1e10147758f06
- proccessed 65204cfa00b1b6679536c6ac72cdde1dbb43049af208973030b6d91356166958
- proccessed 66a72622e306450fd07f2b3a833355379884b7a6165b7527c10390c36536d82d
- proccessed 9d196e78390eeb44d3b354d24e25225d045f33f1666243466b3ed42fe670245c
- proccessed b9a0ecfe2ed9d561463251aa90fd1442299bcd9ea191a17055b01c6a00533b05
- proccessed c129a775c3fa3b6337e13b50aea84e4977c1774994be1f50ff13cbe60de9ac76
- proccessed dea73dc21126434f14c58b83140bf6470aa67e622daa85603a13bc48af7f8b04
- proccessed dfa8f9278642ab0f3e82ee8e4ad029587aafef9571ff50190e83757c03b4216c
- proccessed ee5bf706b6600a46e5d26327b13c3c1c5f7b261313438d47318702ff6ed8b30b
-
-2. Quit Docker.
-
-3. Start Docker.
-
- > **Note:** Be sure to quit and then restart Docker for Mac before attempting to start containers.
-
-4. Try to start the container again:
-
- $ docker start old-container
- old-container
-
-#### Incompatible CPU detected
-
-Docker for Mac requires a processor (CPU) that supports virtualization and, more specifically, the [Apple Hypervisor framework](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/). Docker for Mac is only compatible with Macs that have a CPU that supports the Hypervisor framework. Most Macs built in 2010 and later support it, as described in the Apple Hypervisor Framework documentation about supported hardware:
-
-*Generally, machines with an Intel VT-x feature set that includes Extended Page Tables (EPT) and Unrestricted Mode are supported.*
-
-To check if your Mac supports the Hypervisor framework, run this command in a terminal window.
-
-```
-sysctl kern.hv_support
-```
-If your Mac supports the Hypervisor Framework, the command will print `kern.hv_support: 1`.
-
-If not, the command will print `kern.hv_support: 0`.
-
-See also, [Hypervisor Framework Reference](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/) in the Apple documentation, and Docker for Mac system requirements in [What to know before you install](index.md#what-to-know-before-you-install).
-
-
-#### Workarounds for common problems
-
-* IPv6 workaround to auto-filter DNS addresses - IPv6 is not yet supported on Docker for Mac, which typically manifests as a network timeout when running `docker` commands that need access to external network servers (e.g., `docker pull busybox`).
-
- ```
- $ docker pull busybox
- Using default tag: latest
- Pulling repository docker.io/library/busybox
- Network timed out while trying to connect to https://index.docker.io/v1/repositories/library/busybox/images. You may want to check your internet connection or if you are behind a proxy.
- ```
-
-    Starting with v1.12.1, 2016-09-16 on the stable channel, and Beta 24 on the beta channel, a workaround is provided that auto-filters out the IPv6 addresses in DNS server lists and enables successful network access. For example, `2001:4860:4860::8888` would become `8.8.8.8`. So, the only workaround action needed for users is to [upgrade to Docker for Mac stable v1.12.1 or newer, or Beta 24 or newer](index.md#download-docker-for-mac).
-
- On releases with the workaround included to filter out / truncate IPv6 addresses from the DNS list, the above command should run properly:
-
- ```
- $ docker pull busybox
- Using default tag: latest
- latest: Pulling from library/busybox
- Digest: sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
-   Status: Image is up to date for busybox:latest
- ```
-
- To learn more, see these issues on GitHub and Docker for Mac forums:
-
- * [Network timeout when top two DNS servers in /etc/resolv.conf are IPv6 addresses](https://github.com/docker/for-mac/issues/9)
-
- * [ERROR: Network timed out while trying to connect to index.docker.io](https://forums.docker.com/t/error-network-timed-out-while-trying-to-connect-to-index-docker-io/17206)
-
-* If Docker for Mac fails to install or start properly:
- * Make sure you quit Docker for Mac before installing a new version of the application ( --> **Quit Docker**). Otherwise, you will get an "application in use" error when you try to copy the new app from the `.dmg` to `/Applications`.
-
- * Restart your Mac to stop / discard any vestige of the daemon running from the previously installed version.
-
-  * Run the uninstall commands from the menu.
-
-
-
-* If `docker` commands aren't working properly or as expected:
-
- Make sure you are not using the legacy Docker Machine environment in your shell
-or command window. You do not need `DOCKER_HOST` set, so unset it as it may be
-pointing at another Docker (e.g. VirtualBox). If you use bash, `unset
-${!DOCKER_*}` will unset existing `DOCKER` environment variables you have set.
-For other shells, unset each environment variable individually as described in
-[Setting up to run Docker for
-Mac](docker-toolbox.md#setting-up-to-run-docker-for-mac) in [Docker for Mac vs.
-Docker Toolbox](docker-toolbox.md).
-
-
-
-* Note that network connections will fail if the OS X Firewall is set to
-"Block all incoming connections". You can enable the firewall, but `bootpd` must be allowed incoming connections so that the VM can get an IP address.
-
-
-
-* For the `hello-world-nginx` example, Docker for Mac must be running in order to get to the webserver on `http://localhost/`. Make sure that the Docker whale
-is showing in the menu bar, and that you run the Docker commands in a shell that
-is connected to the Docker for Mac Engine (not Engine from Toolbox). Otherwise,
-you might start the webserver container but get a "web page not available" error
-when you go to `localhost`. For more on distinguishing between the two
-environments, see [Docker for Mac vs. Docker Toolbox](docker-toolbox.md).
-
-
-
-* If you see errors like `Bind for 0.0.0.0:8080 failed: port is already allocated` or
- `listen tcp:0.0.0.0:8080: bind: address is already in use`:
-
- These errors are often caused by some other software on the Mac using those ports.
- Run `lsof -i tcp:8080` to discover the name and pid of the other process and
- decide whether to shut the other process down, or to use a different port in
- your docker app.
-
-See also [Known Issues](#known-issues) on this page, and the [FAQs](faqs.md) topic.
-
-
-## Known issues
-
-* IPv6 is not yet supported on Docker for Mac. If you are using IPv6, and haven't upgraded to Beta 24 or v1.12.1 stable or newer, you will see a network
-timeout when you run `docker` commands that need access to external network
-servers. The aforementioned releases include a workaround for this because
-Docker for Mac does not yet support IPv6. See "IPv6 workaround to auto-filter DNS addresses" in
-[Workarounds for common problems](#workarounds-for-common-problems).
-
-* You might encounter errors when using `docker-compose up` with Docker for Mac (`ValueError: Extra Data`). We've identified this is likely related to data and/or events being passed all at once rather than one by one, so sometimes the data comes back as 2+ objects concatenated and causes an error.
-
-
-
-* Force-ejecting the `.dmg` after running `Docker.app` from it results in an unresponsive whale in the menu bar, Docker tasks "not responding" in activity monitor, helper processes running, and supporting technologies consuming large percentages of CPU. Please reboot, and then re-start Docker for Mac. If needed, `force quit` any Docker related applications as part of the reboot.
-
-
-
-* Docker does not auto-start on login even when it is enabled in --> **Preferences**. This is related to a set of issues with Docker helper, registration, and versioning.
-
-
-
-* Docker for Mac uses the `HyperKit` hypervisor (https://github.com/docker/hyperkit) in Mac OS X 10.10 Yosemite and higher. If you are developing with tools that have conflicts with `HyperKit`, such as [Intel Hardware Accelerated Execution Manager (HAXM)](https://software.intel.com/en-us/android/articles/intel-hardware-accelerated-execution-manager/), the current workaround is not to run them at the same time. You can pause `HyperKit` by quitting Docker for Mac temporarily while you work with HAXM. This will allow you to continue work with the other tools and prevent `HyperKit` from interfering.
-
-
-
-* If you are working with applications like [Apache Maven](https://maven.apache.org/) that expect settings for `DOCKER_HOST` and `DOCKER_CERT_PATH` environment variables, specify these to connect to Docker instances through Unix sockets. For example:
-
- export DOCKER_HOST=unix:///var/run/docker.sock
-
-* `docker-compose` 1.7.1 performs DNS unnecessary lookups for `localunixsocket.local` which can take 5s to timeout on some networks. If `docker-compose` commands seem very slow but seem to speed up when the network is disabled (e.g. when disconnected from wifi), try appending `127.0.0.1 localunixsocket.local` to the file `/etc/hosts`.
-Alternatively you could create a plain-text TCP proxy on localhost:1234 using:
-
- docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:1234:1234 bobrik/socat TCP-LISTEN:1234,fork UNIX-CONNECT:/var/run/docker.sock
-
- and then `export DOCKER_HOST=tcp://localhost:1234`.
-
-
-
-
-
-* There are a number of issues with the performance of directories
- bind-mounted with `osxfs`. In particular, writes of small blocks, and
- traversals of large directories are currently slow. Additionally,
- containers that perform large numbers of directory operations, such as
- repeated scans of large directory trees, may suffer from poor
- performance. Applications that behave in this way include:
-
- - `rake`
- - `ember build`
- - Symfony
- - Magento
-
- As a work-around for this behavior, you can put vendor or third-party library directories in Docker volumes, perform temporary file system
- operations outside of `osxfs` mounts, and use third-party tools like
- Unison or `rsync` to synchronize between container directories and
- bind-mounted directories. We are actively working on `osxfs`
- performance using a number of different techniques and we look forward
- to sharing improvements with you soon.
-
-
-
-* If your system does not have access to an NTP server, then after a hibernate the time seen by Docker for Mac may be considerably out of sync with the host. Furthermore, the time may slowly drift out of sync during use. To manually reset the time after hibernation, run:
-
- docker run --rm --privileged alpine hwclock -s
-
- Or, to resolve both issues, you can add the local clock as a low-priority (high stratum) fallback NTP time source for the host. To do this, edit the host's `/etc/ntp-restrict.conf` to add:
-
- server 127.127.1.1 # LCL, local clock
- fudge 127.127.1.1 stratum 12 # increase stratum
-
- Then restart the NTP service with:
-
- sudo launchctl unload /System/Library/LaunchDaemons/org.ntp.ntpd.plist
- sudo launchctl load /System/Library/LaunchDaemons/org.ntp.ntpd.plist
-
-
-
diff --git a/docker-for-windows/examples.md b/docker-for-windows/examples.md
deleted file mode 100644
index f3c41c7ee8..0000000000
--- a/docker-for-windows/examples.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-# Example Applications
-
-Upcoming releases will include example applications especially tailored for Docker for Mac and Docker for Windows.
-
-Examples will highlight develop, build, and run workflows in several languages, including Node.js, Python, Ruby, and Java.
-
-For now, if you want to get started experimenting with the Beta apps and Docker Compose (which is installed automatically with Docker Desktop Editions), have a look at these example applications in the Compose documentation. You should be able to run these with Docker for Mac and Docker for Windows.
-
-Quickstart: Compose and Django
-
-Quickstart: Compose and Rails
-
-Quickstart: Compose and WordPress
-
-See also [learn by example](/engine/tutorials/index.md) tutorials on building images, running containers, networking, managing data, and storing images on Docker Hub.
-
-
-
diff --git a/docker-for-windows/faqs.md b/docker-for-windows/faqs.md
deleted file mode 100644
index 3ecfc94172..0000000000
--- a/docker-for-windows/faqs.md
+++ /dev/null
@@ -1,153 +0,0 @@
-
-
-# Frequently Asked Questions (FAQs)
-
-
->**Looking for popular FAQs on Docker for Windows?** Check out the [Docker
-Knowledge Hub](http://success.docker.com/) for knowledge base articles, FAQs,
-technical support for various subscription levels, and more.
-
-### Questions about stable and beta channels
-
-**Q: How do I get the stable or beta version of Docker for Windows?**
-
-A: Use the download links for the channels given in the topic [Download Docker
-for Windows](index.md#download-docker-for-windows).
-
-This topic also has more information about the two channels.
-
-**Q: What is the difference between the stable and beta versions of Docker for Windows?**
-
-A: Two different download channels are available for Docker for Windows:
-
-* The stable channel provides a general availability release-ready installer for a fully baked and tested, more reliable app. The stable version of Docker for Windows comes with the latest released version of Docker Engine. The release schedule is synched with Docker Engine releases and hotfixes.
-
-* The beta channel provides an installer with new features we are working on, but is not necessarily fully tested. It comes with the experimental version of Docker Engine. Bugs, crashes and issues are more likely to occur with the beta app, but you get a chance to preview new functionality, experiment, and provide feedback as the apps evolve. Releases are typically more frequent than for stable, often one or more per month.
-
-**Q: Can I switch back and forth between stable and beta versions of Docker for Windows?**
-
-A: Yes, you can switch between versions to try out the betas to see what's new,
-then go back to stable for other work. However, **you can have only one app
-installed at a time**. Switching back and forth between stable and beta apps can
-de-stabilize your development environment, particularly in cases where you
-switch from a newer (beta) channel to older (stable).
-
-For example, containers created with a newer beta version of Docker for Windows
-may not work after you switch back to stable because they may have been created
-leveraging beta features that aren't in stable yet. Just keep this in mind as
-you create and work with beta containers, perhaps in the spirit of a playground
-space where you are prepared to troubleshoot or start over.
-
-To safely switch between beta and stable versions be sure
-to save images and export the containers you need, then uninstall the current
-version before installing another. The workflow is described in more detail
-below.
-
-Do the following each time:
-
-1. Use `docker save` to save any images you want to keep. (See
-[save](/engine/reference/commandline/save.md) in the Docker Engine command line
-reference.)
-
-2. Use `docker export` to export containers you want to keep. (See
-[export](/engine/reference/commandline/export.md) in the Docker Engine command
-line reference.)
-
-3. Uninstall the current app (whether stable or beta).
-
-4. Install a different version of the app (stable or beta).
-
-### What kind of feedback are we looking for?
-
-Everything is fair game. We'd like your impressions on the download-install
-process, startup, functionality available, the GUI, usefulness of the app,
-command line integration, and so on. Tell us about problems, what you like, or
-functionality you'd like to see added.
-
-We are especially interested in getting feedback on the new swarm mode described
-in [Docker Swarm](/engine/swarm/index.md). A good place to start is the
-[tutorial](/engine/swarm/swarm-tutorial/index.md).
-
-### What if I have problems or questions?
-
-You can find the list of frequent issues in
-[Logs and Troubleshooting](troubleshoot.md).
-
-If you do not find a solution in Troubleshooting, browse issues on [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues) or create a new one. You can also create new issues based on diagnostics. To learn more about running diagnostics and about Docker for Windows GitHub issues, see [Diagnose and Feedback](index.md#diagnose-and-feedback).
-
-[Docker for Windows forum](https://forums.docker.com/c/docker-for-windows) provides discussion threads as well, and you can create discussion topics there, but we recommend using the GitHub issues over the forums for better tracking and response.
-
-### Can I use Docker for Windows with new swarm mode?
-
-Yes! You can use Docker for Windows to test single-node features of [swarm mode](/engine/swarm/index.md) introduced with Docker Engine 1.12, including initializing a swarm with a single node, creating services, and scaling services. Docker “Moby” on Hyper-V will serve as the single swarm node. You can also use Docker Machine, which comes with Docker for Windows, to create and experiment with a multi-node swarm. Check out the tutorial at [Get started with swarm mode](/engine/swarm/swarm-tutorial/index.md).
-
-### How do I connect to the remote Docker Engine API?
-
-You might need to provide the location of the remote API for Docker clients and development tools.
-
-On Docker for Windows, clients can connect to the Docker Engine through a **named pipe**: `npipe:////./pipe/docker_engine`, or **TCP socket** at this URL: `http://localhost:2375`.
-
-This sets `DOCKER_HOST` and `DOCKER_CERT_PATH` environment variables to the given values (for the named pipe or TCP socket, whichever you use).
-
-See also [Docker Remote API](/engine/reference/api/docker_remote_api.md) and the Docker for Windows forums topic [How to find the remote API](https://forums.docker.com/t/how-to-find-the-remote-api/20988).
-
-### Why doesn't `nodemon` pick up file changes in a container mounted on a shared drive?
-
-Currently, `inotify` does not work on Docker for Windows. This is a known issue.
-For more information and a temporary workaround, see [inotify on shared drives
-does not work](troubleshoot.md#inotify-on-shared-drives-does-not-work) in
-[Troubleshooting](troubleshoot.md).
-
-### Why does Docker for Windows sometimes lose network connectivity (e.g., `push`/`pull` doesn't work)?
-
-Networking is not yet fully stable across network changes and system sleep
-cycles. Exit and start Docker to restore connectivity.
-
-### Can I use VirtualBox alongside Docker 4 Windows?
-
-Unfortunately, VirtualBox (and other hypervisors like VMWare) cannot run when
-Hyper-V is enabled on Windows.
-
-### Why is Windows 10 Home not supported?
-
-Docker for Windows requires the Hyper-V Windows feature which is not
-available on Home-edition.
-
-### Why is Windows 10 required?
-
-Docker for Windows uses Windows Hyper-V. While older Windows versions have
-Hyper-V, their Hyper-V implementations lack features critical for Docker for
-Windows to work.
-
-### Why does Docker for Windows fail to start when firewalls or anti-virus software is installed?
-
-Comodo Firewall currently is incompatible with Hyper-V and some Windows 10 builds (possibly, the Anniversary Update), which impacts Docker for Windows. Other firewalls and anti-virus software might also be incompatible with these Microsoft Windows 10 builds. See details and workarounds in [Docker fails to start when Comodo Firewall is installed](troubleshoot.md#docker-fails-to-start-when-comodo-firewall-is-installed) in [Troubleshooting](troubleshoot.md).
-
-
-### How do I uninstall Docker Toolbox?
-
-You might decide that you do not need Toolbox now that you have Docker for Windows, and want to uninstall it. For
-details on how to perform a clean uninstall of Toolbox on Windows, see [How to
-uninstall Toolbox](/toolbox/toolbox_install_windows.md#how-to-uninstall-toolbox)
-in the Toolbox Windows topics.
-
-
-
diff --git a/docker-for-windows/images/Config-popup.png b/docker-for-windows/images/Config-popup.png
deleted file mode 100644
index 0ad3186996..0000000000
Binary files a/docker-for-windows/images/Config-popup.png and /dev/null differ
diff --git a/docker-for-windows/images/Docker-win-settings.png b/docker-for-windows/images/Docker-win-settings.png
deleted file mode 100755
index 626956187f..0000000000
Binary files a/docker-for-windows/images/Docker-win-settings.png and /dev/null differ
diff --git a/docker-for-windows/images/Start-Authorize.png b/docker-for-windows/images/Start-Authorize.png
deleted file mode 100644
index 292c3cc8f3..0000000000
Binary files a/docker-for-windows/images/Start-Authorize.png and /dev/null differ
diff --git a/docker-for-windows/images/Start-init.png b/docker-for-windows/images/Start-init.png
deleted file mode 100644
index fb2e93dc62..0000000000
Binary files a/docker-for-windows/images/Start-init.png and /dev/null differ
diff --git a/docker-for-windows/images/about-docker-win.png b/docker-for-windows/images/about-docker-win.png
deleted file mode 100644
index a7495e933c..0000000000
Binary files a/docker-for-windows/images/about-docker-win.png and /dev/null differ
diff --git a/docker-for-windows/images/chat.png b/docker-for-windows/images/chat.png
deleted file mode 100644
index 597db5aae9..0000000000
Binary files a/docker-for-windows/images/chat.png and /dev/null differ
diff --git a/docker-for-windows/images/config-popup-menu-win-switch-containers.png b/docker-for-windows/images/config-popup-menu-win-switch-containers.png
deleted file mode 100644
index 8a7a0ed12f..0000000000
Binary files a/docker-for-windows/images/config-popup-menu-win-switch-containers.png and /dev/null differ
diff --git a/docker-for-windows/images/config-popup-menu-win.png b/docker-for-windows/images/config-popup-menu-win.png
deleted file mode 100644
index 99a3461cd9..0000000000
Binary files a/docker-for-windows/images/config-popup-menu-win.png and /dev/null differ
diff --git a/docker-for-windows/images/desktop-whale-icon.png b/docker-for-windows/images/desktop-whale-icon.png
deleted file mode 100644
index 7781fe1770..0000000000
Binary files a/docker-for-windows/images/desktop-whale-icon.png and /dev/null differ
diff --git a/docker-for-windows/images/diagnose-d4win-issues-template.png b/docker-for-windows/images/diagnose-d4win-issues-template.png
deleted file mode 100644
index c0715a37c5..0000000000
Binary files a/docker-for-windows/images/diagnose-d4win-issues-template.png and /dev/null differ
diff --git a/docker-for-windows/images/diagnose-feedback-id-win.png b/docker-for-windows/images/diagnose-feedback-id-win.png
deleted file mode 100644
index 6f76d516f6..0000000000
Binary files a/docker-for-windows/images/diagnose-feedback-id-win.png and /dev/null differ
diff --git a/docker-for-windows/images/diagnose-feedback-win.png b/docker-for-windows/images/diagnose-feedback-win.png
deleted file mode 100644
index d4e68f2dde..0000000000
Binary files a/docker-for-windows/images/diagnose-feedback-win.png and /dev/null differ
diff --git a/docker-for-windows/images/docker-daemon.png b/docker-for-windows/images/docker-daemon.png
deleted file mode 100644
index 2ca6ab3c62..0000000000
Binary files a/docker-for-windows/images/docker-daemon.png and /dev/null differ
diff --git a/docker-for-windows/images/docker-is-running.png b/docker-for-windows/images/docker-is-running.png
deleted file mode 100644
index 06b2557ada..0000000000
Binary files a/docker-for-windows/images/docker-is-running.png and /dev/null differ
diff --git a/docker-for-windows/images/download.png b/docker-for-windows/images/download.png
deleted file mode 100644
index bfa896d89e..0000000000
Binary files a/docker-for-windows/images/download.png and /dev/null differ
diff --git a/docker-for-windows/images/hyper-v-message.png b/docker-for-windows/images/hyper-v-message.png
deleted file mode 100644
index cd3c557814..0000000000
Binary files a/docker-for-windows/images/hyper-v-message.png and /dev/null differ
diff --git a/docker-for-windows/images/import-docker-content.png b/docker-for-windows/images/import-docker-content.png
deleted file mode 100644
index 9b5fce2cbc..0000000000
Binary files a/docker-for-windows/images/import-docker-content.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-allow.png b/docker-for-windows/images/installer-allow.png
deleted file mode 100644
index 4fd2d4dcc3..0000000000
Binary files a/docker-for-windows/images/installer-allow.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-finishes.png b/docker-for-windows/images/installer-finishes.png
deleted file mode 100644
index daa793b35d..0000000000
Binary files a/docker-for-windows/images/installer-finishes.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-in-downloads.png b/docker-for-windows/images/installer-in-downloads.png
deleted file mode 100644
index f246d47911..0000000000
Binary files a/docker-for-windows/images/installer-in-downloads.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-license-ok.png b/docker-for-windows/images/installer-license-ok.png
deleted file mode 100644
index 49a3fe9b6f..0000000000
Binary files a/docker-for-windows/images/installer-license-ok.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-license-show.png b/docker-for-windows/images/installer-license-show.png
deleted file mode 100644
index 11e075a60e..0000000000
Binary files a/docker-for-windows/images/installer-license-show.png and /dev/null differ
diff --git a/docker-for-windows/images/installer-progress-bar.png b/docker-for-windows/images/installer-progress-bar.png
deleted file mode 100644
index d56cbff463..0000000000
Binary files a/docker-for-windows/images/installer-progress-bar.png and /dev/null differ
diff --git a/docker-for-windows/images/proxies.png b/docker-for-windows/images/proxies.png
deleted file mode 100644
index 1e2c2b125a..0000000000
Binary files a/docker-for-windows/images/proxies.png and /dev/null differ
diff --git a/docker-for-windows/images/run-nginx.png b/docker-for-windows/images/run-nginx.png
deleted file mode 100644
index 01f5fa66e5..0000000000
Binary files a/docker-for-windows/images/run-nginx.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-cpu-ram.png b/docker-for-windows/images/settings-cpu-ram.png
deleted file mode 100644
index 6eb39d3c46..0000000000
Binary files a/docker-for-windows/images/settings-cpu-ram.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-docker-win.png b/docker-for-windows/images/settings-docker-win.png
deleted file mode 100644
index 9847392c71..0000000000
Binary files a/docker-for-windows/images/settings-docker-win.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-general.png b/docker-for-windows/images/settings-general.png
deleted file mode 100644
index c92b672e0a..0000000000
Binary files a/docker-for-windows/images/settings-general.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-kernel.png b/docker-for-windows/images/settings-kernel.png
deleted file mode 100644
index 69e47658d9..0000000000
Binary files a/docker-for-windows/images/settings-kernel.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-network.png b/docker-for-windows/images/settings-network.png
deleted file mode 100644
index 52896a8e56..0000000000
Binary files a/docker-for-windows/images/settings-network.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-reset.png b/docker-for-windows/images/settings-reset.png
deleted file mode 100644
index 3dd220ec84..0000000000
Binary files a/docker-for-windows/images/settings-reset.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-shared-drives.png b/docker-for-windows/images/settings-shared-drives.png
deleted file mode 100644
index 4b81036d9b..0000000000
Binary files a/docker-for-windows/images/settings-shared-drives.png and /dev/null differ
diff --git a/docker-for-windows/images/settings-toolbox-import.png b/docker-for-windows/images/settings-toolbox-import.png
deleted file mode 100644
index 60fc6398b2..0000000000
Binary files a/docker-for-windows/images/settings-toolbox-import.png and /dev/null differ
diff --git a/docker-for-windows/images/submit-token.png b/docker-for-windows/images/submit-token.png
deleted file mode 100644
index 538460e9ce..0000000000
Binary files a/docker-for-windows/images/submit-token.png and /dev/null differ
diff --git a/docker-for-windows/images/whale-systray.png b/docker-for-windows/images/whale-systray.png
deleted file mode 100644
index ba6f8f1a8b..0000000000
Binary files a/docker-for-windows/images/whale-systray.png and /dev/null differ
diff --git a/docker-for-windows/images/whale-x.png b/docker-for-windows/images/whale-x.png
deleted file mode 100644
index c99e8d5898..0000000000
Binary files a/docker-for-windows/images/whale-x.png and /dev/null differ
diff --git a/docker-for-windows/images/win-file-and-printer-sharing.png b/docker-for-windows/images/win-file-and-printer-sharing.png
deleted file mode 100644
index 7f53c8bba9..0000000000
Binary files a/docker-for-windows/images/win-file-and-printer-sharing.png and /dev/null differ
diff --git a/docker-for-windows/images/win-install-success-hello-world.png b/docker-for-windows/images/win-install-success-hello-world.png
deleted file mode 100644
index d269096013..0000000000
Binary files a/docker-for-windows/images/win-install-success-hello-world.png and /dev/null differ
diff --git a/docker-for-windows/images/win-install-success-popup.png b/docker-for-windows/images/win-install-success-popup.png
deleted file mode 100644
index 69159c7fdf..0000000000
Binary files a/docker-for-windows/images/win-install-success-popup.png and /dev/null differ
diff --git a/docker-for-windows/images/win-install-success.png b/docker-for-windows/images/win-install-success.png
deleted file mode 100644
index a170536a37..0000000000
Binary files a/docker-for-windows/images/win-install-success.png and /dev/null differ
diff --git a/docker-for-windows/index.md b/docker-for-windows/index.md
deleted file mode 100644
index 9e64fc4cb9..0000000000
--- a/docker-for-windows/index.md
+++ /dev/null
@@ -1,429 +0,0 @@
-
-
-# Getting Started with Docker for Windows
-
-Welcome to Docker for Windows!
-
-Please read through these topics on how to get started. To **give us your feedback** on your experience with the app and report bugs or problems, log in to our [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows).
-
->**Already have Docker for Windows?** If you already have Docker for Windows installed, and are ready to get started, skip over to the [Getting Started with Docker](/engine/getstarted/index.md) tutorial.
-
-
-## Download Docker for Windows
-
-If you have not already done so, please install Docker for Windows. You can download installers from the stable or beta channel. For more about stable and beta channels, see the [FAQs](faqs.md#questions-about-stable-and-beta-channels).
-
-
-
-
Stable channel
-
Beta channel
-
-
-
This installer is fully baked and tested, and comes with the latest GA version of Docker Engine.
This is the best channel to use if you want a reliable platform to work with.
These releases follow a version schedule with a longer lead time than the betas, synched with Docker Engine releases and hotfixes.
-
-
This installer offers cutting edge features and comes with the experimental version of Docker Engine, which is described in the Docker Experimental Features README on GitHub.
This is the best channel to use if you want to experiment with features we are working on as they become available, and can weather some instability and bugs. This channel is a continuation of the beta program, where you can provide feedback as the apps evolve. Releases are typically more frequent than for stable, often one or more per month.
-
->**Important Notes**:
->
->* Docker for Windows requires 64bit Windows 10 Pro, Enterprise and Education (1511 November update, Build 10586 or later) and Microsoft Hyper-V. Please see [What to know before you install](#what-to-know-before-you-install) for a full list of prerequisites.
->
->* You can switch between beta and stable versions, but _you must have only one app installed at a time_. Also, you will need to save images and export containers you want to keep before uninstalling the current version before installing another. For more about this, see the [FAQs about beta and stable channels](faqs.md#questions-about-stable-and-beta-channels).
-
-## What to know before you install
-
-* **README FIRST for Docker Toolbox and Docker Machine users**: Docker for Windows requires Microsoft Hyper-V to run. After Hyper-V is enabled, VirtualBox will no longer work, but any VirtualBox VM images will remain. VirtualBox VMs created with `docker-machine` (including the `default` one typically created during Toolbox install) will no longer start. These VMs cannot be used side-by-side with Docker for Windows. However, you can still use `docker-machine` to manage remote VMs.
-
-* You can import a `default` VirtualBox VM after installing Docker for Windows by using the **Settings** menu in the System Tray.
-
-* The current version of Docker for Windows runs on 64bit Windows 10 Pro, Enterprise and Education (1511 November update, Build 10586 or later). In the future we will support more versions of Windows 10.
-
-* Containers and images created with Docker for Windows are shared between all user accounts on machines where it is installed. This is because all Windows accounts will use the same VM to build and run containers. In the future, Docker for Windows will better isolate user content.
-
-* The Hyper-V package must be enabled for Docker for Windows to work. The Docker for Windows installer will enable it for you, if needed. (This requires a reboot).
-
- >**Note**: If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle Virtual Box instead of Hyper-V.
-
-* **What the install includes**: The installation provides [Docker Engine](https://docs.docker.com/engine/userguide/intro/), Docker CLI client, [Docker Compose](https://docs.docker.com/compose/overview/), and [Docker Machine](https://docs.docker.com/machine/overview/).
-
-## Step 1. Install Docker for Windows
-
-1. Double-click `InstallDocker.msi` to run the installer.
-
- If you haven't already downloaded the installer (`InstallDocker.msi`), you can get it [**here**](https://download.docker.com/win/stable/InstallDocker.msi). It typically downloads to your `Downloads folder`, or you can run it from the recent downloads bar at the bottom of your web browser.
-
-2. Follow the install wizard to accept the license, authorize the installer, and proceed with the install.
-
- You will be asked to authorize `Docker.app` with your system password during the install process. Privileged access is needed to install networking components, links to the Docker apps, and manage the Hyper-V VMs.
-
-3. Click **Finish** on the setup complete dialog to launch Docker.
-
- 
-
-## Step 2. Start Docker for Windows
-
-When the installation finishes, Docker starts automatically.
-
-The whale in the status bar indicates that Docker is running, and accessible from a terminal.
-
-If you just installed the app, you also get a popup success message with suggested next steps, and a link to this documentation.
-
-
-
-When initialization is complete, select **About Docker** from the notification area icon to verify that you have the latest version.
-
-Congratulations! You are up and running with Docker for Windows.
-
-## Step 3. Check versions of Docker Engine, Compose, and Machine
-
-Start your favorite shell (`cmd.exe`, PowerShell, or other) to check your versions of `docker` and `docker-compose`, and verify the installation.
-
- PS C:\Users\samstevens> docker --version
- Docker version 1.12.0, build 8eab29e, experimental
-
- PS C:\Users\samstevens> docker-compose --version
- docker-compose version 1.8.0, build d988a55
-
- PS C:\Users\samstevens> docker-machine --version
- docker-machine version 0.8.0, build b85aac1
-
-## Step 4. Explore the application and run examples
-
-The next few steps take you through some examples. These are just suggestions for ways to experiment with Docker on your system, check version information, and make sure `docker` commands are working properly.
-
-1. Open a shell (`cmd.exe`, PowerShell, or other).
-
-2. Run some Docker commands, such as `docker ps`, `docker version`, and `docker info`.
-
- Here is the output of `docker ps` run in a powershell. (In this example, no containers are running yet.)
-
- PS C:\Users\samstevens> docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-
- Here is an example of command output for `docker version`.
-
- PS C:\Users\Vicky> docker version
- Client:
- Version: 1.12.0
- API version: 1.24
- Go version: go1.6.3
- Git commit: 8eab29e
- Built: Thu Jul 28 21:04:48 2016
- OS/Arch: windows/amd64
- Experimental: true
-
- Server:
- Version: 1.12.0
- API version: 1.24
- Go version: go1.6.3
- Git commit: 8eab29e
- Built: Thu Jul 28 21:04:48 2016
- OS/Arch: linux/amd64
- Experimental: true
-
- Here is an example of command output for `docker info`.
-
- PS C:\Users\Vicky> docker info
- Containers: 0
- Running: 0
- Paused: 0
- Stopped: 0
- Images: 0
- Server Version: 1.12.0
- Storage Driver: aufs
- Root Dir: /var/lib/docker/aufs
- Backing Filesystem: extfs
- Dirs: 0
- Dirperm1 Supported: true
- Logging Driver: json-file
- Cgroup Driver: cgroupfs
- Plugins:
- Volume: local
- Network: host bridge null overlay
- Swarm: inactive
- Runtimes: runc
- Default Runtime: runc
- Security Options: seccomp
- Kernel Version: 4.4.16-moby
- Operating System: Alpine Linux v3.4
- OSType: linux
- Architecture: x86_64
- CPUs: 2
- Total Memory: 1.95 GiB
- Name: moby
- ID: BG6O:2VMH:OLNV:DDLF:SCSV:URRH:BW6M:INBW:OLAC:J7PX:XZVL:ADNB
- Docker Root Dir: /var/lib/docker
- Debug Mode (client): false
- Debug Mode (server): false
- Registry: https://index.docker.io/v1/
- Experimental: true
- Insecure Registries:
- 127.0.0.0/8
-
- >**Note:** The outputs above are examples. Your output for commands like `docker version` and `docker info` will vary depending on your product versions (e.g., as you install newer versions).
-
-3. Run `docker run hello-world` to test pulling an image from Docker Hub and starting a container.
-
- PS C:\Users\samstevens> docker run hello-world
-
- Hello from Docker.
- This message shows that your installation appears to be working correctly.
-
- To generate this message, Docker took the following steps:
- 1. The Docker client contacted the Docker daemon.
- 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
- 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading.
- 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal.
-
-4. Try something more ambitious, and run an Ubuntu container in a Bash shell.
-
- $ docker run -it ubuntu bash
-
- PS C:\Users\samstevens> docker run -it ubuntu bash
- Unable to find image 'ubuntu:latest' locally
- latest: Pulling from library/ubuntu
- 5a132a7e7af1: Pull complete
- fd2731e4c50c: Pull complete
- 28a2f68d1120: Pull complete
- a3ed95caeb02: Pull complete
- Digest: sha256:4e85ebe01d056b43955250bbac22bdb8734271122e3c78d21e55ee235fc6802d
- Status: Downloaded newer image for ubuntu:latest
-
- Type `exit` to stop the container and close the Bash shell.
-
-5. For the pièce de résistance, start a Dockerized webserver with this command:
-
- docker run -d -p 80:80 --name webserver nginx
-
- This will download the `nginx` container image and start it. Here is the output of running this command in a powershell.
-
- PS C:\Users\samstevens> docker run -d -p 80:80 --name webserver nginx
- Unable to find image 'nginx:latest' locally
- latest: Pulling from library/nginx
-
- fdd5d7827f33: Pull complete
- a3ed95caeb02: Pull complete
- 716f7a5f3082: Pull complete
- 7b10f03a0309: Pull complete
- Digest: sha256:f6a001272d5d324c4c9f3f183e1b69e9e0ff12debeb7a092730d638c33e0de3e
- Status: Downloaded newer image for nginx:latest
- dfe13c68b3b86f01951af617df02be4897184cbf7a8b4d5caf1c3c5bd3fc267f
-
-6. Point your web browser at `http://localhost` to display the start page.
-
- (Since you specified the default HTTP port, it isn't necessary to append `:80` at the end of the URL.)
-
- 
-
-7. Run `docker ps` while your webserver is running to see details on the container.
-
- PS C:\Users\samstevens> docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS
- NAMES
- dfe13c68b3b8 nginx "nginx -g 'daemon off" 3 days ago Up 45 seconds 0.0.0.0:80->80/tcp, 443/tc
- p webserver
-
-8. Stop or remove containers and images.
-
- The `nginx` webserver will continue to run in the container on that port until you stop and/or remove the container. If you want to stop the webserver, type: `docker stop webserver` and start it again with `docker start webserver`.
-
- To stop and remove the running container with a single command, type: `docker rm -f webserver`. This will remove the container, but not the `nginx` image. You can list local images with `docker images`. You might want to keep some images around so that you don't have to pull them again from Docker Hub. To remove an image you no longer need, use `docker rmi |`. For example, `docker rmi nginx`.
-
-**Want more example applications?** - For more example walkthroughs that include setting up services and databases with Docker Compose, see [Example Applications](examples.md).
-
-## Docker Settings
-
-When Docker is running, the Docker whale is displayed in the system tray. If it is hidden, click the up arrow in the tray to show it.
-
-
-
-To get a popup menu with application options, right-click the whale:
-
-
-
-The **Settings** dialogs provide options to allow Docker auto-start, automatically check for updates, share local drives with Docker containers, enable VPN compatibilty, manage CPUs and memory Docker uses, restart Docker, or perform a factory reset.
-
-**Beta 26** includes an option to switch between Windows and Linux conatiners. See [Switch between Windows and Linux containers (Beta 26)](#switch-between-windows-and-linux-containers-beta-26). This is not yet available on stable builds.
-
-
-
-
-### General
-
-
-
-* **Start Docker when you log in** - Automatically start the Docker for Windows application upon Windows system login.
-
-* **Check for updates when the application starts** - Docker for Windows is set to automatically check for updates and notify you when an update is available. If an update is found, click **OK** to accept and install it (or cancel to keep the current version). Uncheck this option if you do not want notifications of version upgrades. You can still find out about updates manually by choosing **Check for Updates** from the menu.
-
-* **Send usage statistics** - You can set Docker for Windows to auto-send diagnostics, crash reports, and usage data. This information can help Docker improve the application and get more context for troubleshooting problems.
-
- Uncheck any of the options to opt out and prevent auto-send of data. Docker may prompt for more information in some cases, even with auto-send enabled. Also, you can enable or disable these auto-reporting settings with one click on the information popup when you first start Docker.
-
- 
-
-### Shared Drives
-
-Share your local drives (volumes) with Docker for Windows, so that they are available to your containers.
-
-
-
-You will be asked to provide your Windows system username and password (domain user) to apply shared drives. You can select an option to have Docker store the credentials so that you don't have to re-enter them every time.
-
-Permissions to access shared drives are tied to the credentials you provide here. If you run `docker` commands and tasks under a different username than the one used here to set up sharing, your containers will not have permissions to access the mounted volumes.
-
-See also [Verify domain user has permissions for shared drives](troubleshoot.md#verify-domain-user-has-permissions-for-shared-drives-volumes) in Troubleshooting.
-
-### Advanced
-
-
-
-* **CPUs** - Change the number of processors assigned to the Linux VM.
-
-* **Memory** - Change the amount of memory the Docker for Windows Linux VM uses.
-
-Please note, updating these settings requires a reconfiguration and reboot of the Linux VM. This will take a few seconds.
-
-### Network
-
-You can configure Docker for Windows networking to work on a virtual private network (VPN).
-
-* **Internal Virtual Switch** - You can specify a network address translation (NAT) prefix and subnet mask to enable internet connectivity.
-
-* **DNS Server** - You can configure the DNS server to use dynamic or static IP addressing.
-
-
-
->**Note:** Some users reported problems connecting to Docker Hub on Docker for Windows stable version. This would manifest as an error when trying to run `docker` commands that pull images from Docker Hub that are not alredy downloaded, such as a first time run of `docker run hello-world`. If you encounter this, reset the DNS server to use the Google DNS fixed address: `8.8.8.8`. For more information, see [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting.
-
-Note that updating these settings requires a reconfiguration and reboot of the Linux VM.
-
-### Proxies
-
-Docker for Windows lets you configure HTTP/HTTPS Proxy Settings and automatically propagate these to Docker and to your containers.
-For example, if you set your proxy settings to `http://proxy.example.com`, Docker will use this proxy when pulling containers.
-
-
-
-When you start a container, you will see that your proxy settings propagate into the containers. For example:
-
-```
-$ docker run -it alpine env
-PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-HOSTNAME=b7edf988b2b5
-TERM=xterm
-HOME=/root
-HTTP_PROXY=http://proxy.example.com:3128
-http_proxy=http://proxy.example.com:3128
-no_proxy=*.local, 169.254/16
-```
-
-You can see from the above output that the `HTTP_PROXY`, `http_proxy` and `no_proxy` environment variables are set.
-When your proxy configuration changes, Docker restarts automatically to pick up the new settings.
-If you have containers that you wish to keep running across restarts, you should consider using [restart policies](https://docs.docker.com/engine/reference/run/#restart-policies-restart)
-
-### Docker daemon
-You can configure options on the Docker daemon in the given JSON configuration file, and determine how your containers will run.
-
-For a full list of options on the Docker daemon, see daemon in the Docker Engine command line reference.
-
-
-
-Note that updating these settings requires a reconfiguration and reboot of the Linux VM.
-
-### Switch between Windows and Linux containers (Beta 26)
-
-Starting with Beta 26, you can select which daemon (Linux or Windows) the Docker CLI talks to. Select **Switch to Windows containers** to toggle to Windows containers. Select **Switch to Linux containers**.
-
-Microsoft Developer Network has preliminary/draft information on Windows containers [here](https://msdn.microsoft.com/en-us/virtualization/windowscontainers/about/about_overview).
-
-This feature is not yet available on stable builds.
-
-### Diagnose and Feedback
-
-If you encounter problems for which you do not find solutions in this documentation, searching [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues) already filed by other users, or on the [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows), we can help you troubleshoot the log data.
-
-Select **Upload a diagnostic**.
-
-This uploads (sends) the logs to Docker.
-
-
-
-To create a new issue directly on GitHub, open [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues) in your web browser and follow the instructions in the README. Click [New Issue](https://github.com/docker/for-win/issues/new) on that page (or right here ☺) to get a "create new issue" template prepopulated with sections for the ID and summary of your diagnostics, system and version details, description of expected and actual behavior, and steps to reproduce the issue.
-
-
-
-### Reset
-
-
-
-* **Restart Docker** - Shuts down and restarts the Docker application.
-
-* **Reset to Toolbox default machine content** - Imports containers and images from the existing Docker Toolbox machine named `default`. (This option is enabled only if you have Toolbox installed.) The VirtualBox VM will not be removed.
-
-* **Reset to factory defaults** - Resets Docker to factory defaults. This is useful in cases where Docker stops working or becomes unresponsive.
-
-
-
-## Where to go next
-
-* Try out the [Getting Started with Docker](/engine/getstarted/index.md) tutorial.
-
-* Dig in deeper with [learn by example](/engine/tutorials/index.md) tutorials on building images, runnning containers, networking, managing data, and storing images on Docker Hub.
-
-* See [Example Applications](examples.md) for example applications that include setting up services and databases in Docker Compose.
-
-* Interested in trying out the new [swarm mode](/engine/swarm/index.md) on Docker Engine v1.12?
-
- See [Get started with swarm mode](/engine/swarm/swarm-tutorial/index.md), a tutorial which includes specifics on how to leverage your Docker for Windows installation to run single and multi-node swarms.
-
- Also, try out the Swarm examples in [docker labs](https://github.com/docker/labs/tree/master/swarm-mode/beginner-tutorial). Run the `bash script` and follow the accompanying [Docker Swarm Tutorial](https://github.com/docker/labs/blob/master/swarm-mode/beginner-tutorial/README.md). The script uses Docker Machine to create a multi-node swarm, then walks you through various Swarm tasks and commands.
-
-* For a summary of Docker command line interface (CLI) commands, see [Docker CLI Reference Guide](/engine/reference/index.md).
-
-* Check out the blog posts on Docker for Mac and Docker for Windows public betas, and earlier posts on the intial private beta.
-
-* Please give feedback on your experience with the app and report bugs and problems by logging into our [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows).
-
-
-
diff --git a/docker-for-windows/menu.md b/docker-for-windows/menu.md
deleted file mode 100644
index 8274d96f76..0000000000
--- a/docker-for-windows/menu.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-# Docker for Mac and Docker for Windows
diff --git a/docker-for-windows/opensource.md b/docker-for-windows/opensource.md
deleted file mode 100644
index 3809964332..0000000000
--- a/docker-for-windows/opensource.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
-# Open Source Components and Licensing
-
-Docker Desktop Editions are built using open source software software. For details on the licensing, choose --> **About** from within the application, then click **Acknowlegdements**.
-
-Docker Desktop Editions distribute some components that are licensed under the GNU General Public License. You can download the source for these components [here](https://download.docker.com/opensource/License.tar.gz).
-
-
-
diff --git a/docker-for-windows/release-notes.md b/docker-for-windows/release-notes.md
deleted file mode 100644
index 62f846a701..0000000000
--- a/docker-for-windows/release-notes.md
+++ /dev/null
@@ -1,929 +0,0 @@
-
-
-# Docker for Windows Release Notes
-
-Here are the main improvements and issues per release, starting with the current release. The documentation is always updated for each release.
-
-For system requirements, please see the Getting Started topic on [What to know before you install](index.md#what-to-know-before-you-install).
-
-Release notes for _stable_ and _beta_ releases are listed below. You can learn about both kinds of releases, and download stable and beta product installers at [Download Docker for Windows](index.md#download-docker-for-windows).
-
-* [Stable Release Notes](#stable-release-notes)
-* [Beta Release Notes](#beta-release-notes)
-* [Alpha Release Notes](#alpha-release-notes)
-
-## Stable Release Notes
-
-### Docker for Windows 1.12.1, 2016-09-16 (stable)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**New**
-
-* To support trusted registry transparently, all trusted CAs (root or intermediate) on the Windows host are automatically copied to Moby
-
-* `Reset Credentials` will also unshare the shared drives
-
-* Logs are now rotated every day
-
-* Support multiple DNS servers
-
-* Added `mfsymlinks` SMB option to support symlinks on bind mounted folder
-
-* Added `nobrl` SMB option to support `sqlite` on bind mounted folders
-
-* Detect outdated versions of Kitematic
-
-**Upgrades**
-
-* Docker 1.12.1
-* Docker machine 0.8.1
-* Linux kernel 4.4.20
-* aufs 20160905
-
-**Bug fixes and minor changes**
-
-**General**
-
-* Uploading a diagnostic now shows a proper status message in the Settings
-
-* Docker will stop asking to import from Toolbox after an upgrade
-
-* Docker can now import from Toolbox just after HyperV is activated
-
-* Added more debug information to the diagnostics
-
-* Sending anonymous statistics shouldn't hang anymore when Mixpanel is not available
-
-* Support newlines in release notes
-
-* Improve error message when Docker daemon is not responding
-
-* The configuration database is now stored in-memory
-
-* Preserve the stacktrace of PowerShell errors
-
-* Display service stacktrace in error windows
-
-**Networking**
-
-* Improve name servers discovery
-* VpnKit supports search domains
-* VpnKit is now compiled with OCaml 4.03 rather than 4.02.3
-
-**Filesharing**
-
-* Set `cifs` version to 3.02
-
-* VnpKit: reduce the number of sockets used by UDP NAT, reduce the probability
-
-* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected
-
-* Fixed password handling for host file system sharing
-
-**Hyper-V**
-
-* Automatically disable lingering net adapters that prevent Docker from starting or using the network
-
-* Automatically delete duplicated MobyLinuxVMs on a `reset to factory defaults`
-
-* Improved the HyperV detection and activation mechanism
-
-**Moby**
-
-* Fixed Moby Diagnostics and Update Kernel
-
-* Use default `sysfs` settings, transparent huge pages disabled
-
-* `Cgroup` mount to support `systemd` in containers
-
-**Known issues**
-
-* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting.
-
-### Docker for Windows 1.12.0, 2016-07-28 (stable)
-
-* First stable release
-
-**Components**
-
-* Docker 1.12.0
-* Docker Machine 0.8.0
-* Docker Compose 1.8.0
-
-
-## Beta Release Notes
-
-### Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**New**
-
-* Reworked the File Sharing dialog and underlying mechanism
-* Pre-fill username
-* Faster and more reliable feedback when the user/password is not valid
-* Better support for domain users
-* Error message in Logs when File Sharing failed for other reasons
-
-**Upgrades**
-
-* Docker 1.12.2-rc1
-* Docker Machine 0.8.2
-* Docker Compose 1.8.1
-* kernel 4.4.21
-* aufs 20160912
-
-**Bug fixes and minor changes**
-
-* Improve the switching between Linux and Windows containers: better errors, more reliable, deal with more edge cases
-* Kill lingering dockerd that users might have still around because they played with Windows Containers before
-* Don't recreate the VM if only the DNS server is set
-* The uninstaller now kills the service if it failed to stop it properly
-* Restart VpnKit and DataKit when the processes die
-* VpnKit: impose a connection limit to avoid exhausting file descriptors
-* VpnKit: handle UDP datagrams larger than 2035 bytes
-* VpnKit: reduce the number of file descriptors consumed by DNS
-* Improve debug information
-
-### Beta 26 Release Notes (2016-09-14 1.12.1-beta26)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**New**
-
-* Basic support for Windows containers. On Windows 10 build >= 14372, a switch in the `systray` icon will change which daemon (Linux or Windows) the Docker CLI talks to
-
-* To support trusted registry transparently, all trusted CAs (root or intermediate) on the Windows host are automatically copied to Moby
-
-* `Reset Credentials` will also unshare the shared drives
-
-* Logs are now rotated every day
-
-**Upgrades**
-
-* Linux kernel 4.4.20
-* aufs 20160905
-
-**Bug fixes and minor changes**
-
-* We no longer send the same DNS settings twice to the daemon
-
-* Fixed the lingering net adapters removal on Windows 10 Anniversary Update
-
-* Uploading a diagnostic now shows a proper status message in the Settings
-
-### Beta 25 Release (2016-09-07 1.12.1-beta25)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**New**
-
-* Support multiple DNS servers
-
-**Bug fixes and minor changes**
-
-* Improved name servers discovery
-* VpnKit supports search domains
-* Set CIFS (common internet file system) version to 3.02
-
-**Known issues**
-
-* Only UTF-8 passwords are supported for host filesystem sharing
-
-* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting.
-
-### Beta 24 Release (2016-08-23 1.12.1-beta24)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**Upgrades**
-
-* Docker 1.12.1
-* Docker Machine 0.8.1
-* Linux kernel 4.4.19
-* aufs 20160822
-
-**Bug fixes and minor changes**
-
-* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected
-
-**Known issues**
-
-* Only UTF-8 passwords are supported for host filesystem sharing.
-
-* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting.
-
-### Beta 23 Release (2016-08-16 1.12.1-rc1-beta23)
-
->**Important Note**:
->
-> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here:
-
-> https://download.docker.com/win/beta/InstallDocker.msi
-
-> This problem is fixed as of Beta 23 for subsequent auto-updates.
-
-**New**
-
-* Added `mfsymlinks` smb option to support symlinks on bind mounted folder
-* Added `nobrl` smb option to support sqlite on bind mounted folders
-* Detect outdated versions of Kitematic
-
-**Upgrades**
-
-* Docker 1.12.1-rc1
-* Linux kernel 4.4.17
-* aufs 20160808
-
-**Bug fixes and minor changes**
-
-* Fixed password handling for host file system sharing
-* Automatically disable lingering net adapters that prevent Docker from starting or using the network
-* Automatically delete duplicated MobyLinuxVMs on a `reset to factory defaults`
-* Docker will stop asking to import from toolbox after an upgrade
-* Docker can now import from toolbox just after hyperV is activated
-* Fixed Moby Diagnostics and Update Kernel
-* Added more debug information to the diagnostics
-* Sending anonymous statistics shouldn't hang anymore when Mixpanel is not available
-* Improved the HyperV detection and activation mechanism
-* VpnKit is now compiled with OCaml 4.03 rather than 4.02.3
-* Support newlines in release notes
-* Improved error message when docker daemon is not responding
-* The configuration database is now stored in-memory
-* Preserve the stacktrace of PowerShell errors
-* Display service stacktrace in error windows
-* Moby: use default sysfs settings, transparent huge pages disabled
-* Moby: cgroup mount to support systemd in containers
-
-**Known issues**
-
-* Only UTF-8 passwords are supported for host filesystem sharing
-* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues).
-
-### Beta 22 Release (2016-08-11 1.12.0-beta22)
-
-Unreleased. See Beta 23 for changes.
-
-**Known issues**
-
-* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues).
-
-### Beta 21 Release (2016-07-28 1.12.0-beta21)
-
-**New**
-
-* Docker for Windows is now available from 2 channels: **stable** and **beta**. New features and bug fixes will go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [Getting Started](index.md#download-docker-for-windows).
-
-* Removed the docker host name. Containers with exported ports are reachable via localhost.
-
-* The UI shows whether the user is on beta or stable channel
-
-**Upgrades**
-
-* Docker 1.12.0 with experimental features
-* Docker Machine 0.8.0
-* Docker Compose 1.8.0
-
-**Bug fixes and minor changes**
-
-* Fixed networking issue when transmitting data to a container via an exposed port.
-* Include the sources for qemu-img
-* Fixed the migration from toolbox when the user has a space in its login
-* Disable the migration from toolbox when hyperV is not yet activated
-* More windows can be closed with ESC
-* Added the channel to crash reports
-* Fixed a path rewriting bug that happens on Windows insider build 14367
-* Simplified the MobyLinux.ps1 script
-
-**Known issues**
-
-* Older Kitematic versions are not compatible with Docker for Windows. You need to manually delete the `C:\Program Files\Docker\Kitematic` folder before you click **Open Kitematic...** systray link.
-
-### Beta 20 Release (2016-07-19 1.12.0-rc4-beta20)
-
-**New**
-
-* The UI option to disable port forwarding to `localhost` has been removed
-
-**Bug fixes and minor changes**
-
-* Fixed `docker.sock` permission issues
-* Don't check for update when the settings panel opens
-* Removed obsolete DNS workaround
-* Use the secondary DNS server in more circumstances
-* Limit the number of concurrent port forwards to avoid running out of resources
-* Store the database as a "bare" git repo to avoid corruption problems
-
-### Beta 19 Release (2016-07-14 1.12.0-rc4-beta19)
-
-**New**
-
-* Added an option to opt-out from sending usage statistics (will be available on the future stable channel)
-* New error dialog box to upload crash reports
-
-**Upgrades**
-
-* Docker 1.12.0 RC4
-* Docker Compose 1.8.0 RC2
-* Docker Machine 0.8.0 RC2
-* Linux kernel 4.4.15
-
-**Bug fixes and minor changes**
-
-* `com.docker.slirp`: included the DNS TCP fallback fix, required when UDP responses are truncated
-* `docker build/events/logs/stats...` won't leak when interrupted with Ctrl-C
-* Disable all buttons on Update Window when a version is downloading
-
-### Beta 18.1 Release (2016-07-07 1.12.0-rc3-beta18.1)
-
->**Note**: Docker 1.12.0 RC3 release introduces a backward incompatible change from RC2. You can fix this by [recreating or updating your containers](troubleshoot.md#recreate-or-update-your-containers-after-beta-18-upgrade) as described in Troubleshooting.
-
-**Hotfix**
-
-* Fixed issue resulting in error "Hijack is incompatible with use of CloseNotifier", reverts previous fix for `Ctrl-C` during build.
-
-**New**
-
-* Forwarding the ports to localhost is now the default
-* Added `http`/`https` proxy configuration to the settings
-* The toolbox default machine can be imported on first launch
-* Added UI when a crash report is collected and uploaded
-* The check for update runs every 6 hours
-
-**Upgrades**
-
-* Docker 1.12.0 RC3
-
-**Bug fixes and minor changes**
-
-* The docker API proxy was failing to deal with 1.12 features (health check for, for example)
-* When killing the VM process, ignore when the process is already stopped
-* When stopping the VM, always stop the docker proxy
-* Prevent the update windows from downloading the `.msi` into `C:\Program Files\Docker`
-* All settings should be disabled when Docker is starting. (This regression was introduced in Beta 17)
-* VPNKit: Improved scalability as number of network connections increases
-* Improve the connection to the database
-* Ignore when the shutdown service is not available
-
-### Beta 18 Release (2016-07-06 1.12.0-rc3-beta18)
-
-**New**
-
-* Forwarding the ports to localhost is now the default
-* Added `http`/`https` proxy configuration to the settings
-* The toolbox default machine can be imported on first launch
-* Added UI when a crash report is collected and uploaded
-* The check for update runs every 6 hours
-
-**Upgrades**
-
-* Docker 1.12.0 RC3
-
-**Bug fixes and minor changes**
-
-* Interrupting a `docker build` with Ctrl-C will actually stop the build
-* The docker API proxy was failing to deal with 1.12 features (health check for, for example)
-* When killing the VM process, ignore when the process is already stopped
-* When stopping the VM, always stop the docker proxy
-* Prevent the update windows from downloading the `.msi` into `C:\Program Files\Docker`
-* All settings should be disabled when Docker is starting. (This regression was introduced in Beta 17)
-* VPNKit: Improved scalability as number of network connections increases
-* Improve the connection to the database
-* Ignore when the shutdown service is not available
-
-### Beta 17 Release (2016-06-29 1.12.0-rc2-beta17)
-
-**Upgrades**
-
-* Linux kernel 4.4.14, aufs 20160627
-
-**Bug fixes and minor changes**
-
-* Support users with spaces in their login
-* Fix some cases where `dotnet restore` could hang
-* Fixed `docker inspect` on an image
-* Removed the console from hyper-v manager
-* Improved diagnostic for VPN connection and add logs for the service port openers
-* Improve Moby's boot sequence to adapt to longer boot time when swarm services are running
-* Forcefully turn off a VM that won't shut down
-* Clicking on a link from the changelog opens a browser
-* Fix links to the documentation
-* Fix the url to download Kitematic
-* Renewed the signing certificates
-* Fixed errors with the firewall and the network switch
-* Fixed parsing errors in the Powershell script
-
-### Beta 16 Release (2016-06-17 1.12.0-rc2-beta16)
-
-**Upgrades**
-
-* Docker 1.12.0 RC2
-* docker-compose 1.8.0 RC1
-* docker-machine 0.8.0 RC1
-* Alpine 3.4
-
-**Bug fixes and minor changes**
-
-* Fixes to the VPN mode
-* Fixed the localhost port forwarding performance issue
-* Auto-detect mounted/unmounted drive in the list of shares
-* Changed the name of the application from "DockerforWindows" to "Docker for Windows"
-* Avoid multiple update windows being displayed at the same time
-
-### Beta 15 Release (2016-06-10 1.11.2-beta15)
-
-**New**
-
-* New experimental networking mode, exposing container ports on `localhost`
-* New Settings menu to configure sysctl.conf
-* New Settings menu to configure http proxies
-* The VPN mode setting is removed (VPN mode is now the only supported mode)
-* The vSwitch NAT configuration has been removed
-
-**Upgrades**
-
-* Docker 1.11.2
-* Linux 4.4.12, aufs 20160530
-
-**Bug fixes and minor changes**
-
-* Moved `Import from toolbox` option to the General Settings
-* Increased the timeout to write to the configuration database
-* Fixed an issue where sending anonymous stats to Mixpanel made the application stop
-* Faster boot time
-* All named pipes are now prefixed with the word `docker`
-* Full version number is now displayed in the update window
-* Default daemon config does not have debug enabled anymore
-* More responsive Settings Panel, with new whales also :-)
-* Improved logs and debug information
-
-### Beta 14 Release(2016-06-02 1.11.1-beta14)
-
-**New**
-
-* Enabled configuration of the docker daemon (edit `config.json`)
-* The VPN mode is enabled by default
-* Removed DHCP for VM network configuration
-* User configurable NAT prefix and DNS server
-* New feedback window to upload diagnostics dialog
-* New status indicator in **Settings** window
-* VM logs are uploaded with a crash report
-* Animated welcome whale
-
-**Bug fixes and minor changes**
-
-* Support non-ASCII characters in passwords
-* Fixed unshare a drive operation
-* Fixed deserialization of exceptions sent from the service
-* If the backend service is not running, the GUI now starts it
-* The app no longer complains if the backend service is not running and the user just wants to shut down.
-
-
-**Known issues**
-
-* Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [NAT Configuration](troubleshoot.md#nat-configuration) in [Troubleshooting](troubleshoot.md) for more details.
-
-### Beta 13 Release (2016-05-25 1.11.1-beta13)
-
-**New**
-
-This Beta release includes some significant changes:
-
-* Docker communication is over Hyper-V sockets instead of the network
-* Experimental VPN mode, also known as `vpnkit`
-* Initial support for `datakit` for configuration
-* Redesigned Settings panel
-* Docker can now be restarted
-
-**Bug fixes and minor changes**
-
-* Support Net adapters with a different name than "vEthernet (DockerNAT)"
-* Sharing now has a better support for domain users
-* Fixed Toolbox migration (was broken in Beta12)
-* Enabling HyperV (was broken in Beta12)
-* Fixed error message when invalid labels are passed to `docker run`
-* Mixpanel no longer uses roaming App Data
-* UI improvements
-* Support was added for VMs with other IP addresses out of the `10.0.75.0/24` range
-* Improved FAQ
-
-**Known issues**
-
-* Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [NAT Configuration](troubleshoot.md#nat-configuration) in [Troubleshooting](troubleshoot.md) for more details.
-
-### Beta 12 Release (2016-05-17 1.11.1-beta12)
-
-**New**
-
-* The application is now separated in two parts. A back-end service and a front-end GUI. The front-end GUI no longer asks for elevated access.
-
-**Bug fixes and minor changes**
-
-* Excluded the network drives from the shares list
-* Removed the notification when closing the application
-* Minor GUI improvements
-
-**Known issues**
-
-* Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [NAT Configuration](troubleshoot.md#nat-configuration) in [Troubleshooting](troubleshoot.md) for more details.
-
-
-### Beta 11b Hot Fix Release (2016-05-11 1.11.1-beta11b)
-
-**Hot fixes**
-
-* Fixed an issue with named pipe permissions that prevented Docker from starting
-
-### Beta 11 Release (2016-05-10 1.11.1-beta11)
-
-**New**
-
-* The GUI now runs in non-elevated mode and connects to an elevated Windows service
-* Allocate VM memory by 256 MB increments, instead of 1 GB
-* Show a meaningful error when the user has an empty password
-* Improved [Troubleshooting](troubleshoot.md) page
-
-**Upgrades**
-
-* docker-compose 1.7.1 (see changelog)
-* Kernel 4.4.9
-
-**Bug fixes and minor changes**
-
-* Report the VM's IP in `docker port`
-* Handle passwords with spaces
-* Show a clear error message when trying to install on Home editions
-* Slower whale animation in the System Tray
-* Proxy is restarting itself when it crashes
-* DHCP process handles exceptions gracefully
-* Moby (Backend) fixes:
- - Fixed `vsock` half closed issue
- - Added NFS support
- - Hostname is now Moby, not Docker
- - Fixes to disk formatting scripts
- - Kernel upgrade to 4.4.9
-
-**Known issues**
-
-* Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [Troubleshooting](troubleshoot.md) for more details.
-
-* Logs for the windows service are not aggregated with logs from the GUI. This will be fixed in future versions.
-
-
-## Beta 10 Release (2016-05-03 1.11.0-beta10)
-
-**New**
-
-* Improved Settings panel, allow to configure the VM’s memory and CPUs
-* Co-exist with multiple internal Hyper-V switches and improved DHCP handling
-* Token validation is now done over HTTPS. This should fix issues with some firewalls and antivirus software.
-
-**Upgrades**
-
-* Docker 1.11.1
-
-**Bug fixes and minor changes**
-
-* Fixed Desktop shortcut name and updated icons
-* Preparation to run the backend as service
-* Improved logging and Mixpanel events
-* Improved code quality
-* Improved the build
-* New icons
-
-**Known issues**
-
-* Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [Troubleshooting](troubleshoot.md) for more details.
-
-
-### Beta 9 Release (2016-04-26 1.11.0-beta9)
-
-**New**
-
-* Provide one-click dialog to enable Hyper-V
-* Report clear underlying Hyper-V errors
-
-**Bug fixes and minor changes**
-
-* Better handling of some networking issues
-* Fixed help menu and start menu getting started URLs
-* Restored “Docker is Initializing” notification on first run
-* Better error messages during authentication
-* Improved logging on error conditions
-* Improved build and tests
-
-**Known issues**
-
-* If multiple internal Hyper-V switches exist the Moby VM
-may not start correctly. We have identified the issue and
-are working on a solution.
-
-### Beta 8 Release (2016-04-20 1.11.0-beta8)
-
-**New**
-
-* Auto-update is installed silently, and relaunches the application when it completes
-* Uninstaller can be found in Windows menu
-* Kitematic can be downloaded from the Dashboard menu
-
-**Bug fixes and minor changes**
-
-* Better UI in the ShareDrive window
-* The firewall alert dialog will not come up as often as it was
-* Configured MobyLinux VM with a fixed memory of 2GB
-* User password is no longer stored on the host-side KVP
-* Uninstall shortcut is available in registry
-
-### Beta 7 Release (2016-04-12 1.11.0-beta7)
-
-**New**
-
- - Multiple drives can be shared
- - New update window
- - Welcome whale
-
-**Upgrades**
-
-* docker 1.11.0-rc5
-* docker-machine 0.7.0-rc3
-* docker-compose 1.7.0-rc2
-
-**Bug fixes and minor changes**
-
-* Improved networking configuration and error detection: fixed DHCP renewal and rebind issues
-* Allow DNS/DHCP processes to restart on bind error
-* Less destructive migration from Docker Toolbox
-* Improved documentation
-* Better error handling: Moby will restart itself if start takes too long.
-* Kill proxy and exit docker before a new version is installed
-* The application cannot start twice now
-* The proxy will stop automatically when the GUI is not running
-* Removed existing proxy firewall rules before starting Moby
-* The application now collects more and better information on crashes and other issues
-* Improved all dialogs and windows
-* Added the version to installer's first screen
-* Better reset to defaults
-* New regression test framework
-* The installation MSI is now timestamped
-* The Hyper-V install mentions Docker Toolbox only if it is present
-* Improved Bugsnag reports: fixed a dependency bug, and added a unique ID to each new report
-* Improved the build
-* Improved code quality
-
-**Known issues**
-
-- Settings are now serialized in JSON. This install will lose the current settings.
-
-- Docker needs to open ports on the firewall. Sometimes, the user will see a firewall alert dialog. The user should allow the ports to be opened.
-
-- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. Users might have to close any Powershell/Cmd windows that were already open before the update to get the new `PATH`. In some cases, users may need to log off and on again.
-
-**Bug Fixes**
-
- - Fixed DHCP renewal and rebind
- - Only mention toolbox on Hyper-V install if it's present
- - The application does not start twice now
- - DNS/DHCP processes are allowed to restart on bind error now
- - Removed the window that opens quickly during bugsnag reports
- - Fixed OS reported by Bugsnag
- - Improved the build
- - Improved code quality
-
-### Beta 6 Release (2016-04-05 1.11.0.1288)
-
-**Enhancements**
-
-- Docs are updated for Beta 6!
-- Support roaming: DNS queries are forwarded to the host
-- Improved startup times by running a DHCP server on the host
-- New settings dialog design
-- Support windows paths with -v
-- Updated docker CLI and daemon to 1.11.0-rc3
-- Updated docker-machine to 0.7.0-rc2
-- Updated docker-compose to 1.7.0-rc1
-- Now install docker-credential-wincred
-- Allow non-root users in containers to create files on volume mounts
-- Automatically install HyperV
-- The application is now 64bits
-- Improved wording in all dialog boxes and error messages
-- Removed exit confirmation
-- Show clickable URL in the Install HyperV message box
-- Dashboard link to Kitematic (as on Mac)
-- Moby Kernel updated to 4.4.6
-- The registry key was changed to HKLM\SOFTWARE\Docker Inc.\Docker\1.0
-
-**Known issues**
-
-- Migration from Docker Toolbox can fail sometimes. If this happens, the workaround is to restart the application.
-
-- Docker needs to open ports on the firewall, which can activate a firewall alert dialog. Users should allow the ports to be opened.
-
-- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. If users have Powershell/Cmd windows already open before the update, they might have to close them to catch the new PATH. In some cases, users will need to log off and on again.
-
-**Bug Fixes**
-
-- Kill VMs that cannot be shutdown properly
-
-- Improved the diagnostic information sent with bugsnag reports
-
-- Settings window shows when the drive is shared or not
-
-- `C:` drive can be bind mounted with `//c` or `/c`. Used to be `//c/`
-
-- Don't try to submit empty tokens
-
-- Fixed the version shown in the About box
-
-- Fixed a race condition on the logs
-
-- Fixed a race condition on the settings
-
-- Fixed broken links in the documentation
-
-- Replaced `sha1` with actual version in the assemblies
-
-- Don't start the unused agent process
-
-### Beta 5 Release (2016-03-29 1.10.6)
-
-**Enhancements**
-
-* Remove debug console
-* Open browser with hyper-v installation instructions
-* Add Cloudfront for downloads from Europe
-* Capture qemu logs during toolbox upgrades
-* Rename alpha distribution channel to beta
-
-**Bug Fixes**
-
-* Fix diagnose section in bugsnag report
-* Fix msi version
-* Don't truncate Toolbox link
-
->**Note**: Docker for Windows skipped from Beta 1 to Beta 5 at this point to synch up the version numbering with Docker for Mac, which went into beta cycles a little earlier.
-
-### Beta 1 Release (2016-03-24 1.10.6)
-
-**Enhancements**
-
-- Display the third party licenses
-- Display the license agreement
-- The application will refuse to start if Hyper-v is not enabled
-- Rename `console` to `debug console`
-- Remove `machine` from notification
-- Open the feedback forum
-- Use same MixPanel project for Windows and OSX
-- Align MixPanel events with OSX
-- Added a script to diagnose problems
-- Submit diagnostic with bugsnag reports
-- MixPanel heartbeat every hour
-
-**Bug Fixes**
-
-- Accept all versions of Enterprise 10, Pro 10 and Education 10 during installation (Eval, N, ...)
-- Fix Linux kernel crashes with certain applications or somesuch
-- Fix notifications that are not shown
-- Animate the systray whale on reset
-- Shorten the enrollment process timeout
-- Properly unmount shares when the user un-selects the setting
-- Don't install on unsupported builds
-
-## Alpha Release Notes
-
-### Alpha 4 Release (2016-03-10 1.10.4.0)
-
-- Faster Startup & Shutdown
-- Use host DNS parameters
-- Enrollment System
-- Recreating manually removed vm
-- More MixPanel Events
-- Various Bug Fixes
-
-### Alpha 3 Release (2016-03-03 1.10.2.14)
-
-**File sharing**
-
- - Create network share automatically
- - Improve Credentials management
- - Support paths with c and C drive
-
-**Crashes and Analytics**
-
- - Report crashes with Bugsnag
- - Send analytics through MixPanel
-
-**GUI**
-
- - Improve layout of About and Settings dialog
- - Improve Updater
- - Link to *Help*
 - Link to *Send Feedback*
-
-**General**
-
- - Bug fixes
-
-### Alpha 2 Release (2016-02-26 1.10.2.12)
-
-**Installer**
-
- - Enhancements
- - Auto-update
- - License agreement
-
-**General**
-
- - Bug fixes
-
-### Alpha 1 Release (2016-02-22 1.10.1.42-1)
-
-**Hypervisor**
-
- - significant performance improvements
-
-**Security**
-
- - retrieving Credentials from user
-
-**Filesystem**
-
- - hot-mounting host filesystem with credential
-
-**General**
-
- - state management
- - stability, logging
- - bugfixes, eye candies
-
-### Alpha 0 Release (2016-02-09 1.10.0.0-0)
-
-**Hypervisor**
-
- - hyper-v backed virtual machines
- - boots moby in a few seconds
- - installs CLI in `PATH`
- - proxies docker commands to moby
-
-**Filesystem**
-
- - mounts host filesystem to support `--volume`
- - samba client with a hardcoded password
- - allows live reload
-
-**Networking**
-
- - live debugging Node.js application
-
-
-
diff --git a/docker-for-windows/troubleshoot.md b/docker-for-windows/troubleshoot.md
deleted file mode 100644
index 0efe9ef327..0000000000
--- a/docker-for-windows/troubleshoot.md
+++ /dev/null
@@ -1,300 +0,0 @@
-
-
-# Logs and Troubleshooting
-
-Here is information about how to diagnose and troubleshoot problems, send logs and communicate with the Docker for Windows team, use our forums and Knowledge Hub, browse and log issues on GitHub, and find workarounds for known problems.
-
-## Docker Knowledge Hub
-
-**Looking for help with Docker for Windows?** Check out the [Docker Knowledge Hub](http://success.docker.com/) for knowledge base articles, FAQs, and technical support for various subscription levels.
-
-## Submitting diagnostics, feedback, and GitHub issues
-
-If you encounter problems for which you do not find solutions in this documentation or on the [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows), we can help you troubleshoot the log data. See [Diagnose and Feedback](index.md#diagnose-and-feedback) in the Getting Started topic.
-
-
-## Checking the Logs
-
-In addition to using the diagnose and feedback option to submit logs, you can browse the logs yourself.
-
-### Use the systray menu to view logs
-
-To view Docker for Windows latest log, click on the `Diagnose & Feedback` menu entry in the systray and then on the `Log file` link. You can see the full history of logs in your `AppData\Local` folder.
-
-### Use the systray menu to report an issue
-
-If you encounter an issue and the suggested troubleshoot procedures outlined below don't fix it you can generate a diagnostics report. Click on the `Diagnose & Feedback` menu entry in the systray and then on the `Upload diagnostic...` link. This will upload diagnostics to our server and provide you with a unique ID you can use in email or the forum to reference the upload.
-
-
-
-## Troubleshooting
-
-### inotify on shared drives does not work
-
-Currently, `inotify` does not work on Docker for Windows. This will become evident, for example, when an application needs to read/write to a container across a mounted drive. This is a known issue that the team is working on. Below is a temporary workaround, and a link to the issue.
-
-* **Workaround for nodemon and Node.js** - If you are using [nodemon](https://github.com/remy/nodemon) with `Node.js`, try the fallback polling mode described here: [nodemon isn't restarting node applications](https://github.com/remy/nodemon#application-isnt-restarting)
-
-* **Docker for Windows issue on GitHub** - See the issue [Inotify on shared drives does not work](https://github.com/docker/for-win/issues/56#issuecomment-242135705)
-
-
-### Verify domain user has permissions for shared drives (volumes)
-
-Permissions to access shared drives are tied to the username and password you use to set up shared drives. (See [Shared Drives](index.md#shared-drives).) If you run `docker` commands and tasks under a different username than the one used to set up shared drives, your containers will not have permissions to access the mounted volumes. The volumes will show as empty.
-
-The solution to this is to switch to the domain user account and reset credentials on shared drives.
-
-Here is an example of how to de-bug this problem, given a scenario where you shared the `C` drive as a local user instead of as the domain user. Assume the local user is `samstevens` and the domain user is `merlin`.
-
-1. Make sure you are logged in as the Windows domain user (for our example, `merlin`).
-
-2. Run `net share c` to view user permissions for `<user name>, FULL`.
-
- PS C:\WINDOWS\system32> net share c
- Share name C
- Path C:\
- Remark
- Maximum users No limit
- Users SAMSTEVENS
- Caching Caching disabled
- Permission windowsbox\samstevens, FULL
-
-3. Run the following command to remove the share.
-
- net share c /delete
-
-4. Re-share the drive via the [Shared Drives dialog](index.md#shared-drives), and provide the Windows domain user account credentials.
-
-5. Re-run `net share c`.
-
- PS C:\WINDOWS\system32> net share c
- Share name C
- Path C:\
- Remark
- Maximum users No limit
- Users MERLIN
- Caching Caching disabled
- Permission windowsbox\merlin, FULL
-
-See also, the related issue on GitHub, [Mounted volumes are empty in the container](https://github.com/docker/for-win/issues/25).
-
-### Avoid unexpected syntax errors, use Unix style line endings for files in containers
-
-Any file destined to run inside a container must use Unix style `\n` line endings. This includes files referenced at the command line for builds and in RUN commands in Docker files.
-
-Docker containers and `docker build` run in a Unix environment, so files in containers must use Unix style line endings `\n`, _not_ Windows style: `\r\n`. Keep this in mind when authoring files such as shell scripts using Windows tools, where the default is likely to be Windows style line endings. These commands ultimately get passed to Unix commands inside a Unix based container (for example, a shell script passed to `/bin/sh`). If Windows style line endings are used, `docker run` will fail with syntax errors.
-
-For an example of this issue and the resolution, see this issue on GitHub: Docker RUN fails to execute shell script (https://github.com/docker/docker/issues/24388).
-
-### Recreate or update your containers after Beta 18 upgrade
-
-Docker 1.12.0 RC3 release introduces a backward incompatible change from RC2 to RC3. (For more information, see https://github.com/docker/docker/issues/24343#issuecomment-230623542.)
-
-You may get the following error when you try to start a container created with pre-Beta 18 Docker for Windows applications.
-
- Error response from daemon: Unknown runtime specified default
-
-You can fix this by either [recreating](#recreate-your-containers) or [updating](#update-your-containers) your containers.
-
-If you get the error message shown above, we recommend recreating them.
-
-#### Recreate your containers
-
-To recreate your containers, use Docker Compose.
-
- docker-compose down && docker-compose up
-
-#### Update your containers
-
-To fix existing containers, follow these steps.
-
-1. Run this command.
-
- $ docker run --rm -v /var/lib/docker:/docker cpuguy83/docker112rc3-runtimefix:rc3
-
- Unable to find image 'cpuguy83/docker112rc3-runtimefix:rc3' locally
- rc3: Pulling from cpuguy83/docker112rc3-runtimefix
- 91e7f9981d55: Pull complete
- Digest: sha256:96abed3f7a7a574774400ff20c6808aac37d37d787d1164d332675392675005c
- Status: Downloaded newer image for cpuguy83/docker112rc3-runtimefix:rc3
- proccessed 1648f773f92e8a4aad508a45088ca9137c3103457b48be1afb3fd8b4369e5140
- skipping container '433ba7ead89ba645efe9b5fff578e674aabba95d6dcb3910c9ad7f1a5c6b4538': already fixed
- proccessed 43df7f2ac8fc912046dfc48cf5d599018af8f60fee50eb7b09c1e10147758f06
- proccessed 65204cfa00b1b6679536c6ac72cdde1dbb43049af208973030b6d91356166958
- proccessed 66a72622e306450fd07f2b3a833355379884b7a6165b7527c10390c36536d82d
- proccessed 9d196e78390eeb44d3b354d24e25225d045f33f1666243466b3ed42fe670245c
- proccessed b9a0ecfe2ed9d561463251aa90fd1442299bcd9ea191a17055b01c6a00533b05
- proccessed c129a775c3fa3b6337e13b50aea84e4977c1774994be1f50ff13cbe60de9ac76
- proccessed dea73dc21126434f14c58b83140bf6470aa67e622daa85603a13bc48af7f8b04
- proccessed dfa8f9278642ab0f3e82ee8e4ad029587aafef9571ff50190e83757c03b4216c
- proccessed ee5bf706b6600a46e5d26327b13c3c1c5f7b261313438d47318702ff6ed8b30b
-
-2. Quit Docker.
-
-3. Start Docker.
-
- > **Note:** Be sure to quit and then restart Docker for Windows before attempting to start containers.
-
-4. Try to start the container again:
-
- $ docker start old-container
- old-container
-
-### Hyper-V
-Docker for Windows requires Hyper-V as well as the Hyper-V Module for Windows Powershell to be installed and enabled. See [these instructions](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) to install Hyper-V manually. A reboot is *required*. If you install Hyper-V without the reboot, Docker for Windows will not work correctly. On some systems, Virtualization needs to be enabled in the BIOS. The steps to do so are Vendor specific, but typically the BIOS option is called `Virtualization Technology (VTx)` or similar.
-
-### Networking issues
-
-Some users have reported problems connecting to Docker Hub on the Docker for Windows stable version. (See GitHub issue [22567](https://github.com/docker/docker/issues/22567).)
-
-Here is an example command and error message:
-
- PS C:\WINDOWS\system32> docker run hello-world
- Unable to find image 'hello-world:latest' locally
- Pulling repository docker.io/library/hello-world
- C:\Program Files\Docker\Docker\Resources\bin\docker.exe: Error while pulling image: Get https://index.docker.io/v1/repositories/library/hello-world/images: dial tcp: lookup index.docker.io on 10.0.75.1:53: no such host.
- See 'C:\Program Files\Docker\Docker\Resources\bin\docker.exe run --help'.
-
-As an immediate workaround to this problem, reset the DNS server to use the Google DNS fixed address: `8.8.8.8`. You can configure this via the **Settings** -> **Network** dialog, as described in the topic [Network](index.md#network). Docker will automatically restart when you apply this setting, which could take some time.
-
-We are currently investigating this issue.
-
-#### Networking issues on pre Beta 10 versions
-Docker for Windows Beta 10 and later fixed a number of issues around the networking setup. If you still experience networking issue, this may be related to previous Docker for Windows installations. In this case, please quit Docker for Windows and perform the following steps:
-
-##### 1. Remove multiple `DockerNAT` VMswitches
-You might have multiple Internal VMSwitches called `DockerNAT`. You can view all VMSwitches either via the `Hyper-V Manager` sub-menu `Virtual Switch Manager` or from an elevated Powershell (run as Administrator) prompt by typing `Get-VMSwitch`. Simply delete all VMSwitches with `DockerNAT` in the name, either via the `Virtual Switch Manager` or by using `Remove-VMSwitch` powershell cmdlet.
-
-##### 2. Remove lingering IP addresses
-
-You might have lingering IP addresses on the system. They are supposed to get removed when you remove the associated VMSwitches, but sometimes this fails. Using `Remove-NetIPAddress 10.0.75.1` in an elevated Powershell prompt should remove them.
-
-##### 3. Remove stale NAT configurations
-
-You might have stale NAT configurations on the system. You should remove them with `Remove-NetNat DockerNAT` on an elevated Powershell prompt.
-
-##### 4. Remove stale network adapters
-
-You might have stale Network Adapters on the system. You should remove them with the following commands on an elevated Powershell prompt:
-
- $vmNetAdapter = Get-VMNetworkAdapter -ManagementOS -SwitchName DockerNAT
- Get-NetAdapter "vEthernet (DockerNAT)" | ? { $_.DeviceID -ne $vmNetAdapter.DeviceID } | Disable-NetAdapter -Confirm:$False -PassThru | Rename-NetAdapter -NewName "Broken Docker Adapter"
-
-Then you can remove them manually via the `devmgmt.msc` (aka Device Manager). You should see them as disabled Hyper-V Virtual Ethernet Adapter under the Network Adapter section. A right-click and selecting **uninstall** should remove the adapter.
-
-### NAT/IP configuration
-
-By default, Docker for Windows uses an internal network prefix of `10.0.75.0/24`. Should this clash with your normal network setup, you can change the prefix from the **Settings** menu. See the [Network](index.md#network) topic under [Settings](index.md#docker-settings).
-
-#### NAT/IP configuration issues on pre Beta 15 versions
-
-As of Beta 15, Docker for Windows is no longer using a switch with a NAT configuration. The notes below are left here only for older Beta versions.
-
-As of Beta14, networking for Docker for Windows is configurable through the UI. See the [Network](index.md#network) topic under [Settings](index.md#docker-settings).
-
-By default, Docker for Windows uses an internal Hyper-V switch with a NAT configuration with a `10.0.75.0/24` prefix. You can change the prefix used (as well as the DNS server) via the **Settings** menu as described in the [Network](index.md#network) topic.
-
-If you have additional Hyper-V VMs and they are attached to their own NAT prefixes, the prefixes need to be managed carefully, due to limitation of the Windows NAT implementation. Specifically, Windows currently only allows a single internal NAT prefix. If you need additional prefixes for your other VMs, you can create a larger NAT prefix.
-
-To create a larger NAT prefix, do the following.
-
-1. Stop Docker for Windows and remove all NAT prefixes with `Remove-NetNAT`.
-
-2. Create a new shorter NAT prefix which covers the Docker for Windows NAT prefix but allows room for additional NAT prefixes. For example:
-
- New-NetNat -Name DockerNAT -InternalIPInterfaceAddressPrefix 10.0.0.0/16
-
- The next time Docker for Windows starts, it will use the new, wider prefix.
-
-Alternatively, you can use a different NAT name and NAT prefix and adjust the NAT prefix Docker for Windows uses accordingly via the `Settings` panel.
-
->**Note**: You also need to adjust your existing VMs to use IP addresses from within the new NAT prefix.
-
-
-### Host filesystem Sharing
-
-The Linux VM used for Docker for Windows uses SMB/CIFS mounting of the host filesystem. In order to use this feature you must explicitly enable it via the `Settings` menu. You will get prompted for your Username and Password.
-
-Unfortunately, this setup does not support passwords which contain Unicode characters, so your password must be 8-bit ASCII only.
-
-The setup also does not support empty password, so you should set a password if you want to use the host filesystem sharing feature. Beta 11 and newer of Docker for Windows will display a warning, but versions earlier will not.
-
-Note, releases of Docker for Windows prior to Beta 11 also did not support spaces in the password and username, but this has been fixed with Beta 11.
-
-Please make sure that "File and printer sharing" is enabled in `Control Panel\Network and Internet\Network and Sharing Center\Advanced sharing settings`.
-
-
-
-## Workarounds
-
-### `inotify` currently does not work on Docker for Windows
-
-If you are using `Node.js` with `nodemon`, a temporary workaround is to try the fallback polling mode described here: [nodemon isn't restarting node applications](https://github.com/remy/nodemon#application-isnt-restarting). See also this issue on GitHub [Inotify on shared drives does not work](https://github.com/docker/for-win/issues/56#issuecomment-242135705).
-
-### Reboot
-
-Restart your PC to stop / discard any vestige of the daemon running from the previously installed version.
-
-### Unset `DOCKER_HOST`
-
-You do not need `DOCKER_HOST` set, so unset as it may be pointing at
-another Docker (e.g. VirtualBox). If you use bash, `unset ${!DOCKER_*}`
-will unset existing `DOCKER` environment variables you have set. For other shells, unset each environment variable individually.
-
-### Make sure Docker is running for webserver examples
-
-For the `hello-world-nginx` example and others, Docker for Windows must be running in order to get to the webserver on `http://localhost/`. Make sure that the Docker whale is showing in the menu bar, and that you run the Docker commands in a shell that is connected to the Docker for Windows Engine (not Engine from Toolbox). Otherwise, you might start the webserver container but get a "web page not available" error when you go to `docker`. For more on distinguishing between the two environments, see "Running Docker for Windows and Docker Toolbox" in [Getting Started](index.md).
-
-### How to solve `port already allocated` errors
-
-If you see errors like `Bind for 0.0.0.0:8080 failed: port is already allocated` or
- `listen tcp:0.0.0.0:8080: bind: address is already in use` ...
-
-These errors are often caused by some other software on Windows using those
-ports. To discover the identity of this software, either use the `resmon.exe`
-GUI and click "Network" and then "Listening Ports" or in a powershell use
-`netstat -aon | find /i "listening "` to discover the PID of the process
-currently using the port (the PID is the number in the rightmost column). Decide
-whether to shut the other process down, or to use a different port in your
-docker app.
-
-### Docker fails to start when firewall or anti-virus software is installed
-
-The **Comodo Firewall currently is incompatible with Hyper-V and some Windows 10
-builds** (possibly, Windows 10 Anniversary Update), which impacts Docker for
-Windows. **Other firewalls and anti-virus software might also be incompatible with these Microsoft Windows 10 builds**. The conflict typically occurs after a Windows update or new install of the firewall, and manifests as an error response from the Docker daemon and a **Docker for Windows start failure**.
-
-See the Comodo forums topics [Comodo Firewall conflict with
-Hyper-V](https://forums.comodo.com/bug-reports-cis/comodo-firewall-began-conflict-with-hyperv-t116351.0.html)
-and [Windows 10 Anniversary build doesn't allow Comodo drivers to be
-installed](https://forums.comodo.com/install-setup-configuration-help-cis/windows-10-aniversary-build-doesnt-allow-comodo-drivers-to-be-installed-t116322.0.html).
-A Docker for Windows user-created issue describes the problem specifically as it
-relates to Docker: [Docker fails to start on Windows
-10](https://github.com/docker/for-win/issues/27).
-
-For a temporary workaround, uninstall the Comodo Firewall, or explore other
-workarounds suggested on the forum.
-
-
-
First, create the new account in GitHub. It should be given read-only
- access to the main repository and all submodules that are needed.
-
-
-
2.
-
-
This can be accomplished by adding the account to a read-only team in
- the organization(s) where the main GitHub repository and all submodule
- repositories are kept.
-
-
-
3.
-
-
Next, remove the deploy key from the main GitHub repository. This can be done in the GitHub repository's "Deploy keys" Settings section.
-
-
-
4.
-
-
Your automated build's deploy key is in the "Build Details" menu
- under "Deploy keys".
-
-
-
5.
-
-
In your dedicated GitHub User account, add the deploy key from your
- Docker Hub Automated Build.
-
-
-
-
-## GitHub service hooks
-
-A GitHub Service hook allows GitHub to notify the Docker Hub when something has
-been committed to a given git repository.
-
-When you create an Automated Build from a GitHub user that has full "Public and
-Private" linking, a Service Hook should get automatically added to your GitHub
-repository.
-
-If your GitHub account link to the Docker Hub is "Limited Access", then you will
-need to add the Service Hook manually.
-
-To add, confirm, or modify the service hook, log in to GitHub, then navigate to
-the repository, click "Settings" (the gear), then select "Webhooks & Services".
-You must have Administrator privileges on the repository to view or modify
-this setting.
-
-The image below shows the "Docker" Service Hook.
-
-
-
-If you add the "Docker" service manually, make sure the "Active" checkbox is
-selected and click the "Update service" button to save your changes.
diff --git a/docs/images/add-authorized-github-service.png b/docs/images/add-authorized-github-service.png
deleted file mode 100644
index a4fd351713..0000000000
Binary files a/docs/images/add-authorized-github-service.png and /dev/null differ
diff --git a/docs/images/authorized-services.png b/docs/images/authorized-services.png
deleted file mode 100644
index ccae6a7256..0000000000
Binary files a/docs/images/authorized-services.png and /dev/null differ
diff --git a/docs/images/bitbucket-hook.png b/docs/images/bitbucket-hook.png
deleted file mode 100644
index 3fd37708d8..0000000000
Binary files a/docs/images/bitbucket-hook.png and /dev/null differ
diff --git a/docs/images/bitbucket_creds.png b/docs/images/bitbucket_creds.png
deleted file mode 100644
index b24e185268..0000000000
Binary files a/docs/images/bitbucket_creds.png and /dev/null differ
diff --git a/docs/images/build-by.png b/docs/images/build-by.png
deleted file mode 100644
index d1071da272..0000000000
Binary files a/docs/images/build-by.png and /dev/null differ
diff --git a/docs/images/build-states-ex.png b/docs/images/build-states-ex.png
deleted file mode 100644
index 8f068ddd4d..0000000000
Binary files a/docs/images/build-states-ex.png and /dev/null differ
diff --git a/docs/images/build-trigger.png b/docs/images/build-trigger.png
deleted file mode 100644
index 8f034608ae..0000000000
Binary files a/docs/images/build-trigger.png and /dev/null differ
diff --git a/docs/images/busybox-image-tags.png b/docs/images/busybox-image-tags.png
deleted file mode 100644
index c3b07adb5e..0000000000
Binary files a/docs/images/busybox-image-tags.png and /dev/null differ
diff --git a/docs/images/create-dialog.png b/docs/images/create-dialog.png
deleted file mode 100644
index 1a4bddaf9b..0000000000
Binary files a/docs/images/create-dialog.png and /dev/null differ
diff --git a/docs/images/create-dialog1.png b/docs/images/create-dialog1.png
deleted file mode 100644
index c14e099f25..0000000000
Binary files a/docs/images/create-dialog1.png and /dev/null differ
diff --git a/docs/images/dashboard.png b/docs/images/dashboard.png
deleted file mode 100644
index 038be4cbba..0000000000
Binary files a/docs/images/dashboard.png and /dev/null differ
diff --git a/docs/images/deploy_key.png b/docs/images/deploy_key.png
deleted file mode 100644
index f1d8d92d22..0000000000
Binary files a/docs/images/deploy_key.png and /dev/null differ
diff --git a/docs/images/docker-integration.png b/docs/images/docker-integration.png
deleted file mode 100644
index 362e27ac09..0000000000
Binary files a/docs/images/docker-integration.png and /dev/null differ
diff --git a/docs/images/first_pending.png b/docs/images/first_pending.png
deleted file mode 100644
index 9deaeeea49..0000000000
Binary files a/docs/images/first_pending.png and /dev/null differ
diff --git a/docs/images/getting-started.png b/docs/images/getting-started.png
deleted file mode 100644
index 59f242d797..0000000000
Binary files a/docs/images/getting-started.png and /dev/null differ
diff --git a/docs/images/gh-check-admin-org-dh-app-access.png b/docs/images/gh-check-admin-org-dh-app-access.png
deleted file mode 100644
index 0df38c6946..0000000000
Binary files a/docs/images/gh-check-admin-org-dh-app-access.png and /dev/null differ
diff --git a/docs/images/gh-check-user-org-dh-app-access.png b/docs/images/gh-check-user-org-dh-app-access.png
deleted file mode 100644
index 13ad6468f6..0000000000
Binary files a/docs/images/gh-check-user-org-dh-app-access.png and /dev/null differ
diff --git a/docs/images/gh_add_ssh_user_key.png b/docs/images/gh_add_ssh_user_key.png
deleted file mode 100644
index 7d0092170f..0000000000
Binary files a/docs/images/gh_add_ssh_user_key.png and /dev/null differ
diff --git a/docs/images/gh_docker-service.png b/docs/images/gh_docker-service.png
deleted file mode 100644
index 7a84c81b7e..0000000000
Binary files a/docs/images/gh_docker-service.png and /dev/null differ
diff --git a/docs/images/gh_menu.png b/docs/images/gh_menu.png
deleted file mode 100644
index 84458a445f..0000000000
Binary files a/docs/images/gh_menu.png and /dev/null differ
diff --git a/docs/images/gh_org_members.png b/docs/images/gh_org_members.png
deleted file mode 100644
index 465f5da565..0000000000
Binary files a/docs/images/gh_org_members.png and /dev/null differ
diff --git a/docs/images/gh_repo_deploy_key.png b/docs/images/gh_repo_deploy_key.png
deleted file mode 100644
index 983b5eec77..0000000000
Binary files a/docs/images/gh_repo_deploy_key.png and /dev/null differ
diff --git a/docs/images/gh_service_hook.png b/docs/images/gh_service_hook.png
deleted file mode 100644
index c344c24afc..0000000000
Binary files a/docs/images/gh_service_hook.png and /dev/null differ
diff --git a/docs/images/gh_settings.png b/docs/images/gh_settings.png
deleted file mode 100644
index 2af9cb5138..0000000000
Binary files a/docs/images/gh_settings.png and /dev/null differ
diff --git a/docs/images/gh_team_members.png b/docs/images/gh_team_members.png
deleted file mode 100644
index 3bdf4abd95..0000000000
Binary files a/docs/images/gh_team_members.png and /dev/null differ
diff --git a/docs/images/github-side-hook.png b/docs/images/github-side-hook.png
deleted file mode 100644
index c742b4080a..0000000000
Binary files a/docs/images/github-side-hook.png and /dev/null differ
diff --git a/docs/images/groups.png b/docs/images/groups.png
deleted file mode 100644
index b725b48ba9..0000000000
Binary files a/docs/images/groups.png and /dev/null differ
diff --git a/docs/images/home-page.png b/docs/images/home-page.png
deleted file mode 100644
index e9c66cec9a..0000000000
Binary files a/docs/images/home-page.png and /dev/null differ
diff --git a/docs/images/hub.png b/docs/images/hub.png
deleted file mode 100644
index 959f961ae5..0000000000
Binary files a/docs/images/hub.png and /dev/null differ
diff --git a/docs/images/invite.png b/docs/images/invite.png
deleted file mode 100644
index f663340443..0000000000
Binary files a/docs/images/invite.png and /dev/null differ
diff --git a/docs/images/linked-acct.png b/docs/images/linked-acct.png
deleted file mode 100644
index 340733602c..0000000000
Binary files a/docs/images/linked-acct.png and /dev/null differ
diff --git a/docs/images/login-web.png b/docs/images/login-web.png
deleted file mode 100644
index 64e29c9014..0000000000
Binary files a/docs/images/login-web.png and /dev/null differ
diff --git a/docs/images/merge_builds.png b/docs/images/merge_builds.png
deleted file mode 100644
index 589bba9325..0000000000
Binary files a/docs/images/merge_builds.png and /dev/null differ
diff --git a/docs/images/org-repo-collaborators.png b/docs/images/org-repo-collaborators.png
deleted file mode 100644
index 3d80a1aa66..0000000000
Binary files a/docs/images/org-repo-collaborators.png and /dev/null differ
diff --git a/docs/images/orgs.png b/docs/images/orgs.png
deleted file mode 100644
index fe1b89b31c..0000000000
Binary files a/docs/images/orgs.png and /dev/null differ
diff --git a/docs/images/plus-carrot.png b/docs/images/plus-carrot.png
deleted file mode 100644
index 8c4cd37ded..0000000000
Binary files a/docs/images/plus-carrot.png and /dev/null differ
diff --git a/docs/images/prompt.png b/docs/images/prompt.png
deleted file mode 100644
index a94ccf08c9..0000000000
Binary files a/docs/images/prompt.png and /dev/null differ
diff --git a/docs/images/regex-help.png b/docs/images/regex-help.png
deleted file mode 100644
index ad404de476..0000000000
Binary files a/docs/images/regex-help.png and /dev/null differ
diff --git a/docs/images/register-web.png b/docs/images/register-web.png
deleted file mode 100644
index ea95e1f50b..0000000000
Binary files a/docs/images/register-web.png and /dev/null differ
diff --git a/docs/images/repo_links.png b/docs/images/repo_links.png
deleted file mode 100644
index 09a4bd63c1..0000000000
Binary files a/docs/images/repo_links.png and /dev/null differ
diff --git a/docs/images/repos.png b/docs/images/repos.png
deleted file mode 100644
index 959f961ae5..0000000000
Binary files a/docs/images/repos.png and /dev/null differ
diff --git a/docs/images/scan-drilldown.gif b/docs/images/scan-drilldown.gif
deleted file mode 100644
index e74acc162e..0000000000
Binary files a/docs/images/scan-drilldown.gif and /dev/null differ
diff --git a/docs/images/scan-results.png b/docs/images/scan-results.png
deleted file mode 100644
index 608674fee3..0000000000
Binary files a/docs/images/scan-results.png and /dev/null differ
diff --git a/docs/images/scan-tags.png b/docs/images/scan-tags.png
deleted file mode 100644
index ec2de8baad..0000000000
Binary files a/docs/images/scan-tags.png and /dev/null differ
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index 5a51c99055..0000000000
--- a/docs/index.md
+++ /dev/null
@@ -1,92 +0,0 @@
-+++
-title = "Overview of Docker Hub"
-description = "Docker Hub overview"
-keywords = ["Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, accounts, organizations, repositories, groups, teams"]
-aliases = "/docker-hub/overview/"
-[menu.main]
-parent="mn_pubhub"
-weight=-99
-
-+++
-
-# Overview of Docker Hub
-
-[Docker Hub](https://hub.docker.com) is a cloud-based registry service which
-allows you to link to code repositories, build your images and test them, stores
-manually pushed images, and links to [Docker Cloud](https://docs.docker.com/docker-cloud/) so you can deploy images to your
-hosts. It provides a centralized resource for container image discovery,
-distribution and change management, [user and team collaboration](orgs.md), and
-workflow automation throughout the development pipeline.
-
-Log in to Docker Hub and Docker Cloud using [your free Docker ID](accounts.md).
-
-
-
-Docker Hub provides the following major features:
-
-* [Image Repositories](repos.md): Find, manage, and push and pull images from community, official, and private image libraries.
-* [Automated Builds](builds.md): Automatically create new images when you make changes to a source code repository.
-* [Webhooks](webhooks.md): A feature of Automated Builds, Webhooks let you trigger actions after a successful push to a repository.
-* [Organizations](orgs.md): Create work groups to manage access to image repositories.
-* GitHub and Bitbucket Integration: Add the Hub and your Docker Images to your current workflows.
-
-
-## Create a Docker ID
-
-To explore Docker Hub, you'll need to create an account by following the
-directions in [Your Docker ID](accounts.md).
-
-> **Note**: You can search for and pull Docker images from Hub without logging in, however to push images you must log in.
-
-Your Docker ID gives you one private Docker Hub repository for free. If you need
-more private repositories, you can upgrade from your free account to a paid
-plan. To learn more, log in to Docker Hub and go to [Billing & Plans](https://hub.docker.com/account/billing-plans/), in the Settings menu.
-
-### Explore repositories
-
-You can find public repositories and images from Docker Hub in two ways.
-You can "Search" from the Docker Hub website, or you can use the Docker command line tool to run the `docker search` command. For example if you were looking for an ubuntu image, you might run the following command line search:
-
-```
- $ docker search ubuntu
-```
-
-Both methods list the available public repositories on Docker Hub which match
-the search term.
-
-Private repositories do not appear in the repository search results. To see all
-the repositories you can access and their status, view your "Dashboard" page on
-[Docker Hub](https://hub.docker.com).
-
-
-You can find more information on working with Docker images in the [Docker userguide](https://docs.docker.com/userguide/dockerimages/).
-
-### Use Official Repositories
-
-Docker Hub contains a number of [Official
-Repositories](http://hub.docker.com/explore/). These are public, certified
-repositories from vendors and contributors to Docker. They contain Docker images
-from vendors like Canonical, Oracle, and Red Hat that you can use as the basis
-to building your applications and services.
-
-With Official Repositories you know you're using an optimized and
-up-to-date image that was built by experts to power your applications.
-
-> **Note:** If you would like to contribute an Official Repository for your organization or product, see the documentation on [Official Repositories on Docker Hub](official_repos.md) for more information.
-
-
-## Work with Docker Hub image repositories
-
-Docker Hub provides a place for you and your team to build and ship Docker images.
-
-You can configure Docker Hub repositories in two ways:
-
-* [Repositories](repos.md), which allow you to push images from a local Docker daemon to Docker Hub, and
-* [Automated Builds](builds.md), which link to a source code repository and trigger an image rebuild process on Docker Hub when changes are detected in the source code.
-
-You can create public repositories which can be accessed by any other Hub user, or you can create private repositories with limited access you control.
-
-### Docker commands and Docker Hub
-
-Docker itself provides access to Docker Hub services via the [`docker search`](http://docs.docker.com/reference/commandline/search),
-[`pull`](http://docs.docker.com/reference/commandline/pull), [`login`](http://docs.docker.com/reference/commandline/login), and [`push`](http://docs.docker.com/reference/commandline/push) commands.
diff --git a/docs/menu.md b/docs/menu.md
deleted file mode 100644
index 1a89dc09ae..0000000000
--- a/docs/menu.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-# Menu topic
-
-If you can view this content, please raise a bug report.
diff --git a/docs/official_repos.md b/docs/official_repos.md
deleted file mode 100644
index 56b5689a7f..0000000000
--- a/docs/official_repos.md
+++ /dev/null
@@ -1,125 +0,0 @@
-+++
-title = "Official Repositories on Docker Hub"
-description = "Guidelines for Official Repositories on Docker Hub"
-keywords = ["Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, official, image, documentation"]
-[menu.main]
-parent="mn_pubhub"
-weight=15
-+++
-
-# Official Repositories on Docker Hub
-
-The Docker [Official Repositories](https://hub.docker.com/official/) are a
-curated set of Docker repositories that are promoted on Docker Hub. They are designed to:
-
-* Provide essential base OS repositories (for example,
- [ubuntu](https://hub.docker.com/_/ubuntu/),
- [centos](https://hub.docker.com/_/centos/)) that serve as the
- starting point for the majority of users.
-
-* Provide drop-in solutions for popular programming language runtimes, data
- stores, and other services, similar to what a Platform-as-a-Service (PAAS)
- would offer.
-
-* Exemplify [`Dockerfile` best practices](https://docs.docker.com/articles/dockerfile_best-practices)
- and provide clear documentation to serve as a reference for other `Dockerfile`
- authors.
-
-* Ensure that security updates are applied in a timely manner. This is
- particularly important as many Official Repositories are some of the most
- popular on Docker Hub.
-
-* Provide a channel for software vendors to redistribute up-to-date and
- supported versions of their products. Organization accounts on Docker Hub can
- also serve this purpose, without the careful review or restrictions on what
- can be published.
-
-Docker, Inc. sponsors a dedicated team that is responsible for reviewing and
-publishing all Official Repositories content. This team works in collaboration
-with upstream software maintainers, security experts, and the broader Docker
-community.
-
-While it is preferable to have upstream software authors maintaining their
-corresponding Official Repositories, this is not a strict requirement. Creating
-and maintaining images for Official Repositories is a public process. It takes
-place openly on GitHub where participation is encouraged. Anyone can provide
-feedback, contribute code, suggest process changes, or even propose a new
-Official Repository.
-
-## Should I use Official Repositories?
-
-New Docker users are encouraged to use the Official Repositories in their
-projects. These repositories have clear documentation, promote best practices,
-and are designed for the most common use cases. Advanced users are encouraged to
-review the Official Repositories as part of their `Dockerfile` learning process.
-
-A common rationale for diverging from Official Repositories is to optimize for
-image size. For instance, many of the programming language stack images contain
-a complete build toolchain to support installation of modules that depend on
-optimized code. An advanced user could build a custom image with just the
-necessary pre-compiled libraries to save space.
-
-A number of language stacks such as
-[python](https://hub.docker.com/_/python/) and
-[ruby](https://hub.docker.com/_/ruby/) have `-slim` tag variants
-designed to fill the need for optimization. Even when these "slim" variants are
-insufficient, it is still recommended to inherit from an Official Repository
-base OS image to leverage the ongoing maintenance work, rather than duplicating
-these efforts.
-
-## How do I know the Official Repositories are secure?
-
-Docker provides a preview version of Docker Cloud's [Security Scanning service](http://docs.docker.com/docker-cloud/builds/image-scan/) for all of the
-Official Repositories located on Docker Hub. These security scan results provide
-valuable information about which images contain security vulnerabilities, which
-you should use to help you choose secure components for your own projects.
-
-To view the Docker Security Scanning results:
-
-1. Make sure you're logged in to Docker Hub.
- You can view Official Images even while logged out, however the scan results are only available once you log in.
-2. Navigate to the official repository whose security scan you want to view.
-3. Click the `Tags` tab to see a list of tags and their security scan summaries.
- 
-
-You can click into a tag's detail page to see more information about which
-layers in the image and which components within the layer are vulnerable.
-Details including a link to the official CVE report for the vulnerability appear
-when you click an individual vulnerable component.
-
-## How can I get involved?
-
-All Official Repositories contain a **User Feedback** section in their
-documentation which covers the details for that specific repository. In most
-cases, the GitHub repository which contains the Dockerfiles for an Official
-Repository also has an active issue tracker. General feedback and support
-questions should be directed to `#docker-library` on Freenode IRC.
-
-## How do I create a new Official Repository?
-
-From a high level, an Official Repository starts out as a proposal in the form
-of a set of GitHub pull requests. You'll find detailed and objective proposal
-requirements in the following GitHub repositories:
-
-* [docker-library/official-images](https://github.com/docker-library/official-images)
-
-* [docker-library/docs](https://github.com/docker-library/docs)
-
-The Official Repositories team, with help from community contributors, formally
-review each proposal and provide feedback to the author. This initial review
-process may require a bit of back and forth before the proposal is accepted.
-
-There are also subjective considerations during the review process. These
-subjective concerns boil down to the basic question: "is this image generally
-useful?" For example, the [python](https://hub.docker.com/_/python/)
-Official Repository is "generally useful" to the large Python developer
-community, whereas an obscure text adventure game written in Python last week is
-not.
-
-Once a new proposal is accepted, the author is responsible for keeping
-their images up-to-date and responding to user feedback. The Official
-Repositories team becomes responsible for publishing the images and
-documentation on Docker Hub. Updates to the Official Repository follow the same
-pull request process, though with less review. The Official Repositories team
-ultimately acts as a gatekeeper for all changes, which helps mitigate the risk
-of quality and security issues from being introduced.
diff --git a/docs/orgs.md b/docs/orgs.md
deleted file mode 100644
index d14f5a1dce..0000000000
--- a/docs/orgs.md
+++ /dev/null
@@ -1,53 +0,0 @@
-+++
-title = "Teams & Organizations"
-description = "Docker Hub Teams and Organizations"
-keywords = ["Docker, docker, registry, teams, organizations, plans, Dockerfile, Docker Hub, docs, documentation"]
-[menu.main]
-parent="mn_pubhub"
-weight=-80
-+++
-
-# Organizations and teams
-
-Docker Hub [organizations](https://hub.docker.com/organizations/) let you
-create teams so you can give colleagues access to shared image repositories.
-A Docker Hub organization can contain public and private repositories just like
-a user account.
-Access to push or pull for these repositories is allocated by defining teams of users and then assigning team rights to specific repositories. Repository
-creation is limited to users in the organization owner's group. This allows you
-to distribute limited access Docker images, and to select which Docker Hub users
-can publish new images.
-
-### Creating and viewing organizations
-
-You can see which organizations you belong to and add new organizations by clicking "Organizations" in the top nav bar.
-
-
-
-### Organization teams
-
-Users in the "Owners" team of an organization can create and modify the
-membership of all teams.
-
-Other users can only see teams they belong to.
-
-
-
-### Repository team permissions
-
-Use teams to manage who can interact with your repositories.
-
-You need to be a member of the organization's "Owners" team to create a new team,
-Hub repository, or automated build. As an "Owner", you then delegate the following
-repository access rights to a team using the "Collaborators" section of the repository view:
-
-- `Read` access allows a user to view, search, and pull a private repository in the same way as they can a public repository.
-- `Write` access users are able to push to non-automated repositories on the Docker Hub.
-- `Admin` access allows the user to modify the repositories "Description", "Collaborators" rights,
- "Public/Private" visibility and "Delete".
-
-> **Note**: A User who has not yet verified their email address will only have
-> `Read` access to the repository, regardless of the rights their team
-> membership has given them.
-
-
diff --git a/docs/repos.md b/docs/repos.md
deleted file mode 100644
index 7f47b83cf1..0000000000
--- a/docs/repos.md
+++ /dev/null
@@ -1,270 +0,0 @@
-+++
-title = "Repositories on Docker Hub"
-description = "Your Repositories on Docker Hub"
-keywords = ["Docker, docker, trusted, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation"]
-[menu.main]
-parent="mn_pubhub"
-weight=5
-+++
-
-# Your Hub repositories
-
-Docker Hub repositories let you share images with co-workers,
-customers, or the Docker community at large. If you're building your images internally,
-either on your own Docker daemon, or using your own Continuous integration services,
-you can push them to a Docker Hub repository that you add to your Docker Hub user or
-organization account.
-
-Alternatively, if the source code for your Docker image is on GitHub or Bitbucket,
-you can use an "Automated build" repository, which is built by the Docker Hub
-services. See the [automated builds documentation](builds.md) to read about
-the extra functionality provided by those services.
-
-
-
-## Searching for images
-
-You can search the [Docker Hub](https://hub.docker.com) registry via its search
-interface or by using the command line interface. Searching can find images by image
-name, user name, or description:
-
- $ docker search centos
- NAME DESCRIPTION STARS OFFICIAL AUTOMATED
- centos The official build of CentOS. 1034 [OK]
- ansible/centos7-ansible Ansible on Centos7 43 [OK]
- tutum/centos Centos image with SSH access. For the root... 13 [OK]
- ...
-
-There you can see two example results: `centos` and `ansible/centos7-ansible`. The second
-result shows that it comes from the public repository of a user, named
-`ansible/`, while the first result, `centos`, doesn't explicitly list a
-repository which means that it comes from the top-level namespace for
-[Official Repositories](official_repos.md). The `/` character separates
-a user's repository from the image name.
-
-Once you've found the image you want, you can download it with `docker pull `:
-
- $ docker pull centos
- latest: Pulling from centos
- 6941bfcbbfca: Pull complete
- 41459f052977: Pull complete
- fd44297e2ddb: Already exists
- centos:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
- Digest: sha256:d601d3b928eb2954653c59e65862aabb31edefa868bd5148a41fa45004c12288
- Status: Downloaded newer image for centos:latest
-
-You now have an image from which you can run containers.
-
-## Viewing repository tags
-
-Docker Hub's repository "Tags" view shows you the available tags and the size
-of the associated image.
-
-Image sizes are the cumulative space taken up by the image and all
-its parent images. This is also the disk space used by the contents of the
-Tar file created when you `docker save` an image.
-
-
-
-## Creating a new repository on Docker Hub
-
-When you first create a Docker Hub user, you will have a "Get started with Docker Hub."
-screen, from which you can click directly into "Create Repository".
-You can also use the "Create ▼" menu to "Create Repository".
-
-When creating a new repository, you can choose to put it in your Docker ID namespace, or that of any [organization](orgs.md) that you
-are in the "Owners" team.
-The Repository Name will need to be unique in that namespace, can be two to 255 characters,
-and can only contain lowercase letters, numbers or `-` and `_`.
-
-The "Short Description" of 100 characters will be used in the search results, while the
-"Full Description" can be used as the Readme for the repository, and can use Markdown to
-add simple formatting.
-
-After you hit the "Create" button, you then need to `docker push` images to that Hub based
-repository.
-
-
-
-## Pushing a repository image to Docker Hub
-
-In order to push a repository to the Docker Hub, you need to
-name your local image using your Docker Hub username, and the
-repository name that you created in the previous step.
-You can add multiple images to a repository, by adding a specific `:` to
-it (for example `docs/base:testing`). If its not specified, the tag defaults to
-`latest`.
-You can name your local images either when you build it, using
-`docker build -t /[:]`,
-by re-tagging an existing local image `docker tag /[:]`,
-or by using `docker commit /[:]` to commit
-changes.
-See [Working with Docker images](https://docs.docker.com/userguide/dockerimages) for a detailed description.
-
-Now you can push this repository to the registry designated by its name or tag.
-
- $ docker push /:
-
-The image will then be uploaded and available for use by your team-mates and/or the
-community.
-
-
-## Stars
-
-Your repositories can be starred and you can star repositories in
-return. Stars are a way to show that you like a repository. They are
-also an easy way of bookmarking your favorites.
-
-## Comments
-
-You can interact with other members of the Docker community and maintainers by
-leaving comments on repositories. If you find any comments that are not
-appropriate, you can flag them for review.
-
-## Collaborators and their role
-
-A collaborator is someone you want to give access to a private
-repository. Once designated, they can `push` and `pull` to your
-repositories. They will not be allowed to perform any administrative
-tasks such as deleting the repository or changing its status from
-private to public.
-
-> **Note:**
-> A collaborator cannot add other collaborators. Only the owner of
-> the repository has administrative access.
-
-You can also assign more granular collaborator rights ("Read", "Write", or "Admin")
-on Docker Hub by using organizations and teams. For more information
-see the [organizations documentation](orgs.md).
-
-## Private repositories
-
-Private repositories allow you to have repositories that contain images
-that you want to keep private, either to your own account or within an
-organization or team.
-
-To work with a private repository on [Docker
-Hub](https://hub.docker.com), you will need to add one via the [Add
-Repository](https://hub.docker.com/add/repository/)
-button. You get one private repository for free with your Docker Hub
-user account (not usable for organizations you're a member of). If
-you need more accounts you can upgrade your [Docker
-Hub](https://hub.docker.com/account/billing-plans/) plan.
-
-Once the private repository is created, you can `push` and `pull` images
-to and from it using Docker.
-
-> *Note:* You need to be signed in and have access to work with a
-> private repository.
-
-Private repositories are just like public ones. However, it isn't
-possible to browse them or search their content on the public registry.
-They do not get cached the same way as a public repository either.
-
-It is possible to give access to a private repository to those whom you
-designate (i.e., collaborators) from its "Settings" page. From there, you
-can also switch repository status (*public* to *private*, or
-vice-versa). You will need to have an available private repository slot
-open before you can do such a switch. If you don't have any available,
-you can always upgrade your [Docker
-Hub](https://hub.docker.com/account/billing-plans/) plan.
-
-## Webhooks
-
-A webhook is an HTTP call-back triggered by a specific event.
-You can use a Hub repository webhook to notify people, services, and other
-applications after a new image is pushed to your repository (this also happens
-for Automated builds). For example, you can trigger an automated test or
-deployment to happen as soon as the image is available.
-
-To get started adding webhooks, go to the desired repository in the Hub,
-and click "Webhooks" under the "Settings" box.
-A webhook is called only after a successful `push` is
-made. The webhook calls are HTTP POST requests with a JSON payload
-similar to the example shown below.
-
-*Example webhook JSON payload:*
-
-```json
-{
- "callback_url": "https://registry.hub.docker.com/u/svendowideit/busybox/hook/2141bc0cdec4hebec411i4c1g40242eg110020/",
- "push_data": {
- "images": [
- "27d47432a69bca5f2700e4dff7de0388ed65f9d3fb1ec645e2bc24c223dc1cc3",
- "51a9c7c1f8bb2fa19bcd09789a34e63f35abb80044bc10196e304f6634cc582c",
- "..."
- ],
- "pushed_at": 1.417566822e+09,
- "pusher": "svendowideit"
- },
- "repository": {
- "comment_count": 0,
- "date_created": 1.417566665e+09,
- "description": "",
- "full_description": "webhook triggered from a 'docker push'",
- "is_official": false,
- "is_private": false,
- "is_trusted": false,
- "name": "busybox",
- "namespace": "svendowideit",
- "owner": "svendowideit",
- "repo_name": "svendowideit/busybox",
- "repo_url": "https://registry.hub.docker.com/u/svendowideit/busybox/",
- "star_count": 0,
- "status": "Active"
- }
-}
-```
-
-
-
->**Note:** If you want to test your webhook, we recommend using a tool like
->[requestb.in](http://requestb.in/). Also note, the Docker Hub server can't be
->filtered by IP address.
-
-### Webhook chains
-
-Webhook chains allow you to chain calls to multiple services. For example,
-you can use this to trigger a deployment of your container only after
-it has been successfully tested, then update a separate Changelog once the
-deployment is complete.
-After clicking the "Add webhook" button, simply add as many URLs as necessary
-in your chain.
-
-The first webhook in a chain will be called after a successful push. Subsequent
-URLs will be contacted after the callback has been validated.
-
-### Validating a callback
-
-In order to validate a callback in a webhook chain, you need to
-
-1. Retrieve the `callback_url` value in the request's JSON payload.
-1. Send a POST request to this URL containing a valid JSON body.
-
-> **Note**: A chain request will only be considered complete once the last
-> callback has been validated.
-
-To help you debug or simply view the results of your webhook(s),
-view the "History" of the webhook available on its settings page.
-
-#### Callback JSON data
-
-The following parameters are recognized in callback data:
-
-* `state` (required): Accepted values are `success`, `failure` and `error`.
- If the state isn't `success`, the webhook chain will be interrupted.
-* `description`: A string containing miscellaneous information that will be
- available on the Docker Hub. Maximum 255 characters.
-* `context`: A string containing the context of the operation. Can be retrieved
- from the Docker Hub. Maximum 100 characters.
-* `target_url`: The URL where the results of the operation can be found. Can be
- retrieved on the Docker Hub.
-
-*Example callback payload:*
-
- {
- "state": "success",
- "description": "387 tests PASSED",
- "context": "Continuous integration by Acme CI",
- "target_url": "http://ci.acme.com/results/afd339c1c3d27"
- }
diff --git a/docs/s3_website.json b/docs/s3_website.json
deleted file mode 100644
index 96eea7318e..0000000000
--- a/docs/s3_website.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "ErrorDocument": {
- "Key": "jsearch/index.html"
- },
- "IndexDocument": {
- "Suffix": "index.html"
- }
-}
diff --git a/docs/webhooks.md b/docs/webhooks.md
deleted file mode 100644
index a50206e3f6..0000000000
--- a/docs/webhooks.md
+++ /dev/null
@@ -1,50 +0,0 @@
-+++
-title = "Webhooks for automated builds"
-description = "Docker Hub Automated Builds"
-keywords = ["Docker, webhookds, hub, builds"]
-[menu.main]
-parent="mn_pubhub"
-weight=7
-+++
-
-# Webhooks for automated builds
-
-If you have an automated build repository in Docker Hub, you can use Webhooks to cause an action in another application in response to an event in the repository. Docker Hub webhooks fire when an image is built in, or a new tag added to, your automated build repository.
-
-With your webhook, you specify a target URL and a JSON payload to deliver. The example webhook below generates an HTTP POST that delivers a JSON payload:
-
-```json
-{
- "callback_url": "https://registry.hub.docker.com/u/svendowideit/testhook/hook/2141b5bi5i5b02bec211i4eeih0242eg11000a/",
- "push_data": {
- "images": [
- "27d47432a69bca5f2700e4dff7de0388ed65f9d3fb1ec645e2bc24c223dc1cc3",
- "51a9c7c1f8bb2fa19bcd09789a34e63f35abb80044bc10196e304f6634cc582c",
- "..."
- ],
- "pushed_at": 1.417566161e+09,
- "pusher": "trustedbuilder"
- },
- "repository": {
- "comment_count": "0",
- "date_created": 1.417494799e+09,
- "description": "",
- "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\/var/cache/apt-cacher-ng\]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n,
- full_description: Docker Hub based automated build from a GitHub repo",
- "is_official": false,
- "is_private": true,
- "is_trusted": true,
- "name": "testhook",
- "namespace": "svendowideit",
- "owner": "svendowideit",
- "repo_name": "svendowideit/testhook",
- "repo_url": "https://registry.hub.docker.com/u/svendowideit/testhook/",
- "star_count": 0,
- "status": "Active"
- }
-}
-```
-
->**Note:** If you want to test your webhook, we recommend using a tool like
->[requestb.in](http://requestb.in/). Also note, the Docker Hub server can't be
->filtered by IP address.
diff --git a/fabfile.py b/fabfile.py
deleted file mode 100644
index 80d2a1aedc..0000000000
--- a/fabfile.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from fabric.api import run
-
-def start_project(email="none", user="none", auth="none", beta_password="maybejustnotsomeemptyspaceyea?", sha="latest", new_relic_key="", new_relic_app_name="hub-stage-node"):
- run('docker rm $(docker ps -a -q) > /dev/null 2>&1 || :')
- run('docker rmi $(docker images -q) > /dev/null 2>&1 || :')
- run('cd /home/')
- run('docker login -e %s -u %s -p %s' % (email, user, auth))
- run('docker pull bagel/hub-prod:%s' % sha)
- run('docker pull bagel/haproxy_beta:latest')
- run("docker ps | awk '{if($1 != \"CONTAINER\"){print $1}}' | xargs -r docker kill")
- # We should tag the image with the git commit and deploy that instead of "latest"
- run('docker run -dp 7001:3000 -e ENV=production --restart=on-failure:5 -e HUB_API_BASE_URL=https://hub-beta-stage.docker.com -e REGISTRY_API_BASE_URL=https://hub-beta-stage.docker.com -e NEW_RELIC_LICENSE_KEY=%s -e NEW_RELIC_APP_NAME=%s bagel/hub-prod:%s' % (new_relic_key, new_relic_app_name, sha))
- # HAProxy doesn't change a lot. We should check the image names before killing/rebooting
- run('docker run -dp 80:80 -p 443:443 -e BETA_PASSWORD=%s --restart=on-failure:5 -v /opt/haproxy.pem:/haproxy/keys/hub-beta.docker.com/hub-beta.docker.pem bagel/haproxy_beta:latest' % beta_password)
diff --git a/flow-libs/async.js b/flow-libs/async.js
deleted file mode 100644
index 2d9f0cccef..0000000000
--- a/flow-libs/async.js
+++ /dev/null
@@ -1,12 +0,0 @@
-type AsyncCallback = (err: ?Object, results: ?any) => void;
-type ParallelFuncs = (callback: AsyncCallback) => void;
-
-declare module 'async' {
- declare function parallel(tasks: Array | Object,
- callback: AsyncCallback): void
- declare function series(tasks: Array,
- callback: AsyncCallback): void
- declare function each(arr: Array,
- func: Function,
- callback: Function): void
-}
\ No newline at end of file
diff --git a/flow-libs/debug.js b/flow-libs/debug.js
deleted file mode 100644
index 4807c40f8a..0000000000
--- a/flow-libs/debug.js
+++ /dev/null
@@ -1,5 +0,0 @@
-type DebugFunction = (thing: any) => void;
-
-declare module 'debug' {
- declare function exports(string: string): DebugFunction;
-}
\ No newline at end of file
diff --git a/flow-libs/fluxible.js b/flow-libs/fluxible.js
deleted file mode 100644
index 3ad2f4e95f..0000000000
--- a/flow-libs/fluxible.js
+++ /dev/null
@@ -1,4 +0,0 @@
-export type FluxibleActionContext = {
- dispatch(eventName: string,
- payload: any): void;
-}
diff --git a/flow-libs/hub-js-sdk.js b/flow-libs/hub-js-sdk.js
deleted file mode 100644
index 79da45ff99..0000000000
--- a/flow-libs/hub-js-sdk.js
+++ /dev/null
@@ -1,74 +0,0 @@
-type SuperAgentCallback = (err: any,
- res: any) => void;
-
-type JWT = String;
-type ChangePasswordData = {
- username: String;
- oldpassword: String;
- newpassword: String
-}
-
-declare module 'hub-js-sdk' {
- declare var Auth: {
- getToken(username: string,
- password: string,
- cb: SuperAgentCallback): void;
- }
- declare var Repositories: {
- createRepository(jwt: JWT,
- repository: any,
- cb: SuperAgentCallback): void;
- getReposForUser(jwt: JWT,
- username: String,
- cb: SuperAgentCallback): void
- }
- declare var Emails: {
- getEmailSubscriptions(JWT: JWT,
- user: String,
- cb: SuperAgentCallback): void;
- unsubscribeEmails(JWT: JWT,
- user: String,
- data: Object,
- cb: SuperAgentCallback): void;
- subscribeEmails(JWT:JWT,
- user: String,
- data: Object,
- cb: SuperAgentCallback): void;
- getEmailsJWT(JWT:JWT,
- cb:SuperAgentCallback): void;
- getEmailsForUser(JWT: JWT,
- user: String,
- cb: SuperAgentCallback): void;
- deleteEmailByID(JWT: JWT,
- id: String,
- cb: SuperAgentCallback): void;
- updateEmailByID(JWT: JWT,
- id: String,
- data: Object,
- cb: SuperAgentCallback): void;
- addEmailsForUser(JWT: JWT,
- user: Object,
- email: string,
- cb: SuperAgentCallback): void;
- }
-}
-
-declare module 'hub-js-sdk/src/Hub/SDK/Users' {
- declare function changePassword(JWT: JWT,
- data: ChangePasswordData,
- cb: SuperAgentCallback): void;
- declare function getUser(JWT: JWT,
- user: String,
- cb: SuperAgentCallback): void;
-}
-
-declare module 'hub-js-sdk/src/Hub/SDK/Auth' {
- declare function getToken(username: String,
- password: String,
- cb: SuperAgentCallback): void;
-}
-
-declare module 'hub-js-sdk/src/Hub/SDK/Notifications' {
- declare function getActivityFeed(JWT: JWT,
- cb: SuperAgentCallback): void;
-}
diff --git a/flow-libs/lodash.js b/flow-libs/lodash.js
deleted file mode 100644
index a047b9e1b5..0000000000
--- a/flow-libs/lodash.js
+++ /dev/null
@@ -1,5 +0,0 @@
-declare module 'lodash' {
- declare function sortByOrder(arr: Array,
- properties: Array,
- sortOrder: Array): Array;
-}
\ No newline at end of file
diff --git a/gulp-tasks/img.js b/gulp-tasks/img.js
deleted file mode 100644
index 524d9a86f6..0000000000
--- a/gulp-tasks/img.js
+++ /dev/null
@@ -1,24 +0,0 @@
-var gulp = require('gulp');
-var imagemin = require('gulp-imagemin');
-var pngquant = require('imagemin-pngquant');
-
-//Hub2 Images for dev & production (There is a separate task for docker-ux images)
-gulp.task('images::dev', function () {
- return gulp.src('app/img/**')
- .pipe(imagemin({
- progressive: true,
- svgoPlugins: [{removeViewBox: false}],
- use: [pngquant({ quality: '65-80', speed: 4 })]
- }))
- .pipe(gulp.dest('app/.build/public/img'));
-});
-
-gulp.task('images::prod', function() {
- return gulp.src('app/img/**')
- .pipe(imagemin({
- progressive: true,
- svgoPlugins: [{removeViewBox: false}],
- use: [pngquant({ quality: '65-80', speed: 4 })]
- }))
- .pipe(gulp.dest('.tmp/server/build/img'));
-});
diff --git a/gulpfile.js b/gulpfile.js
deleted file mode 100644
index 05734a32b9..0000000000
--- a/gulpfile.js
+++ /dev/null
@@ -1,3 +0,0 @@
-'use strict';
-require('./gulp-tasks/img');
-
diff --git a/local.Dockerfile b/local.Dockerfile
deleted file mode 100644
index 34163d2fd2..0000000000
--- a/local.Dockerfile
+++ /dev/null
@@ -1,22 +0,0 @@
-FROM bagel/universe:337f873f4f23f4b2603972229ae3519c5f61f6d7
-
-ENV ENV local
-ENV NODE_ENV local
-
-COPY ./app /opt/hub/app
-COPY ./Makefile /opt/hub/Makefile
-COPY ./_webpack /opt/hub/_webpack
-COPY ./gulpfile.js /opt/hub/gulpfile.js
-COPY ./gulp-tasks /opt/hub/gulp-tasks
-COPY ./app-server /opt/hub/app-server
-COPY ./.eslintrc /opt/hub/.eslintrc
-
-RUN make server-prod-target
-RUN make server-extras
-RUN make js-local
-RUN make images-prod
-RUN make docker-font-prod
-RUN gulp images::prod
-RUN make styles-base-prod
-RUN make stats-dir
-RUN make css-stats
diff --git a/package.json b/package.json
deleted file mode 100644
index fe9abf952a..0000000000
--- a/package.json
+++ /dev/null
@@ -1,114 +0,0 @@
-{
- "name": "docker-2.0",
- "version": "0.0.1",
- "private": true,
- "scripts": {
- "test": "jest",
- "build:dev": "DEBUG=* webpack -dw"
- },
- "jest": {
- "rootDir": "./app/scripts",
- "scriptPreprocessor": "../../node_modules/babel-jest",
- "testFileExtensions": [
- "js"
- ],
- "moduleFileExtensions": [
- "jsx",
- "js",
- "json"
- ],
- "modulePathIgnorePatterns": [
- "/node_modules/"
- ],
- "unmockedModulePathPatterns": [
- "react"
- ]
- },
- "dependencies": {
- "@dux/element-button": "0.0.3",
- "@dux/element-card": "0.0.7",
- "@dux/element-markdown": "0.0.8",
- "@dux/hub-sdk": "^0.1.1",
- "async": "^1.3.0",
- "babel": "^5.6.14",
- "babel-core": "^5.6.14",
- "babel-runtime": "^5.6.18",
- "body-parser": "^1.12.2",
- "bugsnag": "^1.7.0",
- "classnames": "^2.1.2",
- "cookie": "^0.2.3",
- "cookie-parser": "^1.3.4",
- "csurf": "^1.8.0",
- "debug": "^2.1.3",
- "dux": "file:./private-deps/docker-ux",
- "express": "^4.12.3",
- "express-state": "^1.2.0",
- "fluxible": "^1.0.3",
- "fluxible-addons-react": "^0.2.0",
- "highlight.js": "^9.0.0",
- "history": "^1.17.0",
- "hub-js-sdk": "file:./private-deps/hub-js-sdk",
- "immutable": "^3.7.6",
- "keymirror": "^0.1.1",
- "lodash": "^3.6.0",
- "marked": "^0.3.3",
- "md5": "^2.0.0",
- "moment": "^2.10.3",
- "newrelic": "christopherbiscardi/node-newrelic#c4ccca3764acafaf9c5899e4a1abece828e1f7b8",
- "normalizr": "^1.4.0",
- "numeral": "^1.5.3",
- "rc-tooltip": "^3.3.0",
- "react": "^0.14.7",
- "react-document-title": "^2.0.2",
- "react-dom": "^0.14.3",
- "react-router": "^1.0.0",
- "react-select": "^1.0.0-beta6",
- "recurly-js": "git://github.com/recurly/recurly-js#d9740eb3ee416fb999635daecfb524a492dbb058",
- "redux": "^3.0.5",
- "redux-logger": "^2.3.2",
- "redux-ui": "0.0.8",
- "remarkable": "^1.6.0",
- "reselect": "^2.0.1",
- "serialize-javascript": "^1.0.0",
- "serve-favicon": "^2.2.0",
- "superagent": "^1.1.0",
- "svg-inline-react": "^0.3.1",
- "velocity-animate": "^1.2.3",
- "velocity-react": "1.1.3"
- },
- "devDependencies": {
- "babel-eslint": "^4.0.0",
- "babel-jest": "^5.0.1",
- "babel-loader": "^5.0.0",
- "css-loader": "^0.23.0",
- "cssnano": "^3.2.0",
- "cssstats": "^1.10.0",
- "eslint": "^1.2.1",
- "eslint-loader": "^1.0.0",
- "extract-text-webpack-plugin": "^0.9.1",
- "gulp": "^3.8.11",
- "gulp-imagemin": "^2.2.1",
- "imagemin-pngquant": "^4.0.0",
- "json-loader": "^0.5.2",
- "lost": "^6.6.2",
- "nodemon": "^1.3.7",
- "postcss-browser-reporter": "^0.4.0",
- "postcss-constants": "^0.1.1",
- "postcss-cssnext": "^2.1.0",
- "postcss-cssstats": "^1.0.0",
- "postcss-each": "^0.7.0",
- "postcss-import": "^7.0.0",
- "postcss-loader": "^0.8.0",
- "postcss-nested": "^1.0.0",
- "postcss-url": "^5.0.1",
- "react-redux": "^4.0.3",
- "redux": "^3.0.5",
- "reselect": "^2.0.1",
- "style-loader": "^0.13.0",
- "svg-inline-loader": "^0.4.0",
- "webpack": "^1.10"
- },
- "engines": {
- "node": ">=4.0.0"
- }
-}
diff --git a/pr-docs/css.md b/pr-docs/css.md
deleted file mode 100644
index f019c08e6c..0000000000
--- a/pr-docs/css.md
+++ /dev/null
@@ -1,62 +0,0 @@
-[Demo](https://css-modules.github.io/webpack-demo/)
-
-The approach is thus: Use Foundation as a "browser reset" stylesheet,
-then put everything that isn't a foundation `_settings.scss` variable
-in CSSModule sidecar files. This links our javascript modules with our
-css and increases the ease with which we can create a module library.
-
-# Implementation
-
-## File Structure
-
-```
-app/scripts/
-|-- ScopedSelectors.js
-|-- ScopedSelectors.css
-```
-
-## Usage
-
-```javascript
-import styles from './ScopedSelectors.css';
-
-import React, { Component } from 'react';
-
-export default class ScopedSelectors extends Component {
-
- render() {
- return (
-
-
Scoped Selectors
-
- );
- }
-
-};
-```
-
-```css
-.root {
- border-width: 2px;
- border-style: solid;
- border-color: #777;
- padding: 0 20px;
- margin: 0 6px;
- max-width: 400px;
-}
-
-.text {
- color: #777;
- font-size: 24px;
- font-family: helvetica, arial, sans-serif;
- font-weight: 600;
-}
-```
-
-# Approach
-
-* modules should be scoped to themselves and not affect children or
- siblings.
-* Webpack already has support for css-modules in it's `css-loader`. So
- we'll start with that.
-* [css-modules and preprocessors (sass)](https://github.com/css-modules/css-modules#usage-with-preprocessors)
diff --git a/pr-docs/linting.md b/pr-docs/linting.md
deleted file mode 100644
index e66ad8c472..0000000000
--- a/pr-docs/linting.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Linting
-
-All code must pass [ESLint][eslint] before being merged into master
-branch. The ESLint config can be found in `.eslintrc` and is
-integrated into webpack.
-
-# Running ESLint
-
-```
-gulp webpack
-```
-
-Since linting is integrated with webpack, it is possible to lint code
-while it is being developed without any extra effort. This is
-important because if it is not approximate to effortless to run
-linting, it will not be run while developing.
-
-# We should block deploys for linting errors
-
-Since we do CI/CD, the static analysis present in ESLint can help us
-catch bugs before shipping. We should therefore block deploys if
-ESLint detects an error-level (level `2` in `.eslintrc`) issue.
-
-* [eslint][eslint]
-* [babel-eslint][babel-eslint]
-
-[eslint]: http://eslint.org/
-[babel-eslint]: https://github.com/babel/babel-eslint
diff --git a/pr-docs/routes.md b/pr-docs/routes.md
deleted file mode 100644
index 99876f2e52..0000000000
--- a/pr-docs/routes.md
+++ /dev/null
@@ -1,262 +0,0 @@
-# Routes
-
-Two items affect this proposal.
-
-1. [Distribution's work](https://github.com/docker/distribution),
- specifically relating to defining Repositories, Images, Manifests,
- Digests and Tags.
-2. The Current Hub's Routing Issues
-
-## Distributions Work (partially summarized)
-
-### Repository
-
-* A set of blobs
-* Subsets of these blobs make up Images
-
-### Image
-
-* A set of blobs
- - Layers
- - Tag
- - Signatures
- - Manifest
-* A Tag (potentially containing signatures) points to a Manifest
-* A Manifest points to multiple layers.
-
-### Manifest
-
-As defined in the [distribution][manifest-pr] Manifest PR:
-
-> A [Content Manifest][manifest] is a simple JSON file which contains
-> general fields that are typical to any package management
-> system. The goal is for these manifests to describe an application
-> and its dependencies in a content-addressable and verifiable way.
-
-### Tag
-
-As defined in the [distribution][d-tag-pr] PR:
-
-> A [tag][tag] is simply a named pointer to content. The content can
-> be any blob but should mostly be a manifest. One can sign tags to
-> later verify that they were created by a trusted party.
-
-### Additional Content
-
-Image names will be allowed to have many slashes in the future.
-
-## Current Hub Issues
-
-### Collisions
-
-The URLs for user and repo collide:
-
-A user's Starred Repos:
-
-```
-/u/biscarch/starred/
-```
-
-A user's repository, named Starred.
-
-```
-/u/biscarch/starred/
-```
-
-## Future Problems
-
-An image for the user `biscarch`, named `my/repo`:
-
-```
-/u/biscarch/my/repo/
-```
-
-An image for the user `biscarch`, named `my`, tagged `repo`:
-
-```
-/u/biscarch/my/repo/
-```
-
-## Solutions
-
-Namespace `Users`, `Repos` and `Images` as such (with the user
-`biscarch`)
-
-```
-/u/:user
-/r/:user/:repo
-/i/:user/:repo/:tag
-```
-
-### Solving Starred Repos
-
-Prefix defines whether we are referring to a repo or attribute of a
-user:
-
-```
-/u/biscarch/starred
-/r/biscarch/starred
-```
-
-### Solving Repo/Image Conflicts
-
-Prefix determines whether we are referring to a Repository or Image:
-
-```
-/r/biscarch/my/repo/
-/i/biscarch/my/repo/
-```
-
-## The new Spec
-
-```
-/u/
-/u/:user/
-/r/:user/:repo/
-/i/:user/:repo/:tag/
-```
-
-### Full List
-
-### Dashboard
-
-`/`
-
-### Official Repositories
-
-"username" === library, which is represented as the root `_`.
-All management of `library` namespaced repos is done from the usual
-`/u/library/:repo/`
-
-```
-/_/:repo/
-/_/:repo/dockerfile/
-/_/:repo/dockerfile/raw
-/_/:repo/tags/
-```
-
-### Single Endpoints
-
-* Search
- - `/search/`
-* Plans
- - `/plans/`
-
-### Account
-
-Mostly Settings; Add Repository Page;
-
-`/account/` should redirect to `/account/settings/`
-
-```
-/account/accounts/
-/account/authorized_services/
-/account/change-password/
-/account/confirm-email//
-/account/emails/
-/account/notifications/
-/account/organizations/
-/account/organizations/:org_name/
-/account/organizations/:org_name/groups/:group_id/
-/account/repositories/add/
-/account/settings/
-/account/subscriptions/
-```
-
-### Users
-
-```
-/u/
-/u/:user/
-/u/:user/activity/
-/u/:user/contributed/
-/u/:user/starred/
-```
-
-### Repos
-
-```
-/r/:user/:repo/
-/r/:user/:repo/~/settings/
-/r/:user/:repo/~/settings/collaborators/
-/r/:user/:repo/~/settings/links/
-/r/:user/:repo/~/settings/triggers/
-/r/:user/:repo/~/settings/webhooks/
-/r/:user/:repo/~/settings/tags/
-```
-
-Current build history urls:
-
-```
-/r/:user/:repo/~/builds_history/
-```
-
-### Images
-
-We currently don't do a lot for Images. Repositories have been the
-main focus.
-
-```
-/i/:user/:repo/:tag/
-/i/:user/:repo/:tag/~/dockerfile/
-/i/:user/:repo/:tag/~/dockerfile/raw/
-```
-
-### Automated Builds
-
-```
-/automated-builds/
-/builds/
-/builds/:user/:repo/
-```
-
-### Convenience Redirects
-
-Also, potential pages to build out more agressively.
-
-* `/official/`
- - redirects to `/search?q=library&f=official`
- - future: Potentially `Explore` type page for official repos
-* `/most_stars/`, `/popular/`
- - redirects to `search?q=library&s=stars`
-* `/recent_updated/`
- - `search?q=library&s=last_updated`
-
-#### Help
-
-* `/help`
- - `https://www.docker.com/resources/help/`
- - Can we rely on this url to stick around?
-* `/help/docs`
- - `https://docs.docker.com/`
-
-## Make Separate Sites for:
-
-### Highland URLs
-
-We need to pull out the APIs used on the current Hub for this.
-
-```
-/highland/
-/highland/build-configs/
-/highland/builds/
-/highland/search/
-/highland/stats/
-```
-
-
-## More Issues
-
-* There are no links to comments
-* `/opensearch.xml` times out on the current site
- - Should we re-implement?
-* `/sitemap.xml`
-
-# Concerns with this Proposal
-
-* Automated Build urls need to be given more thought
-
-[tag]: https://github.com/stevvooe/distribution/blob/a8d3f3474b7b60576dc64250d95db3717bf07c33/doc/spec/tags.md#tags
-[d-tag-pr]: https://github.com/docker/distribution/pull/173/files
-[d-manifest-pr]: https://github.com/docker/distribution/pull/62
-[manifest]: https://github.com/jlhawn/distribution/blob/e8b5c8c32b565b9b643c3a0b0e87339bf40eb206/doc/spec/manifest.md
diff --git a/production_ready.md b/production_ready.md
deleted file mode 100644
index ee92e5a3b3..0000000000
--- a/production_ready.md
+++ /dev/null
@@ -1,93 +0,0 @@
-Production Readiness: Docker Hub Front-End (hub-web-v2)
-================================
-
-Testing
--------
-
- * **What is the max traffic load that your service has been tested with?**
- Hub UI has not been load tested.
-
- * **How has the service been soak-tested?**
- Hub UI has not been soak tested.
-
- Monitoring
- ----------
-
- * **How do you monitor?**
- New Relic for server monitoring, BugSnag for JavaScript errors and PagerDuty for alerting.
-
- * **What’s the link(s) to the dashboard(s)?**
- New Relic: https://rpm.newrelic.com/accounts/532547/applications/8853774
- BugSnag: https://bugsnag.com/docker/hub-prod/errors
- PagerDuty: https://docker.pagerduty.com/services/PKZG21B
-
- * **Do you use an exception tracking service (e.g. Bugsnag, Sentry, New Relic)?**
- Yes, BugSnag and New Relic.
-
- * **What’s the health check endpoint? And what checks does that endpoint perform?**
- https://hub.docker.com/_health/
-
- * **What external services do you depend on? How do you monitor them and handle failures?**
- Hub API Gateway and all downstream Docker Cloud services.
- Google Tag Manager (gtm.js)
- Recurly (recurly.js)
-
-
-
- * **What’s the link to view the logs?**
-
- Alerting
- --------
-
- * **How do you know if your service is down?**
- PagerDuty alerts
- Prometheus alerts
-
- * **What are the metrics that you alert on?**
- 500's from Front End containers
-
- * **Have you tested tripping on an alert to page somebody?**
- Not manually tested. But production systems are paging properly.
-
- * **What’s the link to your on-call schedule?**
- https://docker.pagerduty.com/schedules#P88XAI9
-
- * **Where is your on-call run-book?**
- https://docker.atlassian.net/wiki/display/DE/Hub+UI+Runbook
-
- Disaster
- --------
-
- * **What’s the plan if your persistence layer blows up?**
- Front-end is stateless so this shouldn't be required, but restart Container if unsure.
-
- * **What’s the plan if any of your external service dependencies blows up?**
- Hub API Gateway or downstream service - find service owner, escalate/alert through PagerDuty, contact service team via Slack.
- Google Tag Manager - disable Google Tag Manager from https://tagmanager.google.com - single signon with docker.com account
- Recurly problem - check status.recurly.com, escalate/alert Billing team through PagerDuty, contact service team via Slack.
- Update status.io describing impact to UI if any.
-
-
- Security
- --------
-
- * **Is the service exposed on the public internet? Does it require TLS?**
- https://hub.docker.com/
-
- * **How do you store production secrets?**
- Front-End does not store secrets. JWT is stored in user's browser cookie.
-
- * **What is your authentication model (both user authentication and service-to-service authentication)?**
- oauth
-
- * **Do you store any sensitive user data (emails, phone numbers, intellectual property)?**
- JWT in cookie.
-
- Release process
- ---------------
-
- * **What’s the link to your docs on how to do a release?**
- https://docker.atlassian.net/wiki/display/DH/Hub+frontend+Deployment+Process
-
- * **How long does it take to release a code fix to production?**
- 4-8 hours
diff --git a/startup-scripts/boot-dev-tmux.sh b/startup-scripts/boot-dev-tmux.sh
deleted file mode 100755
index 0907330ac3..0000000000
--- a/startup-scripts/boot-dev-tmux.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-
-eval $(docker-machine env dev)
-
-###############################################################
-# You must have `tmux` installed locally. On OSX, this can be #
-# accomplished with `brew install tmux` #
-###############################################################
-
-SESSION=HubDev
-DIR=${PWD##*/}_hub_1
-CONTAINER=$(sed s/-//g <<< $DIR)
-
-
-# Create new tmux session
-tmux -2 new-session -d -s $SESSION
-
-# Window 1
-
-## webpack task
-tmux split-window
-tmux select-pane -t 0
-tmux send-keys "DEBUG=* webpack -wd" C-m
-
-## styles
-
-tmux select-pane -t 1
-tmux send-keys "DEBUG=* gulp watch::styles::dev" C-m
-
-## Flow
-
-tmux split-window -h
-tmux select-pane -t 2
-tmux send-keys "flow" C-m
-
-## docker logs
-
-tmux select-pane -t 0
-tmux split-window -h
-tmux send-keys "docker-compose logs hub" C-m
-
-# Attach to session
-tmux -2 attach-session -t $SESSION
diff --git a/startup-scripts/boot-dev.sh b/startup-scripts/boot-dev.sh
deleted file mode 100755
index 83b569ffe3..0000000000
--- a/startup-scripts/boot-dev.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-DEBUG=* webpack -dw &
-cd app/.build && nodemon ./server.js
diff --git a/startup-scripts/bootstrap-dev.sh b/startup-scripts/bootstrap-dev.sh
deleted file mode 100755
index 454c4620fa..0000000000
--- a/startup-scripts/bootstrap-dev.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# run this before the development container to bootstrap your local filesystem
-npm install
-cp app/favicon.ico app/.build/favicon.ico
-make server-target
-make styles-base
-gulp images::dev
-make images
-make docker-font-dev
diff --git a/webpack.config.js b/webpack.config.js
deleted file mode 100644
index 9a5242e5ec..0000000000
--- a/webpack.config.js
+++ /dev/null
@@ -1,151 +0,0 @@
-const debug = require('debug')('webpack-debug');
-var ENV_CONFIG = require('./_webpack/_envConfig.js');
-var fs = require('fs');
-var path = require('path');
-var ExtractTextPlugin = require("extract-text-webpack-plugin");
-var _ = require('lodash');
-var webpack = require('webpack');
-
-var loaders = require('./_webpack/_commonLoaders');
-
-/**
- * blacklist this array from being included in `externals`.
- *
- * This has the effect of making any modules in this list be
- * resolved at build time instead of runtime. This affects the
- * server bundle
- */
-var blacklist = ['.bin', 'hub-js-sdk', 'dux'];
-var node_modules = fs.readdirSync('node_modules').filter(function(x) {
- return !_.includes(blacklist, x);
-});
-
-/* Dux Button Config */
-var elementButton = require('@dux/element-button/defaults');
-var buttons = elementButton.mkButtons([{
- name: 'primary',
- color: '#FFF',
- bg: '#22B8EB'
-},{
- name: 'secondary',
- color: '#FFF',
- bg: '#232C37'
-},{
- name: 'coral',
- color: '#FFF',
- bg: '#FF85AF'
-},{
- name: 'success',
- color: '#FFF',
- bg: '#0FD85A'
-},{
- name: 'warning',
- color: '#FFF',
- bg: '#FF8546'
-},{
- name: 'yellow',
- color: '#FFF',
- bg: '#FFDE50'
-},{
- name: 'alert',
- color: '#FFF',
- bg: '#EB3E46'
-}]);
-debug('modules that will be runtime require dependencies of the server if the server requires them: ', node_modules);
-var commonConfig = {
- resolve: {
- extensions: ['', '.js', '.jsx', '.json'],
- root: [
- path.resolve(__dirname, './app/scripts/'),
- path.resolve(__dirname, './app/scripts/components/')
- ],
- modulesDirectories: ['node_modules', 'app/scripts']
- },
- module: {
- preLoaders: loaders.preLoaders,
- loaders: loaders.commonLoaders
- },
- plugins: [
- ENV_CONFIG,
- new webpack.optimize.DedupePlugin(),
- new ExtractTextPlugin('public/styles/style.css', { allChunks: true })
- ],
- postcss: [
- require('postcss-import')(),
- require('postcss-constants')({
- defaults: _.merge(require('@dux/element-card/defaults')({
- capBackground: '#f1f6fb',
- borderColor: '#c4cdda'
- }),
- {
- duxElementButton: {
- radius: '.25rem',
- buttons: buttons
- }
- })
- }),
- require('postcss-each'),
- require('postcss-cssnext')({
- browsers: 'last 2 versions',
- features: {
- // https://github.com/robwierzbowski/node-pixrem/issues/40
- rem: false
- }
- }),
- require('postcss-nested'),
- require('lost')({
- gutter: '1.25rem',
- flexbox: 'flex'
- }),
- require('postcss-cssstats')(function(stats) {
- /**
- * this is in test-phase because it runs on all
- * files individually. We should either figure out
- * that that is useful or get it to run on the full postcss
- * AST or extracted CSS file.
- */
- debug(stats);
- }),
- require('postcss-url')(),
- require('cssnano')(),
- require('postcss-browser-reporter')
- ],
- eslint: {
- failOnError: true
- },
- profile: true
-}
-
-var clientBundle = _.assign({},
- commonConfig,
- {
- // client.js
- entry: './app/scripts/client.js',
- devtool: 'eval-source-map',
- output: {
- path: 'app/.build/public/',
- filename: 'js/client.js'
- }
- });
-
-var serverBundle = _.assign({},
- commonConfig,
- {
- // server.js
- entry: './app/scripts/server.js',
- output: {
- path: 'app/.build/',
- filename: 'server.js',
- libraryTarget: 'commonjs2'
- },
- target: 'node',
- externals: node_modules,
- node: {
- __dirname: '/opt/hub/'
- }
- });
-
-module.exports = [
- clientBundle,
- serverBundle
-];