mirror of
https://github.com/mkdocs/mkdocs.git
synced 2026-03-27 09:58:31 +07:00
Removed six dependency.
This commit is contained in:
@@ -15,6 +15,7 @@ You can determine your currently installed version using `mkdocs --version`:
|
||||
|
||||
## Version 0.14.0 (2015-??-??)
|
||||
|
||||
* Remove dependency on the six library. (#583)
|
||||
* Add `--quiet` and `--verbose` options to all subcommands.
|
||||
* Add short options (`-a`) to most command line options.
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
from datetime import datetime
|
||||
import io
|
||||
import logging
|
||||
@@ -8,7 +9,6 @@ import os
|
||||
from jinja2.exceptions import TemplateNotFound
|
||||
import jinja2
|
||||
import json
|
||||
import six
|
||||
|
||||
from mkdocs import nav, search, utils
|
||||
from mkdocs.relative_path_ext import RelativePathExtension
|
||||
@@ -109,7 +109,7 @@ def get_page_context(page, content, toc, meta, config):
|
||||
base = config['site_url']
|
||||
if not base.endswith('/'):
|
||||
base += '/'
|
||||
canonical_url = six.moves.urllib.parse.urljoin(
|
||||
canonical_url = utils.urljoin(
|
||||
base, page.abs_url.lstrip('/'))
|
||||
else:
|
||||
canonical_url = None
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
import click
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
import os
|
||||
|
||||
import six
|
||||
import yaml
|
||||
|
||||
from mkdocs import exceptions
|
||||
from mkdocs import utils
|
||||
|
||||
|
||||
log = logging.getLogger('mkdocs.config')
|
||||
@@ -14,7 +14,7 @@ class ValidationError(Exception):
|
||||
"""Raised during the validation process of the config on errors."""
|
||||
|
||||
|
||||
class Config(six.moves.UserDict):
|
||||
class Config(utils.UserDict):
|
||||
"""
|
||||
MkDocs Configuration dict
|
||||
|
||||
@@ -106,7 +106,7 @@ def _open_config_file(config_file):
|
||||
log.debug("Loading configuration file: %s", config_file)
|
||||
|
||||
# If it is a string, we can assume it is a path and attempt to open it.
|
||||
if isinstance(config_file, six.string_types):
|
||||
if isinstance(config_file, utils.string_types):
|
||||
if os.path.exists(config_file):
|
||||
config_file = open(config_file, 'rb')
|
||||
else:
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
|
||||
import six
|
||||
|
||||
from mkdocs import utils, legacy
|
||||
from mkdocs.config.base import Config, ValidationError
|
||||
|
||||
@@ -164,7 +163,7 @@ class URL(OptionallyRequired):
|
||||
def run_validation(self, value):
|
||||
|
||||
try:
|
||||
parsed_url = six.moves.urllib.parse.urlparse(value)
|
||||
parsed_url = utils.urlparse(value)
|
||||
except (AttributeError, TypeError):
|
||||
raise ValidationError("Unable to parse the URL.")
|
||||
|
||||
@@ -186,7 +185,7 @@ class RepoURL(URL):
|
||||
def post_validation(self, config, key_name):
|
||||
|
||||
if config['repo_url'] is not None and config.get('repo_name') is None:
|
||||
repo_host = six.moves.urllib.parse.urlparse(
|
||||
repo_host = utils.urlparse(
|
||||
config['repo_url']).netloc.lower()
|
||||
if repo_host == 'github.com':
|
||||
config['repo_name'] = 'GitHub'
|
||||
@@ -204,7 +203,7 @@ class Dir(Type):
|
||||
"""
|
||||
|
||||
def __init__(self, exists=False, **kwargs):
|
||||
super(Dir, self).__init__(type_=six.string_types, **kwargs)
|
||||
super(Dir, self).__init__(type_=utils.string_types, **kwargs)
|
||||
self.exists = exists
|
||||
|
||||
def run_validation(self, value):
|
||||
@@ -369,15 +368,15 @@ class Pages(Extras):
|
||||
# TODO: Remove in 1.0
|
||||
config_types = set(type(l) for l in value)
|
||||
|
||||
if config_types.issubset(set([six.text_type, dict, str])):
|
||||
if config_types.issubset(set([utils.text_type, dict, str])):
|
||||
return value
|
||||
|
||||
if config_types.issubset(set([six.text_type, list, str])):
|
||||
if config_types.issubset(set([utils.text_type, list, str])):
|
||||
return legacy.pages_compat_shim(value)
|
||||
|
||||
raise ValidationError("Invalid pages config. {0} {1}".format(
|
||||
config_types,
|
||||
set([six.text_type, dict, ])
|
||||
set([utils.text_type, dict, ])
|
||||
))
|
||||
|
||||
def post_validation(self, config, key_name):
|
||||
@@ -463,7 +462,7 @@ class MarkdownExtensions(OptionallyRequired):
|
||||
raise ValidationError('Invalid config options for Markdown '
|
||||
"Extension '{0}'.".format(ext))
|
||||
self.configdata[ext] = cfg
|
||||
elif isinstance(item, six.string_types):
|
||||
elif isinstance(item, utils.string_types):
|
||||
extensions.append(item)
|
||||
else:
|
||||
raise ValidationError('Invalid Markdown Extensions configuration')
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import six
|
||||
|
||||
from mkdocs import utils
|
||||
from mkdocs.config import config_options
|
||||
|
||||
@@ -16,10 +14,10 @@ from mkdocs.config import config_options
|
||||
DEFAULT_SCHEMA = (
|
||||
|
||||
# Reserved for internal use, stores the mkdocs.yml config file.
|
||||
('config_file_path', config_options.Type(six.string_types)),
|
||||
('config_file_path', config_options.Type(utils.string_types)),
|
||||
|
||||
# The title to use for the documentation
|
||||
('site_name', config_options.Type(six.string_types, required=True)),
|
||||
('site_name', config_options.Type(utils.string_types, required=True)),
|
||||
|
||||
# Defines the structure of the navigation and which markdown files are
|
||||
# included in the build.
|
||||
@@ -30,12 +28,12 @@ DEFAULT_SCHEMA = (
|
||||
|
||||
# A description for the documentation project that will be added to the
|
||||
# HTML meta tags.
|
||||
('site_description', config_options.Type(six.string_types)),
|
||||
('site_description', config_options.Type(utils.string_types)),
|
||||
# The name of the author to add to the HTML meta tags
|
||||
('site_author', config_options.Type(six.string_types)),
|
||||
('site_author', config_options.Type(utils.string_types)),
|
||||
|
||||
# The path to the favicon for a site
|
||||
('site_favicon', config_options.Type(six.string_types)),
|
||||
('site_favicon', config_options.Type(utils.string_types)),
|
||||
|
||||
# The MkDocs theme for the documentation.
|
||||
('theme', config_options.Theme(default='mkdocs')),
|
||||
@@ -51,7 +49,7 @@ DEFAULT_SCHEMA = (
|
||||
('theme_dir', config_options.ThemeDir(exists=True)),
|
||||
|
||||
# A copyright notice to add to the footer of documentation.
|
||||
('copyright', config_options.Type(six.string_types)),
|
||||
('copyright', config_options.Type(utils.string_types)),
|
||||
|
||||
# set of values for Google analytics containing the account ID and domain,
|
||||
# this should look like, ['UA-27795084-5', 'mkdocs.org']
|
||||
@@ -59,7 +57,7 @@ DEFAULT_SCHEMA = (
|
||||
|
||||
# The address on which to serve the live reloading docs server.
|
||||
('dev_addr', config_options.Type(
|
||||
six.string_types, default='127.0.0.1:8000')),
|
||||
utils.string_types, default='127.0.0.1:8000')),
|
||||
|
||||
# If `True`, use `<page_name>/index.html` style files with hyperlinks to
|
||||
# the directory. If `False`, use `<page_name>.html style file with
|
||||
@@ -75,7 +73,7 @@ DEFAULT_SCHEMA = (
|
||||
# A name to use for the link to the project source repo.
|
||||
# Default, If repo_url is unset then None, otherwise
|
||||
# "GitHub" or "Bitbucket" for known url or Hostname for unknown urls.
|
||||
('repo_name', config_options.Type(six.string_types)),
|
||||
('repo_name', config_options.Type(utils.string_types)),
|
||||
|
||||
# Specify which css or javascript files from the docs directory should be
|
||||
# additionally included in the site. Default, List of all .css and .js
|
||||
@@ -108,7 +106,7 @@ DEFAULT_SCHEMA = (
|
||||
|
||||
# the remote branch to commit to when using gh-deploy
|
||||
('remote_branch', config_options.Type(
|
||||
six.string_types, default='gh-pages')),
|
||||
utils.string_types, default='gh-pages')),
|
||||
|
||||
# extra is a mapping/dictionary of data that is passed to the template.
|
||||
# This allows template authors to require extra configuration that not
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
from click import ClickException
|
||||
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
|
||||
import six
|
||||
|
||||
from mkdocs import utils
|
||||
from mkdocs.exceptions import ConfigurationError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -53,7 +53,7 @@ def pages_compat_shim(original_pages):
|
||||
|
||||
for config_line in original_pages:
|
||||
|
||||
if isinstance(config_line, six.string_types):
|
||||
if isinstance(config_line, utils.string_types):
|
||||
config_line = [config_line, ]
|
||||
|
||||
if len(config_line) not in (1, 2, 3):
|
||||
|
||||
@@ -6,12 +6,11 @@ Deals with generating the site-wide navigation.
|
||||
This consists of building a set of interlinked page and header objects.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
|
||||
import six
|
||||
|
||||
from mkdocs import utils, exceptions
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -213,7 +212,7 @@ def _path_to_page(path, title, url_context, use_directory_urls):
|
||||
|
||||
def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
|
||||
|
||||
if isinstance(config_line, six.string_types):
|
||||
if isinstance(config_line, utils.string_types):
|
||||
path = os.path.normpath(config_line)
|
||||
page = _path_to_page(path, title, url_context, use_dir_urls)
|
||||
|
||||
@@ -239,7 +238,7 @@ def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
|
||||
|
||||
next_cat_or_title, subpages_or_path = next(iter(config_line.items()))
|
||||
|
||||
if isinstance(subpages_or_path, six.string_types):
|
||||
if isinstance(subpages_or_path, utils.string_types):
|
||||
path = subpages_or_path
|
||||
for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):
|
||||
yield sub
|
||||
|
||||
@@ -35,11 +35,11 @@ tutorial/install.md | tutorial/install/ | ../img/initial-layout.png |
|
||||
tutorial/intro.md | tutorial/intro/ | ../../img/initial-layout.png |
|
||||
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
|
||||
from markdown.extensions import Extension
|
||||
from markdown.treeprocessors import Treeprocessor
|
||||
import six
|
||||
|
||||
from mkdocs import utils
|
||||
from mkdocs.exceptions import MarkdownNotFound
|
||||
@@ -56,7 +56,7 @@ def _iter(node):
|
||||
def path_to_url(url, nav, strict):
|
||||
|
||||
scheme, netloc, path, params, query, fragment = (
|
||||
six.moves.urllib.parse.urlparse(url))
|
||||
utils.urlparse(url))
|
||||
|
||||
if scheme or netloc or not path:
|
||||
# Ignore URLs unless they are a relative link to a markdown file.
|
||||
@@ -89,7 +89,7 @@ def path_to_url(url, nav, strict):
|
||||
|
||||
# Convert the .md hyperlink to a relative hyperlink to the HTML page.
|
||||
fragments = (scheme, netloc, path, params, query, fragment)
|
||||
url = six.moves.urllib.parse.urlunparse(fragments)
|
||||
url = utils.urlunparse(fragments)
|
||||
return url
|
||||
|
||||
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import six
|
||||
from mkdocs import utils
|
||||
|
||||
try: # pragma: no cover
|
||||
from html.parser import HTMLParser # noqa
|
||||
except ImportError: # pragma: no cover
|
||||
from HTMLParser import HTMLParser # noqa
|
||||
|
||||
|
||||
class SearchIndex(object):
|
||||
@@ -32,7 +37,7 @@ class SearchIndex(object):
|
||||
"""
|
||||
self._entries.append({
|
||||
'title': title,
|
||||
'text': six.text_type(text.strip().encode('utf-8'), encoding='utf-8'),
|
||||
'text': utils.text_type(text.strip().encode('utf-8'), encoding='utf-8'),
|
||||
'location': loc
|
||||
})
|
||||
|
||||
@@ -93,7 +98,7 @@ class SearchIndex(object):
|
||||
return s.get_data()
|
||||
|
||||
|
||||
class HTMLStripper(six.moves.html_parser.HTMLParser):
|
||||
class HTMLStripper(HTMLParser):
|
||||
"""
|
||||
A simple HTML parser that stores all of the data within tags
|
||||
but ignores the tags themselves and thus strips them from the
|
||||
@@ -103,7 +108,7 @@ class HTMLStripper(six.moves.html_parser.HTMLParser):
|
||||
def __init__(self, *args, **kwargs):
|
||||
# HTMLParser is a old-style class in Python 2, so
|
||||
# super() wont work here.
|
||||
six.moves.html_parser.HTMLParser.__init__(self, *args, **kwargs)
|
||||
HTMLParser.__init__(self, *args, **kwargs)
|
||||
|
||||
self.data = []
|
||||
|
||||
@@ -136,7 +141,7 @@ class ContentSection(object):
|
||||
])
|
||||
|
||||
|
||||
class ContentParser(six.moves.html_parser.HTMLParser):
|
||||
class ContentParser(HTMLParser):
|
||||
"""
|
||||
Given a block of HTML, group the content under the preceding
|
||||
H1 or H2 tags which can then be used for creating an index
|
||||
@@ -147,7 +152,7 @@ class ContentParser(six.moves.html_parser.HTMLParser):
|
||||
|
||||
# HTMLParser is a old-style class in Python 2, so
|
||||
# super() wont work here.
|
||||
six.moves.html_parser.HTMLParser.__init__(self, *args, **kwargs)
|
||||
HTMLParser.__init__(self, *args, **kwargs)
|
||||
|
||||
self.data = []
|
||||
self.section = None
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
import logging
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
import textwrap
|
||||
import markdown
|
||||
|
||||
|
||||
@@ -1,14 +1,20 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from six.moves import zip
|
||||
import mock
|
||||
|
||||
try:
|
||||
from itertools import izip as zip
|
||||
except ImportError:
|
||||
# In Py3 use builtin zip function
|
||||
pass
|
||||
|
||||
|
||||
from mkdocs import build, nav, config
|
||||
from mkdocs.exceptions import MarkdownNotFound
|
||||
from mkdocs.tests.base import dedent
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import unittest
|
||||
import mock
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
from __future__ import unicode_literals
|
||||
import unittest
|
||||
|
||||
import yaml
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import mock
|
||||
import os
|
||||
import unittest
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import tempfile
|
||||
import unittest
|
||||
import os
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import unittest
|
||||
|
||||
from mkdocs import nav
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import unittest
|
||||
|
||||
from mkdocs.tests.base import dedent, markdown_to_toc
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import unittest
|
||||
|
||||
|
||||
@@ -14,7 +14,12 @@ The steps we take to generate a table of contents are:
|
||||
* Parse table of contents HTML into the underlying data structure.
|
||||
"""
|
||||
|
||||
import six
|
||||
from __future__ import unicode_literals
|
||||
|
||||
try: # pragma: no cover
|
||||
from html.parser import HTMLParser # noqa
|
||||
except ImportError: # pragma: no cover
|
||||
from HTMLParser import HTMLParser # noqa
|
||||
|
||||
|
||||
class TableOfContents(object):
|
||||
@@ -50,10 +55,10 @@ class AnchorLink(object):
|
||||
return ret
|
||||
|
||||
|
||||
class TOCParser(six.moves.html_parser.HTMLParser):
|
||||
class TOCParser(HTMLParser):
|
||||
|
||||
def __init__(self):
|
||||
six.moves.html_parser.HTMLParser.__init__(self)
|
||||
HTMLParser.__init__(self)
|
||||
self.links = []
|
||||
|
||||
self.in_anchor = False
|
||||
|
||||
@@ -7,14 +7,33 @@ Nothing in this module should have any knowledge of config or the layout
|
||||
and structure of the site and pages in the site.
|
||||
"""
|
||||
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
|
||||
import markdown
|
||||
import six
|
||||
|
||||
from mkdocs import toc
|
||||
|
||||
try: # pragma: no cover
|
||||
from urllib.parse import urlparse, urlunparse, urljoin # noqa
|
||||
from urllib.request import pathname2url # noqa
|
||||
from collections import UserDict # noqa
|
||||
except ImportError: # pragma: no cover
|
||||
from urlparse import urlparse, urlunparse, urljoin # noqa
|
||||
from urllib import pathname2url # noqa
|
||||
from UserDict import UserDict # noqa
|
||||
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
if PY3: # pragma: no cover
|
||||
string_types = str, # noqa
|
||||
text_type = str # noqa
|
||||
else: # pragma: no cover
|
||||
string_types = basestring, # noqa
|
||||
text_type = unicode # noqa
|
||||
|
||||
|
||||
def reduce_list(data_set):
|
||||
""" Reduce duplicate items in a list and preserve order """
|
||||
@@ -190,7 +209,7 @@ def create_media_urls(nav, path_list):
|
||||
|
||||
for path in path_list:
|
||||
# Allow links to fully qualified URL's
|
||||
parsed = six.moves.urllib.parse.urlparse(path)
|
||||
parsed = urlparse(path)
|
||||
if parsed.netloc:
|
||||
final_urls.append(path)
|
||||
continue
|
||||
@@ -221,7 +240,7 @@ def create_relative_media_url(nav, url):
|
||||
"""
|
||||
|
||||
# Allow links to fully qualified URL's
|
||||
parsed = six.moves.urllib.parse.urlparse(url)
|
||||
parsed = urlparse(url)
|
||||
if parsed.netloc:
|
||||
return url
|
||||
|
||||
@@ -243,7 +262,7 @@ def create_relative_media_url(nav, url):
|
||||
# correctly for images in the same directory as the markdown. I think this
|
||||
# is due to us moving it into a directory with index.html, but I'm not sure
|
||||
if (nav.file_context.current_file.endswith("/index.md") is False and
|
||||
nav.url_context.base_path is not '/' and
|
||||
nav.url_context.base_path != '/' and
|
||||
relative_url.startswith("./")):
|
||||
relative_url = ".%s" % relative_url
|
||||
|
||||
@@ -256,7 +275,7 @@ def path_to_url(path):
|
||||
if os.path.sep == '/':
|
||||
return path
|
||||
|
||||
return six.moves.urllib.request.pathname2url(path)
|
||||
return pathname2url(path)
|
||||
|
||||
|
||||
def convert_markdown(markdown_source, extensions=None, extension_configs=None):
|
||||
|
||||
@@ -4,5 +4,4 @@ Jinja2>=2.7.1
|
||||
livereload>=2.3.2
|
||||
Markdown>=2.5
|
||||
PyYAML>=3.10
|
||||
six>=1.9.0
|
||||
tornado>=4.1
|
||||
|
||||
Reference in New Issue
Block a user