diff --git a/venv/bin/activate b/venv/bin/activate new file mode 100644 index 0000000..625bdeb --- /dev/null +++ b/venv/bin/activate @@ -0,0 +1,78 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + unset -f pydoc >/dev/null 2>&1 + + # reset old environment variables + # ! [ -z ${VAR+_} ] returns true if VAR is declared at all + if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then + PATH="$_OLD_VIRTUAL_PATH" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then + PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then + hash -r 2>/dev/null + fi + + if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then + PS1="$_OLD_VIRTUAL_PS1" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + if [ ! "${1-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/home/wes/MGOAL/venv" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +if ! [ -z "${PYTHONHOME+_}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then + _OLD_VIRTUAL_PS1="$PS1" + if [ "x" != x ] ; then + PS1="$PS1" + else + PS1="(`basename \"$VIRTUAL_ENV\"`) $PS1" + fi + export PS1 +fi + +# Make sure to unalias pydoc if it's already there +alias pydoc 2>/dev/null >/dev/null && unalias pydoc + +pydoc () { + python -m pydoc "$@" +} + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then + hash -r 2>/dev/null +fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh new file mode 100644 index 0000000..9c5cb83 --- /dev/null +++ b/venv/bin/activate.csh @@ -0,0 +1,36 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/home/wes/MGOAL/venv" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + + +if ("" != "") then + set env_name = "" +else + set env_name = `basename "$VIRTUAL_ENV"` +endif + +# Could be in a non-interactive environment, +# in which case, $prompt is undefined and we wouldn't +# care about the prompt anyway. +if ( $?prompt ) then + set _OLD_VIRTUAL_PROMPT="$prompt" + set prompt = "[$env_name] $prompt" +endif + +unset env_name + +alias pydoc python -m pydoc + +rehash + diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish new file mode 100644 index 0000000..152dc7d --- /dev/null +++ b/venv/bin/activate.fish @@ -0,0 +1,76 @@ +# This file must be used using `. 
bin/activate.fish` *within a running fish ( http://fishshell.com ) session*. +# Do not run it directly. + +function deactivate -d 'Exit virtualenv mode and return to the normal environment.' + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`. + set -l fish_function_path + + # Erase virtualenv's `fish_prompt` and restore the original. + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + end + + set -e VIRTUAL_ENV + + if test "$argv[1]" != 'nondestructive' + # Self-destruct! + functions -e pydoc + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/home/wes/MGOAL/venv" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset `$PYTHONHOME` if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +function pydoc + python -m pydoc $argv +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # Copy the current `fish_prompt` function as `_old_fish_prompt`. + functions -c fish_prompt _old_fish_prompt + + function fish_prompt + # Save the current $status, for fish_prompts that display it. + set -l old_status $status + + # Prompt override provided? + # If not, just prepend the environment name. + if test -n "" + printf '%s%s' "" (set_color normal) + else + printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV") + end + + # Restore the original $status + echo "exit $old_status" | source + _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/venv/bin/activate_this.py b/venv/bin/activate_this.py new file mode 100644 index 0000000..f18193b --- /dev/null +++ b/venv/bin/activate_this.py @@ -0,0 +1,34 @@ +"""By using execfile(this_file, dict(__file__=this_file)) you will +activate this virtualenv environment. 
+ +This can be used when you must use an existing Python interpreter, not +the virtualenv bin/python +""" + +try: + __file__ +except NameError: + raise AssertionError( + "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))") +import sys +import os + +old_os_path = os.environ.get('PATH', '') +os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path +base = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +if sys.platform == 'win32': + site_packages = os.path.join(base, 'Lib', 'site-packages') +else: + site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages') +prev_sys_path = list(sys.path) +import site +site.addsitedir(site_packages) +sys.real_prefix = sys.prefix +sys.prefix = base +# Move the added items to the front of the path: +new_sys_path = [] +for item in list(sys.path): + if item not in prev_sys_path: + new_sys_path.append(item) + sys.path.remove(item) +sys.path[:0] = new_sys_path diff --git a/venv/bin/easy_install b/venv/bin/easy_install new file mode 100755 index 0000000..130372c --- /dev/null +++ b/venv/bin/easy_install @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/easy_install-2.7 b/venv/bin/easy_install-2.7 new file mode 100755 index 0000000..130372c --- /dev/null +++ b/venv/bin/easy_install-2.7 @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/flask b/venv/bin/flask new file mode 100755 index 0000000..c1f3137 --- /dev/null +++ b/venv/bin/flask @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from flask.cli import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/pip b/venv/bin/pip new file mode 100755 index 0000000..90c3dd8 --- /dev/null +++ b/venv/bin/pip @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/pip2 b/venv/bin/pip2 new file mode 100755 index 0000000..90c3dd8 --- /dev/null +++ b/venv/bin/pip2 @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/pip2.7 b/venv/bin/pip2.7 new file mode 100755 index 0000000..90c3dd8 --- /dev/null +++ b/venv/bin/pip2.7 @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/python b/venv/bin/python new file mode 120000 index 0000000..551ab26 --- /dev/null +++ b/venv/bin/python @@ -0,0 +1 @@ +python2 \ No newline at end of file diff --git a/venv/bin/python-config 
b/venv/bin/python-config new file mode 100755 index 0000000..d66c2e0 --- /dev/null +++ b/venv/bin/python-config @@ -0,0 +1,78 @@ +#!/home/wes/MGOAL/venv/bin/python + +import sys +import getopt +import sysconfig + +valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags', + 'ldflags', 'help'] + +if sys.version_info >= (3, 2): + valid_opts.insert(-1, 'extension-suffix') + valid_opts.append('abiflags') +if sys.version_info >= (3, 3): + valid_opts.append('configdir') + + +def exit_with_usage(code=1): + sys.stderr.write("Usage: {0} [{1}]\n".format( + sys.argv[0], '|'.join('--'+opt for opt in valid_opts))) + sys.exit(code) + +try: + opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) +except getopt.error: + exit_with_usage() + +if not opts: + exit_with_usage() + +pyver = sysconfig.get_config_var('VERSION') +getvar = sysconfig.get_config_var + +opt_flags = [flag for (flag, val) in opts] + +if '--help' in opt_flags: + exit_with_usage(code=0) + +for opt in opt_flags: + if opt == '--prefix': + print(sysconfig.get_config_var('prefix')) + + elif opt == '--exec-prefix': + print(sysconfig.get_config_var('exec_prefix')) + + elif opt in ('--includes', '--cflags'): + flags = ['-I' + sysconfig.get_path('include'), + '-I' + sysconfig.get_path('platinclude')] + if opt == '--cflags': + flags.extend(getvar('CFLAGS').split()) + print(' '.join(flags)) + + elif opt in ('--libs', '--ldflags'): + abiflags = getattr(sys, 'abiflags', '') + libs = ['-lpython' + pyver + abiflags] + libs += getvar('LIBS').split() + libs += getvar('SYSLIBS').split() + # add the prefix/lib/pythonX.Y/config dir, but only if there is no + # shared library in prefix/lib/. + if opt == '--ldflags': + if not getvar('Py_ENABLE_SHARED'): + libs.insert(0, '-L' + getvar('LIBPL')) + if not getvar('PYTHONFRAMEWORK'): + libs.extend(getvar('LINKFORSHARED').split()) + print(' '.join(libs)) + + elif opt == '--extension-suffix': + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + if ext_suffix is None: + ext_suffix = sysconfig.get_config_var('SO') + print(ext_suffix) + + elif opt == '--abiflags': + if not getattr(sys, 'abiflags', None): + exit_with_usage() + print(sys.abiflags) + + elif opt == '--configdir': + print(sysconfig.get_config_var('LIBPL')) diff --git a/venv/bin/python2 b/venv/bin/python2 new file mode 100755 index 0000000..4965985 Binary files /dev/null and b/venv/bin/python2 differ diff --git a/venv/bin/python2.7 b/venv/bin/python2.7 new file mode 120000 index 0000000..551ab26 --- /dev/null +++ b/venv/bin/python2.7 @@ -0,0 +1 @@ +python2 \ No newline at end of file diff --git a/venv/bin/wheel b/venv/bin/wheel new file mode 100755 index 0000000..b59cbf6 --- /dev/null +++ b/venv/bin/wheel @@ -0,0 +1,11 @@ +#!/home/wes/MGOAL/venv/bin/python2 + +# -*- coding: utf-8 -*- +import re +import sys + +from wheel.tool import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/include/python2.7 b/venv/include/python2.7 new file mode 120000 index 0000000..ad4ca80 --- /dev/null +++ b/venv/include/python2.7 @@ -0,0 +1 @@ +/usr/include/python2.7 \ No newline at end of file diff --git a/venv/lib/python2.7/UserDict.py b/venv/lib/python2.7/UserDict.py new file mode 120000 index 0000000..1dcde33 --- /dev/null +++ b/venv/lib/python2.7/UserDict.py @@ -0,0 +1 @@ +/usr/lib/python2.7/UserDict.py \ No newline at end of file diff --git a/venv/lib/python2.7/_abcoll.py b/venv/lib/python2.7/_abcoll.py new file mode 120000 index 0000000..e39c38d --- /dev/null +++ 
b/venv/lib/python2.7/_abcoll.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/_abcoll.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/_weakrefset.py b/venv/lib/python2.7/_weakrefset.py
new file mode 120000
index 0000000..a3c1cd4
--- /dev/null
+++ b/venv/lib/python2.7/_weakrefset.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/_weakrefset.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/abc.py b/venv/lib/python2.7/abc.py
new file mode 120000
index 0000000..cb3e5d1
--- /dev/null
+++ b/venv/lib/python2.7/abc.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/abc.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/codecs.py b/venv/lib/python2.7/codecs.py
new file mode 120000
index 0000000..50169dc
--- /dev/null
+++ b/venv/lib/python2.7/codecs.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/codecs.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/config b/venv/lib/python2.7/config
new file mode 120000
index 0000000..154af7a
--- /dev/null
+++ b/venv/lib/python2.7/config
@@ -0,0 +1 @@
+/usr/lib/python2.7/config
\ No newline at end of file
diff --git a/venv/lib/python2.7/copy_reg.py b/venv/lib/python2.7/copy_reg.py
new file mode 120000
index 0000000..5dc0af3
--- /dev/null
+++ b/venv/lib/python2.7/copy_reg.py
@@ -0,0 +1 @@
+/usr/lib/python2.7/copy_reg.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/distutils/__init__.py b/venv/lib/python2.7/distutils/__init__.py
new file mode 100644
index 0000000..29fc1da
--- /dev/null
+++ b/venv/lib/python2.7/distutils/__init__.py
@@ -0,0 +1,101 @@
+import os
+import sys
+import warnings
+import imp
+import opcode  # opcode is not a virtualenv module, so we can use it to find the stdlib
+               # Important! To work on pypy, this must be a module that resides in the
+               # lib-python/modified-x.y.z directory
+
+dirname = os.path.dirname
+
+distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
+if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
+    warnings.warn(
+        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?"
+        % distutils_path)
+else:
+    __path__.insert(0, distutils_path)
+    real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
+    # Copy the relevant attributes
+    try:
+        __revision__ = real_distutils.__revision__
+    except AttributeError:
+        pass
+    __version__ = real_distutils.__version__
+
+from distutils import dist, sysconfig
+
+try:
+    basestring
+except NameError:
+    basestring = str
+
+## patch build_ext (distutils doesn't know how to get the libs directory
+## path on windows - it hardcodes the paths around the patched sys.prefix)
+
+if sys.platform == 'win32':
+    from distutils.command.build_ext import build_ext as old_build_ext
+    class build_ext(old_build_ext):
+        def finalize_options(self):
+            if self.library_dirs is None:
+                self.library_dirs = []
+            elif isinstance(self.library_dirs, basestring):
+                self.library_dirs = self.library_dirs.split(os.pathsep)
+
+            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
+            old_build_ext.finalize_options(self)
+
+    from distutils.command import build_ext as build_ext_module
+    build_ext_module.build_ext = build_ext
+
+## distutils.dist patches:
+
+old_find_config_files = dist.Distribution.find_config_files
+def find_config_files(self):
+    found = old_find_config_files(self)
+    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
+    #if os.path.exists(system_distutils):
+    #    found.insert(0, system_distutils)
+    # What to call the per-user config file
+    if os.name == 'posix':
+        user_filename = ".pydistutils.cfg"
+    else:
+        user_filename = "pydistutils.cfg"
+    user_filename = os.path.join(sys.prefix, user_filename)
+    if os.path.isfile(user_filename):
+        for item in list(found):
+            if item.endswith('pydistutils.cfg'):
+                found.remove(item)
+        found.append(user_filename)
+    return found
+dist.Distribution.find_config_files = find_config_files
+
+## distutils.sysconfig patches:
+
+old_get_python_inc = sysconfig.get_python_inc
+def sysconfig_get_python_inc(plat_specific=0, prefix=None):
+    if prefix is None:
+        prefix = sys.real_prefix
+    return old_get_python_inc(plat_specific, prefix)
+sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
+sysconfig.get_python_inc = sysconfig_get_python_inc
+
+old_get_python_lib = sysconfig.get_python_lib
+def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+    if standard_lib and prefix is None:
+        prefix = sys.real_prefix
+    return old_get_python_lib(plat_specific, standard_lib, prefix)
+sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
+sysconfig.get_python_lib = sysconfig_get_python_lib
+
+old_get_config_vars = sysconfig.get_config_vars
+def sysconfig_get_config_vars(*args):
+    real_vars = old_get_config_vars(*args)
+    if sys.platform == 'win32':
+        lib_dir = os.path.join(sys.real_prefix, "libs")
+        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
+            real_vars['LIBDIR'] = lib_dir  # asked for all
+        elif isinstance(real_vars, list) and 'LIBDIR' in args:
+            real_vars = real_vars + [lib_dir]  # asked for list
+    return real_vars
+sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
+sysconfig.get_config_vars = sysconfig_get_config_vars
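The sysconfig patches above are why header and library lookups inside this venv resolve against the real interpreter rather than the virtualenv prefix. A minimal sketch of the observable effect, assuming this venv's layout (orig-prefix.txt later in this diff records /usr) and run under the venv's python2; the printed paths are illustrative:

    # Hedged illustration, not part of the committed files.
    import sys
    from distutils import sysconfig

    print(sys.prefix)                  # /home/wes/MGOAL/venv
    print(sys.real_prefix)             # /usr, set by virtualenv at interpreter startup
    print(sysconfig.get_python_inc())  # /usr/include/python2.7, via the patched lookup
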
diff --git a/venv/lib/python2.7/distutils/distutils.cfg b/venv/lib/python2.7/distutils/distutils.cfg
new file mode 100644
index 0000000..1af230e
--- /dev/null
+++ b/venv/lib/python2.7/distutils/distutils.cfg
@@ -0,0 +1,6 @@
+# This is a config file local to this virtualenv installation
+# You may include options that will be used by all distutils commands,
+# and by easy_install.
For instance: +# +# [easy_install] +# find_links = http://mylocalsite diff --git a/venv/lib/python2.7/encodings b/venv/lib/python2.7/encodings new file mode 120000 index 0000000..1250ad8 --- /dev/null +++ b/venv/lib/python2.7/encodings @@ -0,0 +1 @@ +/usr/lib/python2.7/encodings \ No newline at end of file diff --git a/venv/lib/python2.7/fnmatch.py b/venv/lib/python2.7/fnmatch.py new file mode 120000 index 0000000..ec3e10c --- /dev/null +++ b/venv/lib/python2.7/fnmatch.py @@ -0,0 +1 @@ +/usr/lib/python2.7/fnmatch.py \ No newline at end of file diff --git a/venv/lib/python2.7/genericpath.py b/venv/lib/python2.7/genericpath.py new file mode 120000 index 0000000..cb8897c --- /dev/null +++ b/venv/lib/python2.7/genericpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/genericpath.py \ No newline at end of file diff --git a/venv/lib/python2.7/lib-dynload b/venv/lib/python2.7/lib-dynload new file mode 120000 index 0000000..c706a1e --- /dev/null +++ b/venv/lib/python2.7/lib-dynload @@ -0,0 +1 @@ +/usr/lib/python2.7/lib-dynload \ No newline at end of file diff --git a/venv/lib/python2.7/linecache.py b/venv/lib/python2.7/linecache.py new file mode 120000 index 0000000..943c429 --- /dev/null +++ b/venv/lib/python2.7/linecache.py @@ -0,0 +1 @@ +/usr/lib/python2.7/linecache.py \ No newline at end of file diff --git a/venv/lib/python2.7/locale.py b/venv/lib/python2.7/locale.py new file mode 120000 index 0000000..92c243c --- /dev/null +++ b/venv/lib/python2.7/locale.py @@ -0,0 +1 @@ +/usr/lib/python2.7/locale.py \ No newline at end of file diff --git a/venv/lib/python2.7/no-global-site-packages.txt b/venv/lib/python2.7/no-global-site-packages.txt new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python2.7/ntpath.py b/venv/lib/python2.7/ntpath.py new file mode 120000 index 0000000..5659ae1 --- /dev/null +++ b/venv/lib/python2.7/ntpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/ntpath.py \ No newline at end of file diff --git a/venv/lib/python2.7/orig-prefix.txt b/venv/lib/python2.7/orig-prefix.txt new file mode 100644 index 0000000..e25db58 --- /dev/null +++ b/venv/lib/python2.7/orig-prefix.txt @@ -0,0 +1 @@ +/usr \ No newline at end of file diff --git a/venv/lib/python2.7/os.py b/venv/lib/python2.7/os.py new file mode 120000 index 0000000..950fc8d --- /dev/null +++ b/venv/lib/python2.7/os.py @@ -0,0 +1 @@ +/usr/lib/python2.7/os.py \ No newline at end of file diff --git a/venv/lib/python2.7/posixpath.py b/venv/lib/python2.7/posixpath.py new file mode 120000 index 0000000..30cb8ca --- /dev/null +++ b/venv/lib/python2.7/posixpath.py @@ -0,0 +1 @@ +/usr/lib/python2.7/posixpath.py \ No newline at end of file diff --git a/venv/lib/python2.7/re.py b/venv/lib/python2.7/re.py new file mode 120000 index 0000000..56a0731 --- /dev/null +++ b/venv/lib/python2.7/re.py @@ -0,0 +1 @@ +/usr/lib/python2.7/re.py \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..f34086e --- /dev/null +++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,44 @@ +Flask +----- + +Flask is a microframework for Python based on Werkzeug, Jinja 2 and good +intentions. And before you ask: It's BSD licensed! + +Flask is Fun +```````````` + +Save in a hello.py: + +.. code:: python + + from flask import Flask + app = Flask(__name__) + + @app.route("/") + def hello(): + return "Hello World!" 
+
+    if __name__ == "__main__":
+        app.run()
+
+And Easy to Setup
+`````````````````
+
+And run it:
+
+.. code:: bash
+
+    $ pip install Flask
+    $ python hello.py
+     * Running on http://localhost:5000/
+
+Links
+`````
+
+* `website <http://flask.pocoo.org/>`_
+* `documentation <http://flask.pocoo.org/docs/>`_
+* `development version
+  <http://github.com/pallets/flask/zipball/master#egg=Flask-dev>`_
+
+
+
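The hello.py example above can be exercised without starting a server by using Flask's built-in test client; a minimal sketch, assuming hello.py from the description is importable:

    # Hedged usage sketch, not part of the committed files.
    from hello import app

    client = app.test_client()   # WSGI test client shipped with Flask
    resp = client.get('/')
    print(resp.status_code)      # 200
    print(resp.get_data())       # Hello World!
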
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/METADATA b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/METADATA
new file mode 100644
index 0000000..2362e83
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/METADATA
@@ -0,0 +1,73 @@
+Metadata-Version: 2.0
+Name: Flask
+Version: 0.11.1
+Summary: A microframework based on Werkzeug, Jinja2 and good intentions
+Home-page: http://github.com/pallets/flask/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: Jinja2 (>=2.4)
+Requires-Dist: Werkzeug (>=0.7)
+Requires-Dist: click (>=2.0)
+Requires-Dist: itsdangerous (>=0.21)
+
+Flask
+-----
+
+Flask is a microframework for Python based on Werkzeug, Jinja 2 and good
+intentions. And before you ask: It's BSD licensed!
+
+Flask is Fun
+````````````
+
+Save in a hello.py:
+
+.. code:: python
+
+    from flask import Flask
+    app = Flask(__name__)
+
+    @app.route("/")
+    def hello():
+        return "Hello World!"
+
+    if __name__ == "__main__":
+        app.run()
+
+And Easy to Setup
+`````````````````
+
+And run it:
+
+.. code:: bash
+
+    $ pip install Flask
+    $ python hello.py
+     * Running on http://localhost:5000/
+
+Links
+`````
+
+* `website <http://flask.pocoo.org/>`_
+* `documentation <http://flask.pocoo.org/docs/>`_
+* `development version
+  <http://github.com/pallets/flask/zipball/master#egg=Flask-dev>`_
+
+
+
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/RECORD b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/RECORD
new file mode 100644
index 0000000..1d0ce4b
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/RECORD
@@ -0,0 +1,51 @@
+Flask-0.11.1.dist-info/DESCRIPTION.rst,sha256=d3yS7Rb_P-vHUNF_euyueS52NGfpmuqg-C4FVuyGL1w,726
+Flask-0.11.1.dist-info/METADATA,sha256=JYsqnubNfoj5zUbNv3Ip0Oim6ubPsjTRmdgDj0vsQOQ,1864
+Flask-0.11.1.dist-info/RECORD,,
+Flask-0.11.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
+Flask-0.11.1.dist-info/entry_points.txt,sha256=jzk2Wy2h30uEcqqzd4CVnlzsMXB-vaD5GXjuPMXmTmI,60
+Flask-0.11.1.dist-info/metadata.json,sha256=mZ1xu9uLOritQIZ0KTAMKnzcA1PHcyFP6e7pgi2Uh64,1363
+Flask-0.11.1.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6
+flask/__init__.py,sha256=nPn1VVakZyNaD7j1f3k8I4y81ch8U_aKTUhGWE0mcTY,1673
+flask/__main__.py,sha256=cldbNi5zpjE68XzIWI8uYHNWwBHHVJmwtlXWk6P4CO4,291
+flask/_compat.py,sha256=vtZmNzjdZ5slXhSrjOVIX52C_FIY03xipZjJdrWUv1o,2490
+flask/app.py,sha256=Fmtl5tQjFcz-x6jSFpRDX7AmLSu-qPP4jvpx2G7d2EE,83370
+flask/blueprints.py,sha256=6HVasMcPcaq7tk36kCrgX4bnhTkky4G5WIWCyyJL8HY,16872
+flask/cli.py,sha256=mUcrLCj_e_GjTxM5YOF4f_qCB3kPphTFwDHPuStrL4s,16941
+flask/config.py,sha256=94IXmQjOxy4L-EsW5R5cljc9uzzCdYNFVwZjg4uJ2Xg,9602
+flask/ctx.py,sha256=UPA0YwoIlHP0txOGanC9lQLSGv6eCqV5Fmw2cVJRmgQ,14739
+flask/debughelpers.py,sha256=z-uQavKIymOZl0WQDLXsnacA00ERIlCx3S3Tnb_OYsE,6024
+flask/exthook.py,sha256=BHC5mNVqvmMpktMxTckJKiw2wyWiKQkV8eiiK1ZXTSQ,5748
+flask/globals.py,sha256=I3m_4RssLhWW1R11zuEI8oFryHUHX3NQwjMkGXOZzg8,1645
+flask/helpers.py,sha256=VTVl7C1GRyp3y-SkX1fYGJhG-TlzfAWphglayGR6Zog,37384
+flask/json.py,sha256=Dht7yQuPBpp0YH3gETBJDo5hdeSSzHCVEEOxnSslgpg,9261
+flask/logging.py,sha256=NwB0-gTqg2htzI88_BxWcmiMWVKb0NgY2RgZTzEZQmA,2683
+flask/sessions.py,sha256=iZBE63y-1SA8PmpIIngYrjJZvc3ZY3wDZt0Q2DTFrdc,14333
+flask/signals.py,sha256=ufhmyiteedO7_nOOcsMheQwGpjPkEP93dh_I7lZlu_c,2209
+flask/templating.py,sha256=u7FbN6j56H_q6CrdJJyJ6gZtqaMa0vh1_GP12gEHRQQ,4912
+flask/testing.py,sha256=t8dNl0b8Qq8A9dUXgYp1HmLOq-FGa90gTr-gLh9WTgA,5101
+flask/views.py,sha256=1QCDSf2w9cmrrKPAmresZiBbvYsDbPnEr1-KmBkQFGY,5632
+flask/wrappers.py,sha256=avZTGHogskE_pJ1iHhULhXFPkaYoeha6vT20JskH_fc,7538
+flask/ext/__init__.py,sha256=UEezCApsG4ZJWqwUnX9YmWcNN4OVENgph_9L05n0eOM,842
+../../../bin/flask,sha256=TCnOAaDSCS_Yn-mcXLKUOrE9i1L2r4Ukgzy0gHRnUgU,227
+Flask-0.11.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask/config.pyc,,
+flask/testing.pyc,,
+flask/wrappers.pyc,,
+flask/views.pyc,,
+flask/logging.pyc,,
+flask/json.pyc,,
+flask/helpers.pyc,,
+flask/__main__.pyc,,
+flask/blueprints.pyc,,
+flask/signals.pyc,,
+flask/exthook.pyc,,
+flask/__init__.pyc,,
+flask/debughelpers.pyc,,
+flask/templating.pyc,,
+flask/ctx.pyc,,
+flask/cli.pyc,,
+flask/_compat.pyc,,
+flask/app.pyc,,
+flask/globals.pyc,,
+flask/ext/__init__.pyc,,
+flask/sessions.pyc,,
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/WHEEL b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/WHEEL
new file mode 100644
index 0000000..8b6dd1b
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/entry_points.txt b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/entry_points.txt
new file mode 100644
index 0000000..14adf18
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+    [console_scripts]
+    flask=flask.cli:main
+    
\ No newline at end of file
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/metadata.json b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/metadata.json
new file mode 100644
index 0000000..7b5a3c9
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.commands": {"wrap_console": {"flask": "flask.cli:main"}}, "python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/pallets/flask/"}}, "python.exports": {"console_scripts": {"flask": "flask.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask", "platform": "any", "run_requires": [{"requires": ["Jinja2 (>=2.4)", "Werkzeug (>=0.7)", "click (>=2.0)", "itsdangerous (>=0.21)"]}], "summary": "A microframework based on Werkzeug, Jinja2 and good intentions", "version": "0.11.1"}
\ No newline at end of file
diff --git a/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/top_level.txt
new file mode 100644
index 0000000..7e10602
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Flask-0.11.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..4421f04
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/DESCRIPTION.rst
@@ -0,0 +1,36 @@
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here is a small example of a Jinja template::
+
+    {% extends 'base.html' %}
+    {% block title %}Memberlist{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller, but don't make life too hard
+for the template designer by giving them too little functionality.
+
+For more information visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: http://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
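A minimal sketch of rendering a loop like the memberlist block above, against the Jinja2 2.8 API pinned here; the users data and field names are illustrative:

    # Hedged usage sketch, not part of the committed files.
    from jinja2 import Template

    tmpl = Template(
        u"{% for user in users %}"
        u"* {{ user.username }} ({{ user.url }})\n"
        u"{% endfor %}")
    print(tmpl.render(users=[
        {'username': 'peter', 'url': '/u/peter'},
        {'username': 'paul', 'url': '/u/paul'},
    ]))
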
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/METADATA b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/METADATA
new file mode 100644
index 0000000..304d7c9
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/METADATA
@@ -0,0 +1,63 @@
+Metadata-Version: 2.0
+Name: Jinja2
+Version: 2.8
+Summary: A small but fast and easy to use stand-alone template engine written in pure python.
+Home-page: http://jinja.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Dist: MarkupSafe
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8); extra == 'i18n'
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here is a small example of a Jinja template::
+
+    {% extends 'base.html' %}
+    {% block title %}Memberlist{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller, but don't make life too hard
+for the template designer by giving them too little functionality.
+
+For more information visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: http://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. 
_documentation: http://jinja.pocoo.org/2/documentation/ + + diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/RECORD b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/RECORD new file mode 100644 index 0000000..0e47ff6 --- /dev/null +++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/RECORD @@ -0,0 +1,54 @@ +jinja2/__init__.py,sha256=c59bnaAFo63I7lYUZlO2UKHj8LPG3JACKnCrwWgvjGY,2326 +jinja2/_compat.py,sha256=O4FnYOMi4HRBfoCKkX137tt3sR6HvpnQNcwqg8ARYog,3109 +jinja2/_stringdefs.py,sha256=SFObWX5vSMeGNc_aSO3_B2EEmScCstFWtjS4K0YFBXk,404291 +jinja2/bccache.py,sha256=EMN9fsvOpwK3DfxQ9F1lmWoxU2Qlo6AnNhPXTsMrw84,12793 +jinja2/compiler.py,sha256=nQmoS6HpGwgDIC8UXkSdjPYiAjbVqZ-Gf4odO-SAR6E,63846 +jinja2/constants.py,sha256=DCr-oKC2xQO-fkOQO3kXRJW7rEYgmcsMRNpPnM66YSU,1626 +jinja2/debug.py,sha256=GEGHM8vFsNFF-kGc0_fwyj1ftMtuyaH4r0nyG-XA9Z8,11553 +jinja2/defaults.py,sha256=eLMOE7JC52QwZBu5Gz4TPZqzoJy9IgV5EynL_pW7MUw,1057 +jinja2/environment.py,sha256=jzJmujSFtxb1HvITO4TdUCOOA-hSZx0gHrxeDZ2VE-M,48120 +jinja2/exceptions.py,sha256=Q9yZOUif-lhVj5BRw0ELjfBvEdBsB7xZobgOvC2qGy4,4428 +jinja2/ext.py,sha256=X-1zCiut1cuxIteKPkJr3jb6odlVE1jciO8RnrMniPE,25072 +jinja2/filters.py,sha256=R4x2flPfyzIjrtItzpGpK4LzBvx-NOlEXH9wD-ZBWtU,30115 +jinja2/lexer.py,sha256=QyiQwAQVEE2YREZJLcA04F3yqv0XOwBbSlWaFW4xJ20,28425 +jinja2/loaders.py,sha256=BgDCvmiB0gH_zPMf-6TMemqtJdrck3IyJ8g0kWUvFa0,17380 +jinja2/meta.py,sha256=cxAOtMuSWWSQX2H8zhYsAtjNwRcNB8Zvs06Y-JlWnbk,4198 +jinja2/nodes.py,sha256=YN6hfFa0WlfToG2r-Q-yhUkAUp0O9l8KulK53mOAVUo,28954 +jinja2/optimizer.py,sha256=bNNKbo5SC5FBUm9dvP-I3GkiXZYBYIER7_g9hK77ZVI,2302 +jinja2/parser.py,sha256=pjLfkZDg2IKJKt_ixNosV-RzwAja5GWYuVeBQumIRns,35442 +jinja2/runtime.py,sha256=Ct36Q9-gVmKer45syS4j3thQ15T_DnLDh6CqvTcnPwQ,22530 +jinja2/sandbox.py,sha256=qgH4CoBsF5NwGj0krqsCOw8sg2mXmfpZKnvmZEE-da4,13327 +jinja2/tests.py,sha256=znB0L_k6wdKp_lQJvxboXwUXDy1HhFe5SSA888tHt_w,4131 +jinja2/utils.py,sha256=pjbOhQJ5NYexu2MbjA66nBibudUkYcQRZbxvbYE0tFk,16560 +jinja2/visitor.py,sha256=3hEAYD26xS_JiJBf4RfcqYPpiuR6efOH8Hh6om59eU8,3316 +Jinja2-2.8.dist-info/DESCRIPTION.rst,sha256=CXIS1UnPSk5_lZBS6Lb8ko-3lqGfjsiUwNBLXCTj2lc,975 +Jinja2-2.8.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 +Jinja2-2.8.dist-info/METADATA,sha256=Vio5F8qaEVcGzaCV1rl8tIWEKsHUFSSSAfL0u9oMmGk,2061 +Jinja2-2.8.dist-info/metadata.json,sha256=4TsqsSBwGwy0C2xF_uRZHYsRn2W5Lv4NUMBjTnXPldM,1275 +Jinja2-2.8.dist-info/RECORD,, +Jinja2-2.8.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +Jinja2-2.8.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +Jinja2-2.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jinja2/_compat.pyc,, +jinja2/defaults.pyc,, +jinja2/sandbox.pyc,, +jinja2/_stringdefs.pyc,, +jinja2/environment.pyc,, +jinja2/bccache.pyc,, +jinja2/runtime.pyc,, +jinja2/utils.pyc,, +jinja2/parser.pyc,, +jinja2/debug.pyc,, +jinja2/visitor.pyc,, +jinja2/ext.pyc,, +jinja2/lexer.pyc,, +jinja2/nodes.pyc,, +jinja2/meta.pyc,, +jinja2/compiler.pyc,, +jinja2/exceptions.pyc,, +jinja2/__init__.pyc,, +jinja2/optimizer.pyc,, +jinja2/constants.pyc,, +jinja2/tests.pyc,, +jinja2/loaders.pyc,, +jinja2/filters.pyc,, diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/WHEEL b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: 
bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/entry_points.txt b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/entry_points.txt
new file mode 100644
index 0000000..32e6b75
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+    [babel.extractors]
+    jinja2 = jinja2.ext:babel_extract[i18n]
+    
\ No newline at end of file
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/metadata.json b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/metadata.json
new file mode 100644
index 0000000..f5b1ffa
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "BSD", "name": "Jinja2", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "run_requires": [{"requires": ["Babel (>=0.8)"], "extra": "i18n"}, {"requires": ["MarkupSafe"]}], "version": "2.8", "extensions": {"python.details": {"project_urls": {"Home": "http://jinja.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extras": ["i18n"]}
\ No newline at end of file
diff --git a/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/top_level.txt
new file mode 100644
index 0000000..7f7afbf
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Jinja2-2.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..67256b9
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/DESCRIPTION.rst
@@ -0,0 +1,101 @@
+MarkupSafe
+==========
+
+Implements a unicode subclass that supports HTML strings:
+
+>>> from markupsafe import Markup, escape
+>>> escape("<script>alert(document.cookie);</script>")
+Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+>>> tmpl = Markup("<em>%s</em>")
+>>> tmpl % "Peter > Lustig"
+Markup(u'<em>Peter &gt; Lustig</em>')
+
+If you want to make an object unicode that is not yet unicode
+but don't want to lose the taint information, you can use the
+`soft_unicode` function. (On Python 3 you can also use `soft_str` which
+is a different name for the same function).
+
+>>> from markupsafe import soft_unicode
+>>> soft_unicode(42)
+u'42'
+>>> soft_unicode(Markup('foo'))
+Markup(u'foo')
+
+HTML Representations
+--------------------
+
+Objects can customize their HTML markup equivalent by overriding
+the `__html__` function:
+
+>>> class Foo(object):
+...  def __html__(self):
+...   return '<strong>Nice</strong>'
+...
+>>> escape(Foo())
+Markup(u'<strong>Nice</strong>')
+>>> Markup(Foo())
+Markup(u'<strong>Nice</strong>')
+
+Silent Escapes
+--------------
+
+Since MarkupSafe 0.10 there is now also a separate escape function
+called `escape_silent` that returns an empty string for `None` for
+consistency with other systems that return empty strings for `None`
+when escaping (for instance Pylons' webhelpers).
+
+If you also want to use this for the escape method of the Markup
+object, you can create your own subclass that does that::
+
+    from markupsafe import Markup, escape_silent as escape
+
+    class SilentMarkup(Markup):
+        __slots__ = ()
+
+        @classmethod
+        def escape(cls, s):
+            return cls(escape(s))
+
+New-Style String Formatting
+---------------------------
+
+Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+3.x are now fully supported. Previously the escape behavior of those
+functions was spotty at best. The new implementation operates under the
+following algorithm:
+
+1. if an object has an ``__html_format__`` method it is called as
+   replacement for ``__format__`` with the format specifier. It either
+   has to return a string or markup object.
+2. if an object has an ``__html__`` method it is called.
+3. otherwise the default format system of Python kicks in and the result
+   is HTML escaped.
+
+Here is how you can implement your own formatting::
+
+    class User(object):
+
+        def __init__(self, id, username):
+            self.id = id
+            self.username = username
+
+        def __html_format__(self, format_spec):
+            if format_spec == 'link':
+                return Markup('<a href="/user/{0}">{1}</a>').format(
+                    self.id,
+                    self.__html__(),
+                )
+            elif format_spec:
+                raise ValueError('Invalid format spec')
+            return self.__html__()
+
+        def __html__(self):
+            return Markup('<span class=user>{0}</span>').format(self.username)
+
+And to format that user:
+
+>>> user = User(1, 'foo')
+>>> Markup('<p>User: {0:link}').format(user)
+Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
+
+
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/METADATA b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/METADATA
new file mode 100644
index 0000000..d06751b
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/METADATA
@@ -0,0 +1,121 @@
+Metadata-Version: 2.0
+Name: MarkupSafe
+Version: 0.23
+Summary: Implements a XML/HTML/XHTML Markup safe string for Python
+Home-page: http://github.com/mitsuhiko/markupsafe
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+
+MarkupSafe
+==========
+
+Implements a unicode subclass that supports HTML strings:
+
+>>> from markupsafe import Markup, escape
+>>> escape("<script>alert(document.cookie);</script>")
+Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+>>> tmpl = Markup("<em>%s</em>")
+>>> tmpl % "Peter > Lustig"
+Markup(u'<em>Peter &gt; Lustig</em>')
+
+If you want to make an object unicode that is not yet unicode
+but don't want to lose the taint information, you can use the
+`soft_unicode` function. (On Python 3 you can also use `soft_str` which
+is a different name for the same function).
+
+>>> from markupsafe import soft_unicode
+>>> soft_unicode(42)
+u'42'
+>>> soft_unicode(Markup('foo'))
+Markup(u'foo')
+
+HTML Representations
+--------------------
+
+Objects can customize their HTML markup equivalent by overriding
+the `__html__` function:
+
+>>> class Foo(object):
+...  def __html__(self):
+...   return '<strong>Nice</strong>'
+...
+>>> escape(Foo())
+Markup(u'<strong>Nice</strong>')
+>>> Markup(Foo())
+Markup(u'<strong>Nice</strong>')
+
+Silent Escapes
+--------------
+
+Since MarkupSafe 0.10 there is now also a separate escape function
+called `escape_silent` that returns an empty string for `None` for
+consistency with other systems that return empty strings for `None`
+when escaping (for instance Pylons' webhelpers).
+
+If you also want to use this for the escape method of the Markup
+object, you can create your own subclass that does that::
+
+    from markupsafe import Markup, escape_silent as escape
+
+    class SilentMarkup(Markup):
+        __slots__ = ()
+
+        @classmethod
+        def escape(cls, s):
+            return cls(escape(s))
+
+New-Style String Formatting
+---------------------------
+
+Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+3.x are now fully supported. Previously the escape behavior of those
+functions was spotty at best. The new implementation operates under the
+following algorithm:
+
+1. if an object has an ``__html_format__`` method it is called as
+   replacement for ``__format__`` with the format specifier. It either
+   has to return a string or markup object.
+2. if an object has an ``__html__`` method it is called.
+3. otherwise the default format system of Python kicks in and the result
+   is HTML escaped.
+
+Here is how you can implement your own formatting::
+
+    class User(object):
+
+        def __init__(self, id, username):
+            self.id = id
+            self.username = username
+
+        def __html_format__(self, format_spec):
+            if format_spec == 'link':
+                return Markup('<a href="/user/{0}">{1}</a>').format(
+                    self.id,
+                    self.__html__(),
+                )
+            elif format_spec:
+                raise ValueError('Invalid format spec')
+            return self.__html__()
+
+        def __html__(self):
+            return Markup('<span class=user>{0}</span>').format(self.username)
+
+And to format that user:
+
+>>> user = User(1, 'foo')
+>>> Markup('<p>User: {0:link}').format(user)
+Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
+
+
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/RECORD b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/RECORD
new file mode 100644
index 0000000..6306e76
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/RECORD
@@ -0,0 +1,19 @@
+MarkupSafe-0.23.dist-info/DESCRIPTION.rst,sha256=VnEbwPneiOkqh-nzxb0DUiGlcVGHuaDQjsNBLi-yNYw,3091
+MarkupSafe-0.23.dist-info/METADATA,sha256=g-KikeSr9J7vagkJoCt0ViT2ORy9O4NYV7XtRu1Pni8,3879
+MarkupSafe-0.23.dist-info/RECORD,,
+MarkupSafe-0.23.dist-info/WHEEL,sha256=LgENoDL_AkiRWbJ7HCJLnTXVvHl9ZtXZ7UZmjmcCeN0,105
+MarkupSafe-0.23.dist-info/metadata.json,sha256=y9yFyMJYU3UIRc5KfHIY6A3Z6nvJ2lXva5-7Ts2lsvY,901
+MarkupSafe-0.23.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=zFQpANILi3mCCALiPd6ZJdlW6ibu_hTKzikMXKXVtaM,10338
+markupsafe/_compat.py,sha256=r1HE0CpcAZeb-AiTV9wITR91PeLHn0CzZ_XHkYoozpI,565
+markupsafe/_constants.py,sha256=U_xybFQsyXKCgHSfranJnFzo-z9nn9fuBeSk243sE5Q,4795
+markupsafe/_native.py,sha256=E2Un1ysOf-w45d18YCj8UelT5UP7Vt__IuFPYJ7YRIs,1187
+markupsafe/_speedups.c,sha256=gZwPEM_0zcbAzJjPuPYXk97R67QR1uUGtDvOPsvirCA,5939
+markupsafe/_speedups.so,sha256=fQbdBe1y1gzgoPajRdqAcU39EvIJQ_j_ZrgO3v2zIQk,13344
+markupsafe/tests.py,sha256=RLI4eYI0ICNZwkoN638VHXf_fDu4d_jnvbGr22j58Ng,6107
+MarkupSafe-0.23.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+markupsafe/_constants.pyc,,
+markupsafe/__init__.pyc,,
+markupsafe/_native.pyc,,
+markupsafe/_compat.pyc,,
+markupsafe/tests.pyc,,
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/WHEEL b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/WHEEL
new file mode 100644
index 0000000..d15279d
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: false
+Tag: cp27-cp27mu-linux_x86_64
+
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/metadata.json b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/metadata.json
new file mode 100644
index 0000000..21fcd69
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/markupsafe"}}}, "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "MarkupSafe", "summary": "Implements a XML/HTML/XHTML Markup safe string for Python", "version": "0.23"}
\ No newline at end of file
diff --git a/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/top_level.txt
new file mode 100644
index 0000000..75bf729
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/MarkupSafe-0.23.dist-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
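The "Silent Escapes" behaviour described in the MarkupSafe description is easy to check against the 0.23 build vendored here; a minimal doctest-style sketch:

    # Hedged usage sketch, not part of the committed files.
    from markupsafe import Markup, escape, escape_silent

    print(repr(escape(None)))           # Markup(u'None')
    print(repr(escape_silent(None)))    # Markup(u'')
    print(repr(escape(u'<em>x</em>')))  # Markup(u'&lt;em&gt;x&lt;/em&gt;')
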
diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..2a6e8bb
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/DESCRIPTION.rst
@@ -0,0 +1,54 @@
+Werkzeug
+========
+
+Werkzeug started as a simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full-featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community-contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+  `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- A simple WSGI server with support for threading and forking
+  with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+    git clone git@github.com:mitsuhiko/werkzeug.git
+
+.. _github: http://github.com/mitsuhiko/werkzeug
+
+
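A minimal sketch of the request/response objects and the bundled development server from the feature list above, using the Werkzeug 0.11 API:

    # Hedged usage sketch, not part of the committed files.
    from werkzeug.wrappers import Request, Response

    @Request.application
    def application(request):
        # Request wraps the WSGI environ; Response is itself a WSGI app.
        return Response('Hello %s!' % request.args.get('name', 'World'))

    if __name__ == '__main__':
        from werkzeug.serving import run_simple
        run_simple('localhost', 4000, application, use_reloader=True)
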
diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/METADATA b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/METADATA
new file mode 100644
index 0000000..d8a1764
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/METADATA
@@ -0,0 +1,79 @@
+Metadata-Version: 2.0
+Name: Werkzeug
+Version: 0.11.10
+Summary: The Swiss Army knife of Python web development
+Home-page: http://werkzeug.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+Werkzeug
+========
+
+Werkzeug started as a simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full-featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community-contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+  `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- A simple WSGI server with support for threading and forking
+  with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+    git clone git@github.com:mitsuhiko/werkzeug.git
+
+.. 
_github: http://github.com/mitsuhiko/werkzeug + + diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/RECORD b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/RECORD new file mode 100644 index 0000000..b9b77f6 --- /dev/null +++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/RECORD @@ -0,0 +1,94 @@ +Werkzeug-0.11.10.dist-info/DESCRIPTION.rst,sha256=5sTwZ_Sj5aeEN8mlcOdNJ_ng40HiGazGmILLyTMX8o0,1595 +Werkzeug-0.11.10.dist-info/METADATA,sha256=vweH07PHdEwMIeOU7fvKXKi_6zY5e6d_dfVCgqT--Aw,2600 +Werkzeug-0.11.10.dist-info/RECORD,, +Werkzeug-0.11.10.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +Werkzeug-0.11.10.dist-info/metadata.json,sha256=PC5ATsCVx4EUlD8uEGHEhx-aTnCr9GA8vfWXT3CLwQw,1096 +Werkzeug-0.11.10.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 +werkzeug/__init__.py,sha256=BDx17HnxLmB4L8OSE0Bkj3DjDTO6xLu6_nNck8QiRaw,6920 +werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311 +werkzeug/_internal.py,sha256=IEScSoFtQ8KqFH_2ubdfscNAdQ2RIysyVupI5BR9W2U,13709 +werkzeug/_reloader.py,sha256=YQykMSQW7AlojJQ7qOlgNaXw5_CNjf9yzxplwzVdL7Q,8336 +werkzeug/datastructures.py,sha256=5CLVMLROGMoB0dSdQ7aBabma6IC_vxIx77VnMoFFVyc,87447 +werkzeug/exceptions.py,sha256=c-3fKHItsPvC52X_NwBNLcmGXR30h0WP5ynPSwCqPiw,18733 +werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175 +werkzeug/formparser.py,sha256=90D5Urp8Ghrzw32kAs090G0nXPYlU73NeAzPlQFMVrY,21296 +werkzeug/http.py,sha256=sqNaMmLBvi16cPoVRiBviwnVOb1bAQ1lGTrvfCdaQrY,35264 +werkzeug/local.py,sha256=4Q5gwHQJhfhZFqTR8iQDs2VHohpR1OEsP4YTwn7rt7w,14275 +werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519 +werkzeug/routing.py,sha256=TqiZD5HkwdLBnKBUjC5PlytzXmpczQC5dz54VfQzMOw,66350 +werkzeug/script.py,sha256=DwaVDcXdaOTffdNvlBdLitxWXjKaRVT32VbhDtljFPY,11365 +werkzeug/security.py,sha256=tuVc22OqoHV5K-TrYJmynCJJa12aUt9BQ3wR_vEPQ34,8971 +werkzeug/serving.py,sha256=uRUqXuA-Dw2MRA-d232cK_034-taldoj66fEFrtin7k,27736 +werkzeug/test.py,sha256=nan0aDi3g5hyUzWCtaN3XL9HrbIsNNNgMNjwpfM6qMc,34152 +werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396 +werkzeug/urls.py,sha256=fSbI4Gb29_p02Zk21VAZQRN1QdOVY9CNTgpb2rbajNQ,36710 +werkzeug/useragents.py,sha256=uqpgPcJ5BfcCVh9nPIIl2r3duIrIuENmrbRqbAMmPDk,5418 +werkzeug/utils.py,sha256=lkybtv_mq35zV1qhelvEcILTzrMUwZ9yon6E8XwapJE,22972 +werkzeug/wrappers.py,sha256=lKYevpKD1-quk9Cop7bsFxt1eWJxU3h33HCnOI_YzSU,77011 +werkzeug/wsgi.py,sha256=S8R3pBGPlBK67s-d6Wa93nhzG27WjfcHs_ZBGIAQCxM,39573 +werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623 +werkzeug/contrib/atom.py,sha256=rvijBrphjMzVObfuCR6ddu6aLwI_SiNiudu64OSTh4Q,15588 +werkzeug/contrib/cache.py,sha256=4W2WCT9Hw6HEU8yME9GuU4Xf8e50r2K84ASMxhLb6tY,27983 +werkzeug/contrib/fixers.py,sha256=MtN_YmENxoTsGvXGGERmtbQ62LaeFc5I2d1YifXNENA,10183 +werkzeug/contrib/iterio.py,sha256=pTX36rYCKO_9IEoB5sIN5cFSYszI9zdx6YhquWovcPY,10814 +werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564 +werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334 +werkzeug/contrib/lint.py,sha256=XDKYx0ELn9k18xRn4SiAsCgltCuN4yLjzxnCN8tG_eM,12490 +werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151 +werkzeug/contrib/securecookie.py,sha256=X-Ao_0NRDveW6K1Fhe4U42hHWBW8esCpA3VcBDpzWIk,12206 +werkzeug/contrib/sessions.py,sha256=uAPcnyxaxEla-bUA13gKc3KK4mwSagdzbCZzyKl3PeE,12577 
+werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453 +werkzeug/contrib/wrappers.py,sha256=Uv5FRO5OqKwOsNgkW2-FRcw0vUDe3uiaivjPNYWNfAk,10337 +werkzeug/debug/__init__.py,sha256=qQT5YnOv9Eov9Jt5eLtP6MOqwpmo-tORJ6HcQmmnvro,17271 +werkzeug/debug/console.py,sha256=B7uAu9Rk60siDnGlEt-A_q1ZR4zCtmxx5itg3X-BOxo,5599 +werkzeug/debug/repr.py,sha256=NaoB89aHb0vuvdSWels-GWdeGDZp76uE4uSNZPX1jAM,9354 +werkzeug/debug/tbtools.py,sha256=L5P5TkGEHc_Bc5duNosP6D4CNe7ieTo1oiPX8nKQdek,18402 +werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 +werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 +werkzeug/debug/shared/debugger.js,sha256=PEMBoNuD6fUaNou8Km_ZvVmFcIA3z3k3jSEMWLW-cA0,6187 +werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957 +werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 +werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 +werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 +werkzeug/debug/shared/style.css,sha256=7x1s8olZO1XHalqD4M9MWn9vRqQkA635S9_6zRoe220,6231 +werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 +Werkzeug-0.11.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +werkzeug/_reloader.pyc,, +werkzeug/filesystem.pyc,, +werkzeug/contrib/testtools.pyc,, +werkzeug/formparser.pyc,, +werkzeug/_compat.pyc,, +werkzeug/posixemulation.pyc,, +werkzeug/wsgi.pyc,, +werkzeug/serving.pyc,, +werkzeug/contrib/__init__.pyc,, +werkzeug/contrib/iterio.pyc,, +werkzeug/test.pyc,, +werkzeug/__init__.pyc,, +werkzeug/contrib/limiter.pyc,, +werkzeug/debug/tbtools.pyc,, +werkzeug/contrib/sessions.pyc,, +werkzeug/contrib/securecookie.pyc,, +werkzeug/local.pyc,, +werkzeug/utils.pyc,, +werkzeug/_internal.pyc,, +werkzeug/security.pyc,, +werkzeug/contrib/cache.pyc,, +werkzeug/script.pyc,, +werkzeug/routing.pyc,, +werkzeug/wrappers.pyc,, +werkzeug/contrib/jsrouting.pyc,, +werkzeug/contrib/fixers.pyc,, +werkzeug/contrib/profiler.pyc,, +werkzeug/debug/console.pyc,, +werkzeug/debug/__init__.pyc,, +werkzeug/datastructures.pyc,, +werkzeug/http.pyc,, +werkzeug/urls.pyc,, +werkzeug/contrib/lint.pyc,, +werkzeug/contrib/wrappers.pyc,, +werkzeug/exceptions.pyc,, +werkzeug/contrib/atom.pyc,, +werkzeug/testapp.pyc,, +werkzeug/debug/repr.pyc,, +werkzeug/useragents.pyc,, diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/WHEEL b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/WHEEL new file mode 100644 index 0000000..0de529b --- /dev/null +++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.26.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/metadata.json b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/metadata.json new file mode 100644 index 0000000..ac71ca2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "The Swiss Army knife of Python web development", "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", 
"Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"project_urls": {"Home": "http://werkzeug.pocoo.org/"}, "contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "BSD", "metadata_version": "2.0", "name": "Werkzeug", "platform": "any", "version": "0.11.10"} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/top_level.txt new file mode 100644 index 0000000..6fe8da8 --- /dev/null +++ b/venv/lib/python2.7/site-packages/Werkzeug-0.11.10.dist-info/top_level.txt @@ -0,0 +1 @@ +werkzeug diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/click-6.6.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..e118723 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/DESCRIPTION.rst @@ -0,0 +1,3 @@ +UNKNOWN + + diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/click-6.6.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/METADATA b/venv/lib/python2.7/site-packages/click-6.6.dist-info/METADATA new file mode 100644 index 0000000..ec533a2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/METADATA @@ -0,0 +1,16 @@ +Metadata-Version: 2.0 +Name: click +Version: 6.6 +Summary: A simple wrapper around optparse for powerful command line utilities. 
+Home-page: http://github.com/mitsuhiko/click +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +License: UNKNOWN +Platform: UNKNOWN +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 + +UNKNOWN + + diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/RECORD b/venv/lib/python2.7/site-packages/click-6.6.dist-info/RECORD new file mode 100644 index 0000000..615da13 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/RECORD @@ -0,0 +1,41 @@ +click/__init__.py,sha256=-BDiJ4G3FXy1nn7Lt9UTIYdKb7SZTHnTm9yv2WQbHm0,2858 +click/_bashcomplete.py,sha256=J8L3TvE23YVicEf5LQdiN5pCS3aAquP8D8897Jp_PvE,2423 +click/_compat.py,sha256=xP-n7Tkuof1I6aEzcR6A88vusa-Lu6gpkPxnO-KY0pA,20706 +click/_termui_impl.py,sha256=BbBPaw2nDEmNYgjBl_JlxUOSAzFexO0jL3z0OY-Z6BM,16377 +click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198 +click/_unicodefun.py,sha256=vK1_Y_d29nxYhE7_QF8y8-DLjG_fYDhiT-LlcmiHVeE,4265 +click/_winconsole.py,sha256=MzG46DEYPoRyx4SO7EIhFuFZHESgooAfJLIukbB6p5c,7790 +click/core.py,sha256=PFAK6hjAiM8fcXxdHNwaDKINSoHYCYfLV8n9TdodYS4,70085 +click/decorators.py,sha256=y7CX2needh8iRWafj-QS_hGQFsN24eyXAhx5Y2ATwas,10941 +click/exceptions.py,sha256=rOa0pP3PbSy0_AAPOW9irBEM8AJ3BySN-4z2VUwFVo4,6788 +click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889 +click/globals.py,sha256=PAgnKvGxq4YuEIldw3lgYOGBLYwsyxnm1IByBX3BFXo,1515 +click/parser.py,sha256=i01xgYuIA6AwQWEXjshwHSwnTR3gUep4FxJIfyW4ta4,15510 +click/termui.py,sha256=Bp99MSWQtyoWe1_7HggDmA77n--3KLxu7NsZMFMaCUo,21008 +click/testing.py,sha256=LjWcVtxA5vMdWx5ev7KSjU4vcTlxZiSDrP7tOV1nCxk,11004 +click/types.py,sha256=ZGb2lmFs5Vwd9loTRIMbGcqhPVOql8mGoBhWBRT6V4E,18864 +click/utils.py,sha256=1jalPlkUU28JReTEQeeSFtbJd-SirYWBNfjtELBKzT4,14916 +click-6.6.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 +click-6.6.dist-info/METADATA,sha256=gYQxaGWBuXfQy0NGVARhka10cSm9b_6XF6WeD7k3_Rg,424 +click-6.6.dist-info/RECORD,, +click-6.6.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +click-6.6.dist-info/metadata.json,sha256=6QQvh7-Y7ZLMEiYZVIYzVKGeKPmzdxcgSIioWfhp9DE,568 +click-6.6.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 +click-6.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +click/_unicodefun.pyc,, +click/_compat.pyc,, +click/_termui_impl.pyc,, +click/_textwrap.pyc,, +click/formatting.pyc,, +click/exceptions.pyc,, +click/decorators.pyc,, +click/parser.pyc,, +click/testing.pyc,, +click/utils.pyc,, +click/_bashcomplete.pyc,, +click/types.pyc,, +click/termui.pyc,, +click/core.pyc,, +click/_winconsole.pyc,, +click/__init__.pyc,, +click/globals.pyc,, diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/WHEEL b/venv/lib/python2.7/site-packages/click-6.6.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/metadata.json b/venv/lib/python2.7/site-packages/click-6.6.dist-info/metadata.json new file mode 100644 index 0000000..ddf94ca --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["License :: OSI Approved :: BSD License", 
"Programming Language :: Python", "Programming Language :: Python :: 3"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/click"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "click", "summary": "A simple wrapper around optparse for powerful command line utilities.", "version": "6.6"} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/click-6.6.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/click-6.6.dist-info/top_level.txt new file mode 100644 index 0000000..dca9a90 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click-6.6.dist-info/top_level.txt @@ -0,0 +1 @@ +click diff --git a/venv/lib/python2.7/site-packages/click/__init__.py b/venv/lib/python2.7/site-packages/click/__init__.py new file mode 100644 index 0000000..3a7e3ee --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/__init__.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +""" + click + ~~~~~ + + Click is a simple Python module that wraps the stdlib's optparse to make + writing command line scripts fun. Unlike other modules, it's based around + a simple API that does not come with too much magic and is composable. + + In case optparse ever gets removed from the stdlib, it will be shipped by + this module. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +# Core classes +from .core import Context, BaseCommand, Command, MultiCommand, Group, \ + CommandCollection, Parameter, Option, Argument + +# Globals +from .globals import get_current_context + +# Decorators +from .decorators import pass_context, pass_obj, make_pass_decorator, \ + command, group, argument, option, confirmation_option, \ + password_option, version_option, help_option + +# Types +from .types import ParamType, File, Path, Choice, IntRange, Tuple, \ + STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED + +# Utilities +from .utils import echo, get_binary_stream, get_text_stream, open_file, \ + format_filename, get_app_dir, get_os_args + +# Terminal functions +from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \ + progressbar, clear, style, unstyle, secho, edit, launch, getchar, \ + pause + +# Exceptions +from .exceptions import ClickException, UsageError, BadParameter, \ + FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \ + MissingParameter + +# Formatting +from .formatting import HelpFormatter, wrap_text + +# Parsing +from .parser import OptionParser + + +__all__ = [ + # Core classes + 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group', + 'CommandCollection', 'Parameter', 'Option', 'Argument', + + # Globals + 'get_current_context', + + # Decorators + 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group', + 'argument', 'option', 'confirmation_option', 'password_option', + 'version_option', 'help_option', + + # Types + 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING', + 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', + + # Utilities + 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', + 'format_filename', 'get_app_dir', 'get_os_args', + + # Terminal functions + 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', + 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', + 'getchar', 'pause', + + # Exceptions + 'ClickException', 'UsageError', 
'BadParameter', 'FileError', + 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', + 'MissingParameter', + + # Formatting + 'HelpFormatter', 'wrap_text', + + # Parsing + 'OptionParser', +] + + +# Controls if click should emit the warning about the use of unicode +# literals. +disable_unicode_literals_warning = False + + +__version__ = '6.6' diff --git a/venv/lib/python2.7/site-packages/click/_bashcomplete.py b/venv/lib/python2.7/site-packages/click/_bashcomplete.py new file mode 100644 index 0000000..d97b66a --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_bashcomplete.py @@ -0,0 +1,83 @@ +import os +import re +from .utils import echo +from .parser import split_arg_string +from .core import MultiCommand, Option + + +COMPLETION_SCRIPT = ''' +%(complete_func)s() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ + COMP_CWORD=$COMP_CWORD \\ + %(autocomplete_var)s=complete $1 ) ) + return 0 +} + +complete -F %(complete_func)s -o default %(script_names)s +''' + +_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') + + +def get_completion_script(prog_name, complete_var): + cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) + return (COMPLETION_SCRIPT % { + 'complete_func': '_%s_completion' % cf_name, + 'script_names': prog_name, + 'autocomplete_var': complete_var, + }).strip() + ';' + + +def resolve_ctx(cli, prog_name, args): + ctx = cli.make_context(prog_name, args, resilient_parsing=True) + while ctx.args + ctx.protected_args and isinstance(ctx.command, MultiCommand): + a = ctx.args + ctx.protected_args + cmd = ctx.command.get_command(ctx, a[0]) + if cmd is None: + return None + ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True) + return ctx + + +def get_choices(cli, prog_name, args, incomplete): + ctx = resolve_ctx(cli, prog_name, args) + if ctx is None: + return + + choices = [] + if incomplete and not incomplete[:1].isalnum(): + for param in ctx.command.params: + if not isinstance(param, Option): + continue + choices.extend(param.opts) + choices.extend(param.secondary_opts) + elif isinstance(ctx.command, MultiCommand): + choices.extend(ctx.command.list_commands(ctx)) + + for item in choices: + if item.startswith(incomplete): + yield item + + +def do_complete(cli, prog_name): + cwords = split_arg_string(os.environ['COMP_WORDS']) + cword = int(os.environ['COMP_CWORD']) + args = cwords[1:cword] + try: + incomplete = cwords[cword] + except IndexError: + incomplete = '' + + for item in get_choices(cli, prog_name, args, incomplete): + echo(item) + + return True + + +def bashcomplete(cli, prog_name, complete_var, complete_instr): + if complete_instr == 'source': + echo(get_completion_script(prog_name, complete_var)) + return True + elif complete_instr == 'complete': + return do_complete(cli, prog_name) + return False diff --git a/venv/lib/python2.7/site-packages/click/_compat.py b/venv/lib/python2.7/site-packages/click/_compat.py new file mode 100644 index 0000000..6bbebd3 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_compat.py @@ -0,0 +1,642 @@ +import re +import io +import os +import sys +import codecs +from weakref import WeakKeyDictionary + + +PY2 = sys.version_info[0] == 2 +WIN = sys.platform.startswith('win') +DEFAULT_COLUMNS = 80 + + +_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') + + +def get_filesystem_encoding(): + return sys.getfilesystemencoding() or sys.getdefaultencoding() + + +def _make_text_stream(stream, encoding, errors): + if encoding is None: + encoding = get_best_encoding(stream) + if 
errors is None: + errors = 'replace' + return _NonClosingTextIOWrapper(stream, encoding, errors, + line_buffering=True) + + +def is_ascii_encoding(encoding): + """Checks if a given encoding is ascii.""" + try: + return codecs.lookup(encoding).name == 'ascii' + except LookupError: + return False + + +def get_best_encoding(stream): + """Returns the default stream encoding if not found.""" + rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding() + if is_ascii_encoding(rv): + return 'utf-8' + return rv + + +class _NonClosingTextIOWrapper(io.TextIOWrapper): + + def __init__(self, stream, encoding, errors, **extra): + self._stream = stream = _FixupStream(stream) + io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) + + # The io module is a place where the Python 3 text behavior + # was forced upon Python 2, so we need to unbreak + # it to look like Python 2. + if PY2: + def write(self, x): + if isinstance(x, str) or is_bytes(x): + try: + self.flush() + except Exception: + pass + return self.buffer.write(str(x)) + return io.TextIOWrapper.write(self, x) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def __del__(self): + try: + self.detach() + except Exception: + pass + + def isatty(self): + # https://bitbucket.org/pypy/pypy/issue/1803 + return self._stream.isatty() + + +class _FixupStream(object): + """The new io interface needs more from streams than streams + traditionally implement. As such, this fix-up code is necessary in + some circumstances. + """ + + def __init__(self, stream): + self._stream = stream + + def __getattr__(self, name): + return getattr(self._stream, name) + + def read1(self, size): + f = getattr(self._stream, 'read1', None) + if f is not None: + return f(size) + # We only dispatch to readline instead of read in Python 2 as we + # do not want cause problems with the different implementation + # of line buffering. + if PY2: + return self._stream.readline(size) + return self._stream.read(size) + + def readable(self): + x = getattr(self._stream, 'readable', None) + if x is not None: + return x() + try: + self._stream.read(0) + except Exception: + return False + return True + + def writable(self): + x = getattr(self._stream, 'writable', None) + if x is not None: + return x() + try: + self._stream.write('') + except Exception: + try: + self._stream.write(b'') + except Exception: + return False + return True + + def seekable(self): + x = getattr(self._stream, 'seekable', None) + if x is not None: + return x() + try: + self._stream.seek(self._stream.tell()) + except Exception: + return False + return True + + +if PY2: + text_type = unicode + bytes = str + raw_input = raw_input + string_types = (str, unicode) + iteritems = lambda x: x.iteritems() + range_type = xrange + + def is_bytes(x): + return isinstance(x, (buffer, bytearray)) + + _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') + + # For Windows, we need to force stdout/stdin/stderr to binary if it's + # fetched for that. This obviously is not the most correct way to do + # it as it changes global state. Unfortunately, there does not seem to + # be a clear better way to do it as just reopening the file in binary + # mode does not change anything. + # + # An option would be to do what Python 3 does and to open the file as + # binary only, patch it back to the system, and then use a wrapper + # stream that converts newlines. It's not quite clear what's the + # correct option here. 
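+    #
+    # Illustrative sketch (not upstream code; the names are the helpers
+    # defined just below): the practical effect of forcing binary mode
+    # is that raw bytes reach the stream without newline translation:
+    #
+    #     out = get_binary_stdout()   # O_BINARY on Windows, a no-op elsewhere
+    #     out.write(b'\x89PNG\r\n')   # the '\r\n' is written through unmangled
+    #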
+ # + # This code also lives in _winconsole for the fallback to the console + # emulation stream. + if WIN: + import msvcrt + def set_binary_mode(f): + try: + fileno = f.fileno() + except Exception: + pass + else: + msvcrt.setmode(fileno, os.O_BINARY) + return f + else: + set_binary_mode = lambda x: x + + def isidentifier(x): + return _identifier_re.search(x) is not None + + def get_binary_stdin(): + return set_binary_mode(sys.stdin) + + def get_binary_stdout(): + return set_binary_mode(sys.stdout) + + def get_binary_stderr(): + return set_binary_mode(sys.stderr) + + def get_text_stdin(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _make_text_stream(sys.stdin, encoding, errors) + + def get_text_stdout(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _make_text_stream(sys.stdout, encoding, errors) + + def get_text_stderr(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _make_text_stream(sys.stderr, encoding, errors) + + def filename_to_ui(value): + if isinstance(value, bytes): + value = value.decode(get_filesystem_encoding(), 'replace') + return value +else: + import io + text_type = str + raw_input = input + string_types = (str,) + range_type = range + isidentifier = lambda x: x.isidentifier() + iteritems = lambda x: iter(x.items()) + + def is_bytes(x): + return isinstance(x, (bytes, memoryview, bytearray)) + + def _is_binary_reader(stream, default=False): + try: + return isinstance(stream.read(0), bytes) + except Exception: + return default + # This happens in some cases where the stream was already + # closed. In this case, we assume the default. + + def _is_binary_writer(stream, default=False): + try: + stream.write(b'') + except Exception: + try: + stream.write('') + return False + except Exception: + pass + return default + return True + + def _find_binary_reader(stream): + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detaching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_reader(stream, False): + return stream + + buf = getattr(stream, 'buffer', None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_reader(buf, True): + return buf + + def _find_binary_writer(stream): + # We need to figure out if the given stream is already binary. + # This can happen because the official docs recommend detatching + # the streams to get binary streams. Some code might do this, so + # we need to deal with this case explicitly. + if _is_binary_writer(stream, False): + return stream + + buf = getattr(stream, 'buffer', None) + + # Same situation here; this time we assume that the buffer is + # actually binary in case it's closed. + if buf is not None and _is_binary_writer(buf, True): + return buf + + def _stream_is_misconfigured(stream): + """A stream is misconfigured if its encoding is ASCII.""" + # If the stream does not have an encoding set, we assume it's set + # to ASCII. This appears to happen in certain unittest + # environments. It's not quite clear what the correct behavior is + # but this at least will force Click to recover somehow. 
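+        # For instance, a stream reporting encoding='ANSI_X3.4-1968'
+        # (an alias the codecs module resolves to 'ascii') counts as
+        # misconfigured, while a 'UTF-8' stream is left untouched.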
+ return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') + + def _is_compatible_text_stream(stream, encoding, errors): + stream_encoding = getattr(stream, 'encoding', None) + stream_errors = getattr(stream, 'errors', None) + + # Perfect match. + if stream_encoding == encoding and stream_errors == errors: + return True + + # Otherwise, it's only a compatible stream if we did not ask for + # an encoding. + if encoding is None: + return stream_encoding is not None + + return False + + def _force_correct_text_reader(text_reader, encoding, errors): + if _is_binary_reader(text_reader, False): + binary_reader = text_reader + else: + # If there is no target encoding set, we need to verify that the + # reader is not actually misconfigured. + if encoding is None and not _stream_is_misconfigured(text_reader): + return text_reader + + if _is_compatible_text_stream(text_reader, encoding, errors): + return text_reader + + # If the reader has no encoding, we try to find the underlying + # binary reader for it. If that fails because the environment is + # misconfigured, we silently go with the same reader because this + # is too common to happen. In that case, mojibake is better than + # exceptions. + binary_reader = _find_binary_reader(text_reader) + if binary_reader is None: + return text_reader + + # At this point, we default the errors to replace instead of strict + # because nobody handles those errors anyways and at this point + # we're so fundamentally fucked that nothing can repair it. + if errors is None: + errors = 'replace' + return _make_text_stream(binary_reader, encoding, errors) + + def _force_correct_text_writer(text_writer, encoding, errors): + if _is_binary_writer(text_writer, False): + binary_writer = text_writer + else: + # If there is no target encoding set, we need to verify that the + # writer is not actually misconfigured. + if encoding is None and not _stream_is_misconfigured(text_writer): + return text_writer + + if _is_compatible_text_stream(text_writer, encoding, errors): + return text_writer + + # If the writer has no encoding, we try to find the underlying + # binary writer for it. If that fails because the environment is + # misconfigured, we silently go with the same writer because this + # is too common to happen. In that case, mojibake is better than + # exceptions. + binary_writer = _find_binary_writer(text_writer) + if binary_writer is None: + return text_writer + + # At this point, we default the errors to replace instead of strict + # because nobody handles those errors anyways and at this point + # we're so fundamentally fucked that nothing can repair it. 
+ if errors is None: + errors = 'replace' + return _make_text_stream(binary_writer, encoding, errors) + + def get_binary_stdin(): + reader = _find_binary_reader(sys.stdin) + if reader is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stdin.') + return reader + + def get_binary_stdout(): + writer = _find_binary_writer(sys.stdout) + if writer is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stdout.') + return writer + + def get_binary_stderr(): + writer = _find_binary_writer(sys.stderr) + if writer is None: + raise RuntimeError('Was not able to determine binary ' + 'stream for sys.stderr.') + return writer + + def get_text_stdin(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdin, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_reader(sys.stdin, encoding, errors) + + def get_text_stdout(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stdout, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stdout, encoding, errors) + + def get_text_stderr(encoding=None, errors=None): + rv = _get_windows_console_stream(sys.stderr, encoding, errors) + if rv is not None: + return rv + return _force_correct_text_writer(sys.stderr, encoding, errors) + + def filename_to_ui(value): + if isinstance(value, bytes): + value = value.decode(get_filesystem_encoding(), 'replace') + else: + value = value.encode('utf-8', 'surrogateescape') \ + .decode('utf-8', 'replace') + return value + + +def get_streerror(e, default=None): + if hasattr(e, 'strerror'): + msg = e.strerror + else: + if default is not None: + msg = default + else: + msg = str(e) + if isinstance(msg, bytes): + msg = msg.decode('utf-8', 'replace') + return msg + + +def open_stream(filename, mode='r', encoding=None, errors='strict', + atomic=False): + # Standard streams first. These are simple because they don't need + # special handling for the atomic flag. It's entirely ignored. + if filename == '-': + if 'w' in mode: + if 'b' in mode: + return get_binary_stdout(), False + return get_text_stdout(encoding=encoding, errors=errors), False + if 'b' in mode: + return get_binary_stdin(), False + return get_text_stdin(encoding=encoding, errors=errors), False + + # Non-atomic writes directly go out through the regular open functions. + if not atomic: + if encoding is None: + return open(filename, mode), True + return io.open(filename, mode, encoding=encoding, errors=errors), True + + # Some usability stuff for atomic writes + if 'a' in mode: + raise ValueError( + 'Appending to an existing file is not supported, because that ' + 'would involve an expensive `copy`-operation to a temporary ' + 'file. Open the file in normal `w`-mode and copy explicitly ' + 'if that\'s what you\'re after.' + ) + if 'x' in mode: + raise ValueError('Use the `overwrite`-parameter instead.') + if 'w' not in mode: + raise ValueError('Atomic writes only make sense with `w`-mode.') + + # Atomic writes are more complicated. They work by opening a file + # as a proxy in the same folder and then using the fdopen + # functionality to wrap it in a Python file. Then we wrap it in an + # atomic file that moves the file over on close. 
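+    # Illustrative sketch of this branch from a caller's point of view
+    # (hypothetical filename; uses only the signature defined above):
+    #
+    #     f, should_close = open_stream('report.txt', 'w', atomic=True)
+    #     f.write('all-or-nothing contents')
+    #     f.close()   # _AtomicFile renames the temp file over report.txt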
+ import tempfile + fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), + prefix='.__atomic-write') + + if encoding is not None: + f = io.open(fd, mode, encoding=encoding, errors=errors) + else: + f = os.fdopen(fd, mode) + + return _AtomicFile(f, tmp_filename, filename), True + + +# Used in a destructor call, needs extra protection from interpreter cleanup. +if hasattr(os, 'replace'): + _replace = os.replace + _can_replace = True +else: + _replace = os.rename + _can_replace = not WIN + + +class _AtomicFile(object): + + def __init__(self, f, tmp_filename, real_filename): + self._f = f + self._tmp_filename = tmp_filename + self._real_filename = real_filename + self.closed = False + + @property + def name(self): + return self._real_filename + + def close(self, delete=False): + if self.closed: + return + self._f.close() + if not _can_replace: + try: + os.remove(self._real_filename) + except OSError: + pass + _replace(self._tmp_filename, self._real_filename) + self.closed = True + + def __getattr__(self, name): + return getattr(self._f, name) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close(delete=exc_type is not None) + + def __repr__(self): + return repr(self._f) + + +auto_wrap_for_ansi = None +colorama = None +get_winterm_size = None + + +def strip_ansi(value): + return _ansi_re.sub('', value) + + +def should_strip_ansi(stream=None, color=None): + if color is None: + if stream is None: + stream = sys.stdin + return not isatty(stream) + return not color + + +# If we're on Windows, we provide transparent integration through +# colorama. This will make ANSI colors through the echo function +# work automatically. +if WIN: + # Windows has a smaller terminal + DEFAULT_COLUMNS = 79 + + from ._winconsole import _get_windows_console_stream + + def _get_argv_encoding(): + import locale + return locale.getpreferredencoding() + + if PY2: + def raw_input(prompt=''): + sys.stderr.flush() + if prompt: + stdout = _default_text_stdout() + stdout.write(prompt) + stdin = _default_text_stdin() + return stdin.readline().rstrip('\r\n') + + try: + import colorama + except ImportError: + pass + else: + _ansi_stream_wrappers = WeakKeyDictionary() + + def auto_wrap_for_ansi(stream, color=None): + """This function wraps a stream so that calls through colorama + are issued to the win32 console API to recolor on demand. It + also ensures to reset the colors if a write call is interrupted + to not destroy the console afterwards. 
+ """ + try: + cached = _ansi_stream_wrappers.get(stream) + except Exception: + cached = None + if cached is not None: + return cached + strip = should_strip_ansi(stream, color) + ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) + rv = ansi_wrapper.stream + _write = rv.write + + def _safe_write(s): + try: + return _write(s) + except: + ansi_wrapper.reset_all() + raise + + rv.write = _safe_write + try: + _ansi_stream_wrappers[stream] = rv + except Exception: + pass + return rv + + def get_winterm_size(): + win = colorama.win32.GetConsoleScreenBufferInfo( + colorama.win32.STDOUT).srWindow + return win.Right - win.Left, win.Bottom - win.Top +else: + def _get_argv_encoding(): + return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() + + _get_windows_console_stream = lambda *x: None + + +def term_len(x): + return len(strip_ansi(x)) + + +def isatty(stream): + try: + return stream.isatty() + except Exception: + return False + + +def _make_cached_stream_func(src_func, wrapper_func): + cache = WeakKeyDictionary() + def func(): + stream = src_func() + try: + rv = cache.get(stream) + except Exception: + rv = None + if rv is not None: + return rv + rv = wrapper_func() + try: + cache[stream] = rv + except Exception: + pass + return rv + return func + + +_default_text_stdin = _make_cached_stream_func( + lambda: sys.stdin, get_text_stdin) +_default_text_stdout = _make_cached_stream_func( + lambda: sys.stdout, get_text_stdout) +_default_text_stderr = _make_cached_stream_func( + lambda: sys.stderr, get_text_stderr) + + +binary_streams = { + 'stdin': get_binary_stdin, + 'stdout': get_binary_stdout, + 'stderr': get_binary_stderr, +} + +text_streams = { + 'stdin': get_text_stdin, + 'stdout': get_text_stdout, + 'stderr': get_text_stderr, +} diff --git a/venv/lib/python2.7/site-packages/click/_termui_impl.py b/venv/lib/python2.7/site-packages/click/_termui_impl.py new file mode 100644 index 0000000..a2c1eb0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_termui_impl.py @@ -0,0 +1,547 @@ +""" + click._termui_impl + ~~~~~~~~~~~~~~~~~~ + + This module contains implementations for the termui module. To keep the + import time of Click down, some infrequently used functionality is placed + in this module and only imported as needed. + + :copyright: (c) 2014 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" +import os +import sys +import time +import math +from ._compat import _default_text_stdout, range_type, PY2, isatty, \ + open_stream, strip_ansi, term_len, get_best_encoding, WIN +from .utils import echo +from .exceptions import ClickException + + +if os.name == 'nt': + BEFORE_BAR = '\r' + AFTER_BAR = '\n' +else: + BEFORE_BAR = '\r\033[?25l' + AFTER_BAR = '\033[?25h\n' + + +def _length_hint(obj): + """Returns the length hint of an object.""" + try: + return len(obj) + except TypeError: + try: + get_hint = type(obj).__length_hint__ + except AttributeError: + return None + try: + hint = get_hint(obj) + except TypeError: + return None + if hint is NotImplemented or \ + not isinstance(hint, (int, long)) or \ + hint < 0: + return None + return hint + + +class ProgressBar(object): + + def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', + bar_template='%(bar)s', info_sep=' ', show_eta=True, + show_percent=None, show_pos=False, item_show_func=None, + label=None, file=None, color=None, width=30): + self.fill_char = fill_char + self.empty_char = empty_char + self.bar_template = bar_template + self.info_sep = info_sep + self.show_eta = show_eta + self.show_percent = show_percent + self.show_pos = show_pos + self.item_show_func = item_show_func + self.label = label or '' + if file is None: + file = _default_text_stdout() + self.file = file + self.color = color + self.width = width + self.autowidth = width == 0 + + if length is None: + length = _length_hint(iterable) + if iterable is None: + if length is None: + raise TypeError('iterable or length is required') + iterable = range_type(length) + self.iter = iter(iterable) + self.length = length + self.length_known = length is not None + self.pos = 0 + self.avg = [] + self.start = self.last_eta = time.time() + self.eta_known = False + self.finished = False + self.max_width = None + self.entered = False + self.current_item = None + self.is_hidden = not isatty(self.file) + self._last_line = None + + def __enter__(self): + self.entered = True + self.render_progress() + return self + + def __exit__(self, exc_type, exc_value, tb): + self.render_finish() + + def __iter__(self): + if not self.entered: + raise RuntimeError('You need to use progress bars in a with block.') + self.render_progress() + return self + + def render_finish(self): + if self.is_hidden: + return + self.file.write(AFTER_BAR) + self.file.flush() + + @property + def pct(self): + if self.finished: + return 1.0 + return min(self.pos / (float(self.length) or 1), 1.0) + + @property + def time_per_iteration(self): + if not self.avg: + return 0.0 + return sum(self.avg) / float(len(self.avg)) + + @property + def eta(self): + if self.length_known and not self.finished: + return self.time_per_iteration * (self.length - self.pos) + return 0.0 + + def format_eta(self): + if self.eta_known: + t = self.eta + 1 + seconds = t % 60 + t /= 60 + minutes = t % 60 + t /= 60 + hours = t % 24 + t /= 24 + if t > 0: + days = t + return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) + else: + return '%02d:%02d:%02d' % (hours, minutes, seconds) + return '' + + def format_pos(self): + pos = str(self.pos) + if self.length_known: + pos += '/%s' % self.length + return pos + + def format_pct(self): + return ('% 4d%%' % int(self.pct * 100))[1:] + + def format_progress_line(self): + show_percent = self.show_percent + + info_bits = [] + if self.length_known: + bar_length = int(self.pct * self.width) + bar = self.fill_char * bar_length + bar += self.empty_char * (self.width - bar_length) + if 
show_percent is None: + show_percent = not self.show_pos + else: + if self.finished: + bar = self.fill_char * self.width + else: + bar = list(self.empty_char * (self.width or 1)) + if self.time_per_iteration != 0: + bar[int((math.cos(self.pos * self.time_per_iteration) + / 2.0 + 0.5) * self.width)] = self.fill_char + bar = ''.join(bar) + + if self.show_pos: + info_bits.append(self.format_pos()) + if show_percent: + info_bits.append(self.format_pct()) + if self.show_eta and self.eta_known and not self.finished: + info_bits.append(self.format_eta()) + if self.item_show_func is not None: + item_info = self.item_show_func(self.current_item) + if item_info is not None: + info_bits.append(item_info) + + return (self.bar_template % { + 'label': self.label, + 'bar': bar, + 'info': self.info_sep.join(info_bits) + }).rstrip() + + def render_progress(self): + from .termui import get_terminal_size + nl = False + + if self.is_hidden: + buf = [self.label] + nl = True + else: + buf = [] + # Update width in case the terminal has been resized + if self.autowidth: + old_width = self.width + self.width = 0 + clutter_length = term_len(self.format_progress_line()) + new_width = max(0, get_terminal_size()[0] - clutter_length) + if new_width < old_width: + buf.append(BEFORE_BAR) + buf.append(' ' * self.max_width) + self.max_width = new_width + self.width = new_width + + clear_width = self.width + if self.max_width is not None: + clear_width = self.max_width + + buf.append(BEFORE_BAR) + line = self.format_progress_line() + line_len = term_len(line) + if self.max_width is None or self.max_width < line_len: + self.max_width = line_len + buf.append(line) + + buf.append(' ' * (clear_width - line_len)) + line = ''.join(buf) + + # Render the line only if it changed. + if line != self._last_line: + self._last_line = line + echo(line, file=self.file, color=self.color, nl=nl) + self.file.flush() + + def make_step(self, n_steps): + self.pos += n_steps + if self.length_known and self.pos >= self.length: + self.finished = True + + if (time.time() - self.last_eta) < 1.0: + return + + self.last_eta = time.time() + self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)] + + self.eta_known = self.length_known + + def update(self, n_steps): + self.make_step(n_steps) + self.render_progress() + + def finish(self): + self.eta_known = 0 + self.current_item = None + self.finished = True + + def next(self): + if self.is_hidden: + return next(self.iter) + try: + rv = next(self.iter) + self.current_item = rv + except StopIteration: + self.finish() + self.render_progress() + raise StopIteration() + else: + self.update(1) + return rv + + if not PY2: + __next__ = next + del next + + +def pager(text, color=None): + """Decide what method to use for paging through text.""" + stdout = _default_text_stdout() + if not isatty(sys.stdin) or not isatty(stdout): + return _nullpager(stdout, text, color) + pager_cmd = (os.environ.get('PAGER', None) or '').strip() + if pager_cmd: + if WIN: + return _tempfilepager(text, pager_cmd, color) + return _pipepager(text, pager_cmd, color) + if os.environ.get('TERM') in ('dumb', 'emacs'): + return _nullpager(stdout, text, color) + if WIN or sys.platform.startswith('os2'): + return _tempfilepager(text, 'more <', color) + if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: + return _pipepager(text, 'less', color) + + import tempfile + fd, filename = tempfile.mkstemp() + os.close(fd) + try: + if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: + return _pipepager(text, 
'more', color) + return _nullpager(stdout, text, color) + finally: + os.unlink(filename) + + +def _pipepager(text, cmd, color): + """Page through text by feeding it to another program. Invoking a + pager through this might support colors. + """ + import subprocess + env = dict(os.environ) + + # If we're piping to less we might support colors under the + # condition that + cmd_detail = cmd.rsplit('/', 1)[-1].split() + if color is None and cmd_detail[0] == 'less': + less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) + if not less_flags: + env['LESS'] = '-R' + color = True + elif 'r' in less_flags or 'R' in less_flags: + color = True + + if not color: + text = strip_ansi(text) + + c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, + env=env) + encoding = get_best_encoding(c.stdin) + try: + c.stdin.write(text.encode(encoding, 'replace')) + c.stdin.close() + except (IOError, KeyboardInterrupt): + pass + + # Less doesn't respect ^C, but catches it for its own UI purposes (aborting + # search or other commands inside less). + # + # That means when the user hits ^C, the parent process (click) terminates, + # but less is still alive, paging the output and messing up the terminal. + # + # If the user wants to make the pager exit on ^C, they should set + # `LESS='-K'`. It's not our decision to make. + while True: + try: + c.wait() + except KeyboardInterrupt: + pass + else: + break + + +def _tempfilepager(text, cmd, color): + """Page through text by invoking a program on a temporary file.""" + import tempfile + filename = tempfile.mktemp() + if not color: + text = strip_ansi(text) + encoding = get_best_encoding(sys.stdout) + with open_stream(filename, 'wb')[0] as f: + f.write(text.encode(encoding)) + try: + os.system(cmd + ' "' + filename + '"') + finally: + os.unlink(filename) + + +def _nullpager(stream, text, color): + """Simply print unformatted text. This is the ultimate fallback.""" + if not color: + text = strip_ansi(text) + stream.write(text) + + +class Editor(object): + + def __init__(self, editor=None, env=None, require_save=True, + extension='.txt'): + self.editor = editor + self.env = env + self.require_save = require_save + self.extension = extension + + def get_editor(self): + if self.editor is not None: + return self.editor + for key in 'VISUAL', 'EDITOR': + rv = os.environ.get(key) + if rv: + return rv + if WIN: + return 'notepad' + for editor in 'vim', 'nano': + if os.system('which %s >/dev/null 2>&1' % editor) == 0: + return editor + return 'vi' + + def edit_file(self, filename): + import subprocess + editor = self.get_editor() + if self.env: + environ = os.environ.copy() + environ.update(self.env) + else: + environ = None + try: + c = subprocess.Popen('%s "%s"' % (editor, filename), + env=environ, shell=True) + exit_code = c.wait() + if exit_code != 0: + raise ClickException('%s: Editing failed!' 
% editor) + except OSError as e: + raise ClickException('%s: Editing failed: %s' % (editor, e)) + + def edit(self, text): + import tempfile + + text = text or '' + if text and not text.endswith('\n'): + text += '\n' + + fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) + try: + if WIN: + encoding = 'utf-8-sig' + text = text.replace('\n', '\r\n') + else: + encoding = 'utf-8' + text = text.encode(encoding) + + f = os.fdopen(fd, 'wb') + f.write(text) + f.close() + timestamp = os.path.getmtime(name) + + self.edit_file(name) + + if self.require_save \ + and os.path.getmtime(name) == timestamp: + return None + + f = open(name, 'rb') + try: + rv = f.read() + finally: + f.close() + return rv.decode('utf-8-sig').replace('\r\n', '\n') + finally: + os.unlink(name) + + +def open_url(url, wait=False, locate=False): + import subprocess + + def _unquote_file(url): + try: + import urllib + except ImportError: + import urllib + if url.startswith('file://'): + url = urllib.unquote(url[7:]) + return url + + if sys.platform == 'darwin': + args = ['open'] + if wait: + args.append('-W') + if locate: + args.append('-R') + args.append(_unquote_file(url)) + null = open('/dev/null', 'w') + try: + return subprocess.Popen(args, stderr=null).wait() + finally: + null.close() + elif WIN: + if locate: + url = _unquote_file(url) + args = 'explorer /select,"%s"' % _unquote_file( + url.replace('"', '')) + else: + args = 'start %s "" "%s"' % ( + wait and '/WAIT' or '', url.replace('"', '')) + return os.system(args) + + try: + if locate: + url = os.path.dirname(_unquote_file(url)) or '.' + else: + url = _unquote_file(url) + c = subprocess.Popen(['xdg-open', url]) + if wait: + return c.wait() + return 0 + except OSError: + if url.startswith(('http://', 'https://')) and not locate and not wait: + import webbrowser + webbrowser.open(url) + return 0 + return 1 + + +def _translate_ch_to_exc(ch): + if ch == '\x03': + raise KeyboardInterrupt() + if ch == '\x04': + raise EOFError() + + +if WIN: + import msvcrt + + def getchar(echo): + rv = msvcrt.getch() + if echo: + msvcrt.putchar(rv) + _translate_ch_to_exc(rv) + if PY2: + enc = getattr(sys.stdin, 'encoding', None) + if enc is not None: + rv = rv.decode(enc, 'replace') + else: + rv = rv.decode('cp1252', 'replace') + return rv +else: + import tty + import termios + + def getchar(echo): + if not isatty(sys.stdin): + f = open('/dev/tty') + fd = f.fileno() + else: + fd = sys.stdin.fileno() + f = None + try: + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(fd) + ch = os.read(fd, 32) + if echo and isatty(sys.stdout): + sys.stdout.write(ch) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + sys.stdout.flush() + if f is not None: + f.close() + except termios.error: + pass + _translate_ch_to_exc(ch) + return ch.decode(get_best_encoding(sys.stdin), 'replace') diff --git a/venv/lib/python2.7/site-packages/click/_textwrap.py b/venv/lib/python2.7/site-packages/click/_textwrap.py new file mode 100644 index 0000000..7e77603 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_textwrap.py @@ -0,0 +1,38 @@ +import textwrap +from contextlib import contextmanager + + +class TextWrapper(textwrap.TextWrapper): + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + space_left = max(width - cur_len, 1) + + if self.break_long_words: + last = reversed_chunks[-1] + cut = last[:space_left] + res = last[space_left:] + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + 
@contextmanager + def extra_indent(self, indent): + old_initial_indent = self.initial_indent + old_subsequent_indent = self.subsequent_indent + self.initial_indent += indent + self.subsequent_indent += indent + try: + yield + finally: + self.initial_indent = old_initial_indent + self.subsequent_indent = old_subsequent_indent + + def indent_only(self, text): + rv = [] + for idx, line in enumerate(text.splitlines()): + indent = self.initial_indent + if idx > 0: + indent = self.subsequent_indent + rv.append(indent + line) + return '\n'.join(rv) diff --git a/venv/lib/python2.7/site-packages/click/_unicodefun.py b/venv/lib/python2.7/site-packages/click/_unicodefun.py new file mode 100644 index 0000000..24b7031 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_unicodefun.py @@ -0,0 +1,119 @@ +import os +import sys +import codecs + +from ._compat import PY2 + + +# If someone wants to vendor click, we want to ensure the +# correct package is discovered. Ideally we could use a +# relative import here but unfortunately Python does not +# support that. +click = sys.modules[__name__.rsplit('.', 1)[0]] + + +def _find_unicode_literals_frame(): + import __future__ + frm = sys._getframe(1) + idx = 1 + while frm is not None: + if frm.f_globals.get('__name__', '').startswith('click.'): + frm = frm.f_back + idx += 1 + elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: + return idx + else: + break + return 0 + + +def _check_for_unicode_literals(): + if not __debug__: + return + if not PY2 or click.disable_unicode_literals_warning: + return + bad_frame = _find_unicode_literals_frame() + if bad_frame <= 0: + return + from warnings import warn + warn(Warning('Click detected the use of the unicode_literals ' + '__future__ import. This is heavily discouraged ' + 'because it can introduce subtle bugs in your ' + 'code. You should instead use explicit u"" literals ' + 'for your unicode strings. For more information see ' + 'http://click.pocoo.org/python3/'), + stacklevel=bad_frame) + + +def _verify_python3_env(): + """Ensures that the environment is good for unicode on Python 3.""" + if PY2: + return + try: + import locale + fs_enc = codecs.lookup(locale.getpreferredencoding()).name + except Exception: + fs_enc = 'ascii' + if fs_enc != 'ascii': + return + + extra = '' + if os.name == 'posix': + import subprocess + rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate()[0] + good_locales = set() + has_c_utf8 = False + + # Make sure we're operating on text here. + if isinstance(rv, bytes): + rv = rv.decode('ascii', 'replace') + + for line in rv.splitlines(): + locale = line.strip() + if locale.lower().endswith(('.utf-8', '.utf8')): + good_locales.add(locale) + if locale.lower() in ('c.utf8', 'c.utf-8'): + has_c_utf8 = True + + extra += '\n\n' + if not good_locales: + extra += ( + 'Additional information: on this system no suitable UTF-8\n' + 'locales were discovered. This most likely requires resolving\n' + 'by reconfiguring the locale system.' + ) + elif has_c_utf8: + extra += ( + 'This system supports the C.UTF-8 locale which is recommended.\n' + 'You might be able to resolve your issue by exporting the\n' + 'following environment variables:\n\n' + ' export LC_ALL=C.UTF-8\n' + ' export LANG=C.UTF-8' + ) + else: + extra += ( + 'This system lists a couple of UTF-8 supporting locales that\n' + 'you can pick from. 
The following suitable locales where\n' + 'discovered: %s' + ) % ', '.join(sorted(good_locales)) + + bad_locale = None + for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'): + if locale and locale.lower().endswith(('.utf-8', '.utf8')): + bad_locale = locale + if locale is not None: + break + if bad_locale is not None: + extra += ( + '\n\nClick discovered that you exported a UTF-8 locale\n' + 'but the locale system could not pick up from it because\n' + 'it does not exist. The exported locale is "%s" but it\n' + 'is not supported' + ) % bad_locale + + raise RuntimeError('Click will abort further execution because Python 3 ' + 'was configured to use ASCII as encoding for the ' + 'environment. Either run this under Python 2 or ' + 'consult http://click.pocoo.org/python3/ for ' + 'mitigation steps.' + extra) diff --git a/venv/lib/python2.7/site-packages/click/_winconsole.py b/venv/lib/python2.7/site-packages/click/_winconsole.py new file mode 100644 index 0000000..9aed942 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/_winconsole.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# This module is based on the excellent work by Adam Bartoš who +# provided a lot of what went into the implementation here in +# the discussion to issue1602 in the Python bug tracker. +# +# There are some general differences in regards to how this works +# compared to the original patches as we do not need to patch +# the entire interpreter but just work in our little world of +# echo and prmopt. + +import io +import os +import sys +import zlib +import time +import ctypes +import msvcrt +from click._compat import _NonClosingTextIOWrapper, text_type, PY2 +from ctypes import byref, POINTER, c_int, c_char, c_char_p, \ + c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE +try: + from ctypes import pythonapi + PyObject_GetBuffer = pythonapi.PyObject_GetBuffer + PyBuffer_Release = pythonapi.PyBuffer_Release +except ImportError: + pythonapi = None +from ctypes.wintypes import LPWSTR, LPCWSTR + + +c_ssize_p = POINTER(c_ssize_t) + +kernel32 = windll.kernel32 +GetStdHandle = kernel32.GetStdHandle +ReadConsoleW = kernel32.ReadConsoleW +WriteConsoleW = kernel32.WriteConsoleW +GetLastError = kernel32.GetLastError +GetCommandLineW = WINFUNCTYPE(LPWSTR)( + ('GetCommandLineW', windll.kernel32)) +CommandLineToArgvW = WINFUNCTYPE( + POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( + ('CommandLineToArgvW', windll.shell32)) + + +STDIN_HANDLE = GetStdHandle(-10) +STDOUT_HANDLE = GetStdHandle(-11) +STDERR_HANDLE = GetStdHandle(-12) + + +PyBUF_SIMPLE = 0 +PyBUF_WRITABLE = 1 + +ERROR_SUCCESS = 0 +ERROR_NOT_ENOUGH_MEMORY = 8 +ERROR_OPERATION_ABORTED = 995 + +STDIN_FILENO = 0 +STDOUT_FILENO = 1 +STDERR_FILENO = 2 + +EOF = b'\x1a' +MAX_BYTES_WRITTEN = 32767 + + +class Py_buffer(ctypes.Structure): + _fields_ = [ + ('buf', c_void_p), + ('obj', py_object), + ('len', c_ssize_t), + ('itemsize', c_ssize_t), + ('readonly', c_int), + ('ndim', c_int), + ('format', c_char_p), + ('shape', c_ssize_p), + ('strides', c_ssize_p), + ('suboffsets', c_ssize_p), + ('internal', c_void_p) + ] + + if PY2: + _fields_.insert(-1, ('smalltable', c_ssize_t * 2)) + + +# On PyPy we cannot get buffers so our ability to operate here is +# serverly limited. 
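+# Illustrative sketch (not upstream code) of how the console reader later
+# in this file uses get_buffer on CPython: a writable bytearray is exposed
+# as a raw ctypes buffer so ReadConsoleW can write UTF-16-LE code units
+# straight into it:
+#
+#     data = bytearray(16)
+#     raw = get_buffer(data, writable=True)   # (c_char * 16) view of data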
+if pythonapi is None: + get_buffer = None +else: + def get_buffer(obj, writable=False): + buf = Py_buffer() + flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE + PyObject_GetBuffer(py_object(obj), byref(buf), flags) + try: + buffer_type = c_char * buf.len + return buffer_type.from_address(buf.buf) + finally: + PyBuffer_Release(byref(buf)) + + +class _WindowsConsoleRawIOBase(io.RawIOBase): + + def __init__(self, handle): + self.handle = handle + + def isatty(self): + io.RawIOBase.isatty(self) + return True + + +class _WindowsConsoleReader(_WindowsConsoleRawIOBase): + + def readable(self): + return True + + def readinto(self, b): + bytes_to_be_read = len(b) + if not bytes_to_be_read: + return 0 + elif bytes_to_be_read % 2: + raise ValueError('cannot read odd number of bytes from ' + 'UTF-16-LE encoded console') + + buffer = get_buffer(b, writable=True) + code_units_to_be_read = bytes_to_be_read // 2 + code_units_read = c_ulong() + + rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read, + byref(code_units_read), None) + if GetLastError() == ERROR_OPERATION_ABORTED: + # wait for KeyboardInterrupt + time.sleep(0.1) + if not rv: + raise OSError('Windows error: %s' % GetLastError()) + + if buffer[0] == EOF: + return 0 + return 2 * code_units_read.value + + +class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): + + def writable(self): + return True + + @staticmethod + def _get_error_message(errno): + if errno == ERROR_SUCCESS: + return 'ERROR_SUCCESS' + elif errno == ERROR_NOT_ENOUGH_MEMORY: + return 'ERROR_NOT_ENOUGH_MEMORY' + return 'Windows error %s' % errno + + def write(self, b): + bytes_to_be_written = len(b) + buf = get_buffer(b) + code_units_to_be_written = min(bytes_to_be_written, + MAX_BYTES_WRITTEN) // 2 + code_units_written = c_ulong() + + WriteConsoleW(self.handle, buf, code_units_to_be_written, + byref(code_units_written), None) + bytes_written = 2 * code_units_written.value + + if bytes_written == 0 and bytes_to_be_written > 0: + raise OSError(self._get_error_message(GetLastError())) + return bytes_written + + +class ConsoleStream(object): + + def __init__(self, text_stream, byte_stream): + self._text_stream = text_stream + self.buffer = byte_stream + + @property + def name(self): + return self.buffer.name + + def write(self, x): + if isinstance(x, text_type): + return self._text_stream.write(x) + try: + self.flush() + except Exception: + pass + return self.buffer.write(x) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def __getattr__(self, name): + return getattr(self._text_stream, name) + + def isatty(self): + return self.buffer.isatty() + + def __repr__(self): + return '' % ( + self.name, + self.encoding, + ) + + +def _get_text_stdin(buffer_stream): + text_stream = _NonClosingTextIOWrapper( + io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), + 'utf-16-le', 'strict', line_buffering=True) + return ConsoleStream(text_stream, buffer_stream) + + +def _get_text_stdout(buffer_stream): + text_stream = _NonClosingTextIOWrapper( + _WindowsConsoleWriter(STDOUT_HANDLE), + 'utf-16-le', 'strict', line_buffering=True) + return ConsoleStream(text_stream, buffer_stream) + + +def _get_text_stderr(buffer_stream): + text_stream = _NonClosingTextIOWrapper( + _WindowsConsoleWriter(STDERR_HANDLE), + 'utf-16-le', 'strict', line_buffering=True) + return ConsoleStream(text_stream, buffer_stream) + + +if PY2: + def _hash_py_argv(): + return zlib.crc32('\x00'.join(sys.argv[1:])) + + _initial_argv_hash = _hash_py_argv() + + def _get_windows_argv(): + argc = 
c_int(0)
+        argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+        argv = [argv_unicode[i] for i in range(0, argc.value)]
+
+        if not hasattr(sys, 'frozen'):
+            argv = argv[1:]
+            while len(argv) > 0:
+                arg = argv[0]
+                if not arg.startswith('-') or arg == '-':
+                    break
+                argv = argv[1:]
+                if arg.startswith(('-c', '-m')):
+                    break
+
+        return argv[1:]
+
+
+_stream_factories = {
+    0: _get_text_stdin,
+    1: _get_text_stdout,
+    2: _get_text_stderr,
+}
+
+
+def _get_windows_console_stream(f, encoding, errors):
+    if get_buffer is not None and \
+       encoding in ('utf-16-le', None) \
+       and errors in ('strict', None) and \
+       hasattr(f, 'isatty') and f.isatty():
+        func = _stream_factories.get(f.fileno())
+        if func is not None:
+            if not PY2:
+                f = getattr(f, 'buffer')
+                if f is None:
+                    return None
+            else:
+                # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+                # bit moot. The same problems apply as for
+                # get_binary_stdin and friends from _compat.
+                msvcrt.setmode(f.fileno(), os.O_BINARY)
+            return func(f)
diff --git a/venv/lib/python2.7/site-packages/click/core.py b/venv/lib/python2.7/site-packages/click/core.py
new file mode 100644
index 0000000..33a527a
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/click/core.py
@@ -0,0 +1,1738 @@
+import os
+import sys
+from contextlib import contextmanager
+from itertools import repeat
+from functools import update_wrapper
+
+from .types import convert_type, IntRange, BOOL
+from .utils import make_str, make_default_short_help, echo, get_os_args
+from .exceptions import ClickException, UsageError, BadParameter, Abort, \
+     MissingParameter
+from .termui import prompt, confirm
+from .formatting import HelpFormatter, join_options
+from .parser import OptionParser, split_opt
+from .globals import push_context, pop_context
+
+from ._compat import PY2, isidentifier, iteritems
+from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
+
+
+_missing = object()
+
+
+SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
+SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+    """Internal handler for the bash completion support."""
+    if complete_var is None:
+        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
+    complete_instr = os.environ.get(complete_var)
+    if not complete_instr:
+        return
+
+    from ._bashcomplete import bashcomplete
+    if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+        sys.exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+    if not base_command.chain or not isinstance(cmd, MultiCommand):
+        return
+    if register:
+        hint = 'It is not possible to add multi commands as children to ' \
+               'another multi command that is in chain mode'
+    else:
+        hint = 'Found a multi command as subcommand to a multi command ' \
+               'that is in chain mode. This is not supported'
+    raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
+                       'added as subcommand but is itself a '
+                       'multi command. ("%s" is a %s within a chained '
+                       '%s named "%s"). This restriction was supposed to '
+                       'be lifted in 6.0 but the fix was flawed. 
This ' + 'will be fixed in Click 7.0' % ( + hint, base_command.name, cmd_name, + cmd_name, cmd.__class__.__name__, + base_command.__class__.__name__, + base_command.name)) + + +def batch(iterable, batch_size): + return list(zip(*repeat(iter(iterable), batch_size))) + + +def invoke_param_callback(callback, ctx, param, value): + code = getattr(callback, '__code__', None) + args = getattr(code, 'co_argcount', 3) + + if args < 3: + # This will become a warning in Click 3.0: + from warnings import warn + warn(Warning('Invoked legacy parameter callback "%s". The new ' + 'signature for such callbacks starting with ' + 'click 2.0 is (ctx, param, value).' + % callback), stacklevel=3) + return callback(ctx, value) + return callback(ctx, param, value) + + +@contextmanager +def augment_usage_errors(ctx, param=None): + """Context manager that attaches extra information to exceptions that + fly. + """ + try: + yield + except BadParameter as e: + if e.ctx is None: + e.ctx = ctx + if param is not None and e.param is None: + e.param = param + raise + except UsageError as e: + if e.ctx is None: + e.ctx = ctx + raise + + +def iter_params_for_processing(invocation_order, declaration_order): + """Given a sequence of parameters in the order as should be considered + for processing and an iterable of parameters that exist, this returns + a list in the correct order as they should be processed. + """ + def sort_key(item): + try: + idx = invocation_order.index(item) + except ValueError: + idx = float('inf') + return (not item.is_eager, idx) + + return sorted(declaration_order, key=sort_key) + + +class Context(object): + """The context is a special internal object that holds state relevant + for the script execution at every single level. It's normally invisible + to commands unless they opt-in to getting access to it. + + The context is useful as it can pass internal objects around and can + control special execution features such as reading data from + environment variables. + + A context can be used as context manager in which case it will call + :meth:`close` on teardown. + + .. versionadded:: 2.0 + Added the `resilient_parsing`, `help_option_names`, + `token_normalize_func` parameters. + + .. versionadded:: 3.0 + Added the `allow_extra_args` and `allow_interspersed_args` + parameters. + + .. versionadded:: 4.0 + Added the `color`, `ignore_unknown_options`, and + `max_content_width` parameters. + + :param command: the command class for this context. + :param parent: the parent context. + :param info_name: the info name for this invocation. Generally this + is the most descriptive name for the script or + command. For the toplevel script it is usually + the name of the script, for commands below it it's + the name of the script. + :param obj: an arbitrary object of user data. + :param auto_envvar_prefix: the prefix to use for automatic environment + variables. If this is `None` then reading + from environment variables is disabled. This + does not affect manually set environment + variables which are always read. + :param default_map: a dictionary (like object) with default values + for parameters. + :param terminal_width: the width of the terminal. The default is + inherit from parent context. If no context + defines the terminal width then auto + detection will be applied. + :param max_content_width: the maximum width for content rendered by + Click (this currently only affects help + pages). This defaults to 80 characters if + not overridden. 
In other words: even if the
+                              terminal is larger than that, Click will not
+                              format things wider than 80 characters by
+                              default. In addition to that, formatters might
+                              add some safety margin on the right.
+    :param resilient_parsing: if this flag is enabled then Click will
+                              parse without any interactivity or callback
+                              invocation. This is useful for implementing
+                              things such as completion support.
+    :param allow_extra_args: if this is set to `True` then extra arguments
+                             at the end will not raise an error and will be
+                             kept on the context. The default is to inherit
+                             from the command.
+    :param allow_interspersed_args: if this is set to `False` then options
+                                    and arguments cannot be mixed. The
+                                    default is to inherit from the command.
+    :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and keeps them for later
+                                   processing.
+    :param help_option_names: optionally a list of strings that define how
+                              the default help parameter is named. The
+                              default is ``['--help']``.
+    :param token_normalize_func: an optional function that is used to
+                                 normalize tokens (options, choices,
+                                 etc.). This for instance can be used to
+                                 implement case insensitive behavior.
+    :param color: controls if the terminal supports ANSI colors or not. The
+                  default is autodetection. This is only needed if ANSI
+                  codes are used in texts that Click prints which is by
+                  default not the case. This for instance would affect
+                  help output.
+    """
+
+    def __init__(self, command, parent=None, info_name=None, obj=None,
+                 auto_envvar_prefix=None, default_map=None,
+                 terminal_width=None, max_content_width=None,
+                 resilient_parsing=False, allow_extra_args=None,
+                 allow_interspersed_args=None,
+                 ignore_unknown_options=None, help_option_names=None,
+                 token_normalize_func=None, color=None):
+        #: the parent context or `None` if none exists.
+        self.parent = parent
+        #: the :class:`Command` for this context.
+        self.command = command
+        #: the descriptive information name
+        self.info_name = info_name
+        #: the parsed parameters except if the value is hidden in which
+        #: case it's not remembered.
+        self.params = {}
+        #: the leftover arguments.
+        self.args = []
+        #: protected arguments. These are arguments that are prepended
+        #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to other arguments. This is used
+        #: to implement nested parsing.
+        self.protected_args = []
+        if obj is None and parent is not None:
+            obj = parent.obj
+        #: the user object stored.
+        self.obj = obj
+        self._meta = getattr(parent, 'meta', {})
+
+        #: A dictionary (-like object) with defaults for parameters.
+        if default_map is None \
+           and parent is not None \
+           and parent.default_map is not None:
+            default_map = parent.default_map.get(info_name)
+        self.default_map = default_map
+
+        #: This flag indicates if a subcommand is going to be executed. A
+        #: group callback can use this information to figure out if it's
+        #: being executed directly or because the execution flow passes
+        #: onwards to a subcommand. By default it's None, but it can be
+        #: the name of the subcommand to execute.
+        #:
+        #: If chaining is enabled this will be set to ``'*'`` in case
+        #: any commands are executed. It is however not possible to
+        #: figure out which ones. If you require this knowledge you
+        #: should use a :func:`resultcallback`.
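+        #:
+        #: Example usage in a group callback (illustrative)::
+        #:
+        #:     @click.group(invoke_without_command=True)
+        #:     @click.pass_context
+        #:     def cli(ctx):
+        #:         if ctx.invoked_subcommand is None:
+        #:             click.echo('invoked without a subcommand')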
+        self.invoked_subcommand = None
+
+        if terminal_width is None and parent is not None:
+            terminal_width = parent.terminal_width
+        #: The width of the terminal (None is autodetection).
+        self.terminal_width = terminal_width
+
+        if max_content_width is None and parent is not None:
+            max_content_width = parent.max_content_width
+        #: The maximum width of formatted content (None implies a sensible
+        #: default which is 80 for most things).
+        self.max_content_width = max_content_width
+
+        if allow_extra_args is None:
+            allow_extra_args = command.allow_extra_args
+        #: Indicates if the context allows extra args or if it should
+        #: fail on parsing.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_extra_args = allow_extra_args
+
+        if allow_interspersed_args is None:
+            allow_interspersed_args = command.allow_interspersed_args
+        #: Indicates if the context allows mixing of arguments and
+        #: options or not.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_interspersed_args = allow_interspersed_args
+
+        if ignore_unknown_options is None:
+            ignore_unknown_options = command.ignore_unknown_options
+        #: Instructs click to ignore options that a command does not
+        #: understand and will store it on the context for later
+        #: processing. This is primarily useful for situations where you
+        #: want to call into external programs. Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+        #: forward all arguments.
+        #:
+        #: .. versionadded:: 4.0
+        self.ignore_unknown_options = ignore_unknown_options
+
+        if help_option_names is None:
+            if parent is not None:
+                help_option_names = parent.help_option_names
+            else:
+                help_option_names = ['--help']
+
+        #: The names for the help options.
+        self.help_option_names = help_option_names
+
+        if token_normalize_func is None and parent is not None:
+            token_normalize_func = parent.token_normalize_func
+
+        #: An optional normalization function for tokens. This is
+        #: options, choices, commands etc.
+        self.token_normalize_func = token_normalize_func
+
+        #: Indicates if resilient parsing is enabled. In that case Click
+        #: will do its best to not cause any failures.
+        self.resilient_parsing = resilient_parsing
+
+        # If there is no envvar prefix yet, but the parent has one and
+        # the command on this level has a name, we can expand the envvar
+        # prefix automatically.
+        if auto_envvar_prefix is None:
+            if parent is not None \
+               and parent.auto_envvar_prefix is not None and \
+               self.info_name is not None:
+                auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
+                                                self.info_name.upper())
+        else:
+            auto_envvar_prefix = auto_envvar_prefix.upper()
+        self.auto_envvar_prefix = auto_envvar_prefix
+
+        if color is None and parent is not None:
+            color = parent.color
+
+        #: Controls if styling output is wanted or not.
+        self.color = color
+
+        self._close_callbacks = []
+        self._depth = 0
+
+    def __enter__(self):
+        self._depth += 1
+        push_context(self)
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self._depth -= 1
+        if self._depth == 0:
+            self.close()
+        pop_context()
+
+    @contextmanager
+    def scope(self, cleanup=True):
+        """This helper method can be used with the context object to promote
+        it to the current thread local (see :func:`get_current_context`).
+        The default behavior of this is to invoke the cleanup functions which
+        can be disabled by setting `cleanup` to `False`. The cleanup
+        functions are typically used for things such as closing file handles.
+
+        If the cleanup is intended the context object can also be directly
+        used as a context manager.
+
+        Example usage::
+
+            with ctx.scope():
+                assert get_current_context() is ctx
+
+        This is equivalent::
+
+            with ctx:
+                assert get_current_context() is ctx
+
+        .. versionadded:: 5.0
+
+        :param cleanup: controls if the cleanup functions should be run or
+                        not. The default is to run these functions. In
+                        some situations the context only wants to be
+                        temporarily pushed in which case this can be disabled.
+                        Nested pushes automatically defer the cleanup.
+        """
+        if not cleanup:
+            self._depth += 1
+        try:
+            with self as rv:
+                yield rv
+        finally:
+            if not cleanup:
+                self._depth -= 1
+
+    @property
+    def meta(self):
+        """This is a dictionary which is shared with all the contexts
+        that are nested. It exists so that click utilities can store some
+        state here if they need to. It is however the responsibility of
+        that code to manage this dictionary well.
+
+        The keys are supposed to be unique dotted strings. For instance
+        module paths are a good choice for it. What is stored in there is
+        irrelevant for the operation of click. However what is important is
+        that code that places data here adheres to the general semantics of
+        the system.
+
+        Example usage::
+
+            LANG_KEY = __name__ + '.lang'
+
+            def set_language(value):
+                ctx = get_current_context()
+                ctx.meta[LANG_KEY] = value
+
+            def get_language():
+                return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+        .. versionadded:: 5.0
+        """
+        return self._meta
+
+    def make_formatter(self):
+        """Creates the formatter for the help and usage output."""
+        return HelpFormatter(width=self.terminal_width,
+                             max_width=self.max_content_width)
+
+    def call_on_close(self, f):
+        """This decorator remembers a function as callback that should be
+        executed when the context tears down. This is most useful to bind
+        resource handling to the script execution. For instance, file objects
+        opened by the :class:`File` type will register their close callbacks
+        here.
+
+        :param f: the function to execute on teardown.
+        """
+        self._close_callbacks.append(f)
+        return f
+
+    def close(self):
+        """Invokes all close callbacks."""
+        for cb in self._close_callbacks:
+            cb()
+        self._close_callbacks = []
+
+    @property
+    def command_path(self):
+        """The computed command path. This is used for the ``usage``
+        information on the help page. It's automatically created by
+        combining the info names of the chain of contexts to the root.
+        """
+        rv = ''
+        if self.info_name is not None:
+            rv = self.info_name
+        if self.parent is not None:
+            rv = self.parent.command_path + ' ' + rv
+        return rv.lstrip()
+
+    def find_root(self):
+        """Finds the outermost context."""
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    def find_object(self, object_type):
+        """Finds the closest object of a given type."""
+        node = self
+        while node is not None:
+            if isinstance(node.obj, object_type):
+                return node.obj
+            node = node.parent
+
+    def ensure_object(self, object_type):
+        """Like :meth:`find_object` but sets the innermost object to a
+        new instance of `object_type` if it does not exist.
+        """
+        rv = self.find_object(object_type)
+        if rv is None:
+            self.obj = rv = object_type()
+        return rv
+
+    def lookup_default(self, name):
+        """Looks up the default for a parameter name. This by default
+        looks into the :attr:`default_map` if available.
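+
+        For example, given some command ``cmd`` (a hypothetical
+        :class:`Command`), a context created with a ``default_map``
+        could resolve as::
+
+            ctx = Context(cmd, default_map={'debug': True})
+            assert ctx.lookup_default('debug') is True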
+ """ + if self.default_map is not None: + rv = self.default_map.get(name) + if callable(rv): + rv = rv() + return rv + + def fail(self, message): + """Aborts the execution of the program with a specific error + message. + + :param message: the error message to fail with. + """ + raise UsageError(message, self) + + def abort(self): + """Aborts the script.""" + raise Abort() + + def exit(self, code=0): + """Exits the application with a given exit code.""" + sys.exit(code) + + def get_usage(self): + """Helper method to get formatted usage string for the current + context and command. + """ + return self.command.get_usage(self) + + def get_help(self): + """Helper method to get formatted help page for the current + context and command. + """ + return self.command.get_help(self) + + def invoke(*args, **kwargs): + """Invokes a command callback in exactly the way it expects. There + are two ways to invoke this method: + + 1. the first argument can be a callback and all other arguments and + keyword arguments are forwarded directly to the function. + 2. the first argument is a click command object. In that case all + arguments are forwarded as well but proper click parameters + (options and click arguments) must be keyword arguments and Click + will fill in defaults. + + Note that before Click 3.2 keyword arguments were not properly filled + in against the intention of this code and no context was created. For + more information about this change and why it was done in a bugfix + release see :ref:`upgrade-to-3.2`. + """ + self, callback = args[:2] + ctx = self + + # It's also possible to invoke another command which might or + # might not have a callback. In that case we also fill + # in defaults and make a new context for this command. + if isinstance(callback, Command): + other_cmd = callback + callback = other_cmd.callback + ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) + if callback is None: + raise TypeError('The given command does not have a ' + 'callback that can be invoked.') + + for param in other_cmd.params: + if param.name not in kwargs and param.expose_value: + kwargs[param.name] = param.get_default(ctx) + + args = args[2:] + with augment_usage_errors(self): + with ctx: + return callback(*args, **kwargs) + + def forward(*args, **kwargs): + """Similar to :meth:`invoke` but fills in default keyword + arguments from the current context if the other command expects + it. This cannot invoke callbacks directly, only other commands. + """ + self, cmd = args[:2] + + # It's also possible to invoke another command which might or + # might not have a callback. + if not isinstance(cmd, Command): + raise TypeError('Callback is not a command.') + + for param in self.params: + if param not in kwargs: + kwargs[param] = self.params[param] + + return self.invoke(cmd, **kwargs) + + +class BaseCommand(object): + """The base command implements the minimal API contract of commands. + Most code will never use this as it does not implement a lot of useful + functionality but it can act as the direct subclass of alternative + parsing methods that do not depend on the Click parser. + + For instance, this can be used to bridge Click and other systems like + argparse or docopt. + + Because base commands do not implement a lot of the API that other + parts of Click take for granted, they are not supported for all + operations. For instance, they cannot be used with the decorators + usually and they have no built-in callback system. + + .. 
versionchanged:: 2.0
+       Added the `context_settings` parameter.
+
+    :param name: the name of the command to use unless a group overrides it.
+    :param context_settings: an optional dictionary with defaults that are
+                             passed to the context object.
+    """
+    #: the default for the :attr:`Context.allow_extra_args` flag.
+    allow_extra_args = False
+    #: the default for the :attr:`Context.allow_interspersed_args` flag.
+    allow_interspersed_args = True
+    #: the default for the :attr:`Context.ignore_unknown_options` flag.
+    ignore_unknown_options = False
+
+    def __init__(self, name, context_settings=None):
+        #: the name the command thinks it has. Upon registering a command
+        #: on a :class:`Group` the group will default the command name
+        #: with this information. You should instead use the
+        #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+        self.name = name
+        if context_settings is None:
+            context_settings = {}
+        #: an optional dictionary with defaults passed to the context.
+        self.context_settings = context_settings
+
+    def get_usage(self, ctx):
+        raise NotImplementedError('Base commands cannot get usage')
+
+    def get_help(self, ctx):
+        raise NotImplementedError('Base commands cannot get help')
+
+    def make_context(self, info_name, args, parent=None, **extra):
+        """This function when given an info name and arguments will kick
+        off the parsing and create a new :class:`Context`. It does not
+        invoke the actual command callback though.
+
+        :param info_name: the info name for this invocation. Generally this
+                          is the most descriptive name for the script or
+                          command. For the toplevel script it's usually
+                          the name of the script, for commands below it it's
+                          the name of the command.
+        :param args: the arguments to parse as list of strings.
+        :param parent: the parent context if available.
+        :param extra: extra keyword arguments forwarded to the context
+                      constructor.
+        """
+        for key, value in iteritems(self.context_settings):
+            if key not in extra:
+                extra[key] = value
+        ctx = Context(self, info_name=info_name, parent=parent, **extra)
+        with ctx.scope(cleanup=False):
+            self.parse_args(ctx, args)
+        return ctx
+
+    def parse_args(self, ctx, args):
+        """Given a context and a list of arguments this creates the parser
+        and parses the arguments, then modifies the context as necessary.
+        This is automatically invoked by :meth:`make_context`.
+        """
+        raise NotImplementedError('Base commands do not know how to parse '
+                                  'arguments.')
+
+    def invoke(self, ctx):
+        """Given a context, this invokes the command. The default
+        implementation is raising a not implemented error.
+        """
+        raise NotImplementedError('Base commands are not invokable by default')
+
+    def main(self, args=None, prog_name=None, complete_var=None,
+             standalone_mode=True, **extra):
+        """This is the way to invoke a script with all the bells and
+        whistles as a command line application. This will always terminate
+        the application after a call. If this is not wanted, ``SystemExit``
+        needs to be caught.
+
+        This method is also available by directly calling the instance of
+        a :class:`Command`.
+
+        .. versionadded:: 3.0
+           Added the `standalone_mode` flag to control the standalone mode.
+
+        :param args: the arguments that should be used for parsing. If not
+                     provided, ``sys.argv[1:]`` is used.
+        :param prog_name: the program name that should be used. By default
+                          the program name is constructed by taking the file
+                          name from ``sys.argv[0]``.
+        :param complete_var: the environment variable that controls the
+                             bash completion support.
The default is
+                             ``"_<prog_name>_COMPLETE"`` with prog name in
+                             uppercase.
+        :param standalone_mode: the default behavior is to invoke the script
+                                in standalone mode. Click will then
+                                handle exceptions and convert them into
+                                error messages and the function will never
+                                return but shut down the interpreter. If
+                                this is set to `False` they will be
+                                propagated to the caller and the return
+                                value of this function is the return value
+                                of :meth:`invoke`.
+        :param extra: extra keyword arguments are forwarded to the context
+                      constructor. See :class:`Context` for more information.
+        """
+        # If we are in Python 3, we will verify that the environment is
+        # sane at this point or reject further execution to avoid a
+        # broken script.
+        if not PY2:
+            _verify_python3_env()
+        else:
+            _check_for_unicode_literals()
+
+        if args is None:
+            args = get_os_args()
+        else:
+            args = list(args)
+
+        if prog_name is None:
+            prog_name = make_str(os.path.basename(
+                sys.argv and sys.argv[0] or __file__))
+
+        # Hook for the Bash completion. This only activates if the Bash
+        # completion is actually enabled, otherwise this is quite a fast
+        # noop.
+        _bashcomplete(self, prog_name, complete_var)
+
+        try:
+            try:
+                with self.make_context(prog_name, args, **extra) as ctx:
+                    rv = self.invoke(ctx)
+                    if not standalone_mode:
+                        return rv
+                    ctx.exit()
+            except (EOFError, KeyboardInterrupt):
+                echo(file=sys.stderr)
+                raise Abort()
+            except ClickException as e:
+                if not standalone_mode:
+                    raise
+                e.show()
+                sys.exit(e.exit_code)
+        except Abort:
+            if not standalone_mode:
+                raise
+            echo('Aborted!', file=sys.stderr)
+            sys.exit(1)
+
+    def __call__(self, *args, **kwargs):
+        """Alias for :meth:`main`."""
+        return self.main(*args, **kwargs)
+
+
+class Command(BaseCommand):
+    """Commands are the basic building block of command line interfaces in
+    Click. A basic command handles command line parsing and might dispatch
+    more parsing to commands nested below it.
+
+    .. versionchanged:: 2.0
+       Added the `context_settings` parameter.
+
+    :param name: the name of the command to use unless a group overrides it.
+    :param context_settings: an optional dictionary with defaults that are
+                             passed to the context object.
+    :param callback: the callback to invoke. This is optional.
+    :param params: the parameters to register with this command. This can
+                   be either :class:`Option` or :class:`Argument` objects.
+    :param help: the help string to use for this command.
+    :param epilog: like the help string but it's printed at the end of the
+                   help page after everything else.
+    :param short_help: the short help to use for this command. This is
+                       shown on the command listing of the parent command.
+    :param add_help_option: by default each command registers a ``--help``
+                            option. This can be disabled by this parameter.
+    """
+
+    def __init__(self, name, context_settings=None, callback=None,
+                 params=None, help=None, epilog=None, short_help=None,
+                 options_metavar='[OPTIONS]', add_help_option=True):
+        BaseCommand.__init__(self, name, context_settings)
+        #: the callback to execute when the command fires. This might be
+        #: `None` in which case nothing happens.
+        self.callback = callback
+        #: the list of parameters for this command in the order they
+        #: should show up in the help page and execute. Eager parameters
+        #: will automatically be handled before non eager ones.
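+        #: For commands built with the decorators this list is populated
+        #: from the :func:`option` and :func:`argument` decorators, e.g.::
+        #:
+        #:     @click.command()
+        #:     @click.option('--count', default=1)
+        #:     def hello(count):
+        #:         pass
+        #:
+        #: leaves the :class:`Option` for ``--count`` in ``hello.params``.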
+ self.params = params or [] + self.help = help + self.epilog = epilog + self.options_metavar = options_metavar + if short_help is None and help: + short_help = make_default_short_help(help) + self.short_help = short_help + self.add_help_option = add_help_option + + def get_usage(self, ctx): + formatter = ctx.make_formatter() + self.format_usage(ctx, formatter) + return formatter.getvalue().rstrip('\n') + + def get_params(self, ctx): + rv = self.params + help_option = self.get_help_option(ctx) + if help_option is not None: + rv = rv + [help_option] + return rv + + def format_usage(self, ctx, formatter): + """Writes the usage line into the formatter.""" + pieces = self.collect_usage_pieces(ctx) + formatter.write_usage(ctx.command_path, ' '.join(pieces)) + + def collect_usage_pieces(self, ctx): + """Returns all the pieces that go into the usage line and returns + it as a list of strings. + """ + rv = [self.options_metavar] + for param in self.get_params(ctx): + rv.extend(param.get_usage_pieces(ctx)) + return rv + + def get_help_option_names(self, ctx): + """Returns the names for the help option.""" + all_names = set(ctx.help_option_names) + for param in self.params: + all_names.difference_update(param.opts) + all_names.difference_update(param.secondary_opts) + return all_names + + def get_help_option(self, ctx): + """Returns the help option object.""" + help_options = self.get_help_option_names(ctx) + if not help_options or not self.add_help_option: + return + + def show_help(ctx, param, value): + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + return Option(help_options, is_flag=True, + is_eager=True, expose_value=False, + callback=show_help, + help='Show this message and exit.') + + def make_parser(self, ctx): + """Creates the underlying option parser for this command.""" + parser = OptionParser(ctx) + parser.allow_interspersed_args = ctx.allow_interspersed_args + parser.ignore_unknown_options = ctx.ignore_unknown_options + for param in self.get_params(ctx): + param.add_to_parser(parser, ctx) + return parser + + def get_help(self, ctx): + """Formats the help into a string and returns it. This creates a + formatter and will call into the following formatting methods: + """ + formatter = ctx.make_formatter() + self.format_help(ctx, formatter) + return formatter.getvalue().rstrip('\n') + + def format_help(self, ctx, formatter): + """Writes the help into the formatter if it exists. 
+ + This calls into the following methods: + + - :meth:`format_usage` + - :meth:`format_help_text` + - :meth:`format_options` + - :meth:`format_epilog` + """ + self.format_usage(ctx, formatter) + self.format_help_text(ctx, formatter) + self.format_options(ctx, formatter) + self.format_epilog(ctx, formatter) + + def format_help_text(self, ctx, formatter): + """Writes the help text to the formatter if it exists.""" + if self.help: + formatter.write_paragraph() + with formatter.indentation(): + formatter.write_text(self.help) + + def format_options(self, ctx, formatter): + """Writes all the options into the formatter if they exist.""" + opts = [] + for param in self.get_params(ctx): + rv = param.get_help_record(ctx) + if rv is not None: + opts.append(rv) + + if opts: + with formatter.section('Options'): + formatter.write_dl(opts) + + def format_epilog(self, ctx, formatter): + """Writes the epilog into the formatter if it exists.""" + if self.epilog: + formatter.write_paragraph() + with formatter.indentation(): + formatter.write_text(self.epilog) + + def parse_args(self, ctx, args): + parser = self.make_parser(ctx) + opts, args, param_order = parser.parse_args(args=args) + + for param in iter_params_for_processing( + param_order, self.get_params(ctx)): + value, args = param.handle_parse_result(ctx, opts, args) + + if args and not ctx.allow_extra_args and not ctx.resilient_parsing: + ctx.fail('Got unexpected extra argument%s (%s)' + % (len(args) != 1 and 's' or '', + ' '.join(map(make_str, args)))) + + ctx.args = args + return args + + def invoke(self, ctx): + """Given a context, this invokes the attached callback (if it exists) + in the right way. + """ + if self.callback is not None: + return ctx.invoke(self.callback, **ctx.params) + + +class MultiCommand(Command): + """A multi command is the basic implementation of a command that + dispatches to subcommands. The most common version is the + :class:`Group`. + + :param invoke_without_command: this controls how the multi command itself + is invoked. By default it's only invoked + if a subcommand is provided. + :param no_args_is_help: this controls what happens if no arguments are + provided. This option is enabled by default if + `invoke_without_command` is disabled or disabled + if it's enabled. If enabled this will add + ``--help`` as argument if no arguments are + passed. + :param subcommand_metavar: the string that is used in the documentation + to indicate the subcommand place. + :param chain: if this is set to `True` chaining of multiple subcommands + is enabled. This restricts the form of commands in that + they cannot have optional arguments but it allows + multiple commands to be chained together. + :param result_callback: the result callback to attach to this multi + command. + """ + allow_extra_args = True + allow_interspersed_args = False + + def __init__(self, name=None, invoke_without_command=False, + no_args_is_help=None, subcommand_metavar=None, + chain=False, result_callback=None, **attrs): + Command.__init__(self, name, **attrs) + if no_args_is_help is None: + no_args_is_help = not invoke_without_command + self.no_args_is_help = no_args_is_help + self.invoke_without_command = invoke_without_command + if subcommand_metavar is None: + if chain: + subcommand_metavar = SUBCOMMANDS_METAVAR + else: + subcommand_metavar = SUBCOMMAND_METAVAR + self.subcommand_metavar = subcommand_metavar + self.chain = chain + #: The result callback that is stored. This can be set or + #: overridden with the :func:`resultcallback` decorator. 
+ self.result_callback = result_callback + + if self.chain: + for param in self.params: + if isinstance(param, Argument) and not param.required: + raise RuntimeError('Multi commands in chain mode cannot ' + 'have optional arguments.') + + def collect_usage_pieces(self, ctx): + rv = Command.collect_usage_pieces(self, ctx) + rv.append(self.subcommand_metavar) + return rv + + def format_options(self, ctx, formatter): + Command.format_options(self, ctx, formatter) + self.format_commands(ctx, formatter) + + def resultcallback(self, replace=False): + """Adds a result callback to the chain command. By default if a + result callback is already registered this will chain them but + this can be disabled with the `replace` parameter. The result + callback is invoked with the return value of the subcommand + (or the list of return values from all subcommands if chaining + is enabled) as well as the parameters as they would be passed + to the main callback. + + Example:: + + @click.group() + @click.option('-i', '--input', default=23) + def cli(input): + return 42 + + @cli.resultcallback() + def process_result(result, input): + return result + input + + .. versionadded:: 3.0 + + :param replace: if set to `True` an already existing result + callback will be removed. + """ + def decorator(f): + old_callback = self.result_callback + if old_callback is None or replace: + self.result_callback = f + return f + def function(__value, *args, **kwargs): + return f(old_callback(__value, *args, **kwargs), + *args, **kwargs) + self.result_callback = rv = update_wrapper(function, f) + return rv + return decorator + + def format_commands(self, ctx, formatter): + """Extra format methods for multi methods that adds all the commands + after the options. + """ + rows = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + # What is this, the tool lied about a command. Ignore it + if cmd is None: + continue + + help = cmd.short_help or '' + rows.append((subcommand, help)) + + if rows: + with formatter.section('Commands'): + formatter.write_dl(rows) + + def parse_args(self, ctx, args): + if not args and self.no_args_is_help and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + + rest = Command.parse_args(self, ctx, args) + if self.chain: + ctx.protected_args = rest + ctx.args = [] + elif rest: + ctx.protected_args, ctx.args = rest[:1], rest[1:] + + return ctx.args + + def invoke(self, ctx): + def _process_result(value): + if self.result_callback is not None: + value = ctx.invoke(self.result_callback, value, + **ctx.params) + return value + + if not ctx.protected_args: + # If we are invoked without command the chain flag controls + # how this happens. If we are not in chain mode, the return + # value here is the return value of the command. + # If however we are in chain mode, the return value is the + # return value of the result processor invoked with an empty + # list (which means that no subcommand actually was executed). + if self.invoke_without_command: + if not self.chain: + return Command.invoke(self, ctx) + with ctx: + Command.invoke(self, ctx) + return _process_result([]) + ctx.fail('Missing command.') + + # Fetch args back out + args = ctx.protected_args + ctx.args + ctx.args = [] + ctx.protected_args = [] + + # If we're not in chain mode, we only allow the invocation of a + # single command but we also inform the current context about the + # name of the command to invoke. 
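+        # For example, for an invocation like `cli sub --opt x`
+        # (illustrative command line) this resolves `sub`, records it as
+        # ctx.invoked_subcommand and runs it in a child context.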
+ if not self.chain: + # Make sure the context is entered so we do not clean up + # resources until the result processor has worked. + with ctx: + cmd_name, cmd, args = self.resolve_command(ctx, args) + ctx.invoked_subcommand = cmd_name + Command.invoke(self, ctx) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) + with sub_ctx: + return _process_result(sub_ctx.command.invoke(sub_ctx)) + + # In chain mode we create the contexts step by step, but after the + # base command has been invoked. Because at that point we do not + # know the subcommands yet, the invoked subcommand attribute is + # set to ``*`` to inform the command that subcommands are executed + # but nothing else. + with ctx: + ctx.invoked_subcommand = args and '*' or None + Command.invoke(self, ctx) + + # Otherwise we make every single context and invoke them in a + # chain. In that case the return value to the result processor + # is the list of all invoked subcommand's results. + contexts = [] + while args: + cmd_name, cmd, args = self.resolve_command(ctx, args) + sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, + allow_extra_args=True, + allow_interspersed_args=False) + contexts.append(sub_ctx) + args, sub_ctx.args = sub_ctx.args, [] + + rv = [] + for sub_ctx in contexts: + with sub_ctx: + rv.append(sub_ctx.command.invoke(sub_ctx)) + return _process_result(rv) + + def resolve_command(self, ctx, args): + cmd_name = make_str(args[0]) + original_cmd_name = cmd_name + + # Get the command + cmd = self.get_command(ctx, cmd_name) + + # If we can't find the command but there is a normalization + # function available, we try with that one. + if cmd is None and ctx.token_normalize_func is not None: + cmd_name = ctx.token_normalize_func(cmd_name) + cmd = self.get_command(ctx, cmd_name) + + # If we don't find the command we want to show an error message + # to the user that it was not provided. However, there is + # something else we should do: if the first argument looks like + # an option we want to kick off parsing again for arguments to + # resolve things like --help which now should go to the main + # place. + if cmd is None: + if split_opt(cmd_name)[0]: + self.parse_args(ctx, ctx.args) + ctx.fail('No such command "%s".' % original_cmd_name) + + return cmd_name, cmd, args[1:] + + def get_command(self, ctx, cmd_name): + """Given a context and a command name, this returns a + :class:`Command` object if it exists or returns `None`. + """ + raise NotImplementedError() + + def list_commands(self, ctx): + """Returns a list of subcommand names in the order they should + appear. + """ + return [] + + +class Group(MultiCommand): + """A group allows a command to have subcommands attached. This is the + most common way to implement nesting in Click. + + :param commands: a dictionary of commands. + """ + + def __init__(self, name=None, commands=None, **attrs): + MultiCommand.__init__(self, name, **attrs) + #: the registered subcommands by their exported names. + self.commands = commands or {} + + def add_command(self, cmd, name=None): + """Registers another :class:`Command` with this group. If the name + is not provided, the name of the command is used. + """ + name = name or cmd.name + if name is None: + raise TypeError('Command has no name.') + _check_multicommand(self, name, cmd, register=True) + self.commands[name] = cmd + + def command(self, *args, **kwargs): + """A shortcut decorator for declaring and attaching a command to + the group. 
This takes the same arguments as :func:`command` but
+        immediately registers the created command with this instance by
+        calling into :meth:`add_command`.
+        """
+        def decorator(f):
+            cmd = command(*args, **kwargs)(f)
+            self.add_command(cmd)
+            return cmd
+        return decorator
+
+    def group(self, *args, **kwargs):
+        """A shortcut decorator for declaring and attaching a group to
+        the group. This takes the same arguments as :func:`group` but
+        immediately registers the created command with this instance by
+        calling into :meth:`add_command`.
+        """
+        def decorator(f):
+            cmd = group(*args, **kwargs)(f)
+            self.add_command(cmd)
+            return cmd
+        return decorator
+
+    def get_command(self, ctx, cmd_name):
+        return self.commands.get(cmd_name)
+
+    def list_commands(self, ctx):
+        return sorted(self.commands)
+
+
+class CommandCollection(MultiCommand):
+    """A command collection is a multi command that merges multiple multi
+    commands together into one. This is a straightforward implementation
+    that accepts a list of different multi commands as sources and
+    provides all the commands for each of them.
+    """
+
+    def __init__(self, name=None, sources=None, **attrs):
+        MultiCommand.__init__(self, name, **attrs)
+        #: The list of registered multi commands.
+        self.sources = sources or []
+
+    def add_source(self, multi_cmd):
+        """Adds a new multi command to the chain dispatcher."""
+        self.sources.append(multi_cmd)
+
+    def get_command(self, ctx, cmd_name):
+        for source in self.sources:
+            rv = source.get_command(ctx, cmd_name)
+            if rv is not None:
+                if self.chain:
+                    _check_multicommand(self, cmd_name, rv)
+                return rv
+
+    def list_commands(self, ctx):
+        rv = set()
+        for source in self.sources:
+            rv.update(source.list_commands(ctx))
+        return sorted(rv)
+
+
+class Parameter(object):
+    """A parameter to a command comes in two versions: they are either
+    :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+    not supported by design as some of the internals for parsing are
+    intentionally not finalized.
+
+    Some settings are supported by both options and arguments.
+
+    .. versionchanged:: 2.0
+       Changed signature for parameter callback to also be passed the
+       parameter. In Click 2.0, the old callback format will still work,
+       but it will raise a warning to give you a chance to migrate the
+       code easier.
+
+    :param param_decls: the parameter declarations for this option or
+                        argument. This is a list of flags or argument
+                        names.
+    :param type: the type that should be used. Either a :class:`ParamType`
+                 or a Python type. The latter is converted into the former
+                 automatically if supported.
+    :param required: controls if this is optional or not.
+    :param default: the default value if omitted. This can also be a callable,
+                    in which case it's invoked when the default is needed
+                    without any arguments.
+    :param callback: a callback that should be executed after the parameter
+                     was matched. This is called as ``fn(ctx, param,
+                     value)`` and needs to return the value. Before Click
+                     2.0, the signature was ``(ctx, value)``.
+    :param nargs: the number of arguments to match. If not ``1`` the return
+                  value is a tuple instead of single value. The default for
+                  nargs is ``1`` (except if the type is a tuple, then it's
+                  the arity of the tuple).
+    :param metavar: how the value is represented in the help page.
+    :param expose_value: if this is `True` then the value is passed onwards
+                         to the command callback and stored on the context,
+                         otherwise it's skipped.
+ :param is_eager: eager values are processed before non eager ones. This + should not be set for arguments or it will inverse the + order of processing. + :param envvar: a string or list of strings that are environment variables + that should be checked. + """ + param_type_name = 'parameter' + + def __init__(self, param_decls=None, type=None, required=False, + default=None, callback=None, nargs=None, metavar=None, + expose_value=True, is_eager=False, envvar=None): + self.name, self.opts, self.secondary_opts = \ + self._parse_decls(param_decls or (), expose_value) + + self.type = convert_type(type, default) + + # Default nargs to what the type tells us if we have that + # information available. + if nargs is None: + if self.type.is_composite: + nargs = self.type.arity + else: + nargs = 1 + + self.required = required + self.callback = callback + self.nargs = nargs + self.multiple = False + self.expose_value = expose_value + self.default = default + self.is_eager = is_eager + self.metavar = metavar + self.envvar = envvar + + @property + def human_readable_name(self): + """Returns the human readable name of this parameter. This is the + same as the name for options, but the metavar for arguments. + """ + return self.name + + def make_metavar(self): + if self.metavar is not None: + return self.metavar + metavar = self.type.get_metavar(self) + if metavar is None: + metavar = self.type.name.upper() + if self.nargs != 1: + metavar += '...' + return metavar + + def get_default(self, ctx): + """Given a context variable this calculates the default value.""" + # Otherwise go with the regular default. + if callable(self.default): + rv = self.default() + else: + rv = self.default + return self.type_cast_value(ctx, rv) + + def add_to_parser(self, parser, ctx): + pass + + def consume_value(self, ctx, opts): + value = opts.get(self.name) + if value is None: + value = ctx.lookup_default(self.name) + if value is None: + value = self.value_from_envvar(ctx) + return value + + def type_cast_value(self, ctx, value): + """Given a value this runs it properly through the type system. + This automatically handles things like `nargs` and `multiple` as + well as composite types. + """ + if self.type.is_composite: + if self.nargs <= 1: + raise TypeError('Attempted to invoke composite type ' + 'but nargs has been set to %s. This is ' + 'not supported; nargs needs to be set to ' + 'a fixed value > 1.' % self.nargs) + if self.multiple: + return tuple(self.type(x or (), self, ctx) for x in value or ()) + return self.type(value or (), self, ctx) + + def _convert(value, level): + if level == 0: + return self.type(value, self, ctx) + return tuple(_convert(x, level - 1) for x in value or ()) + return _convert(value, (self.nargs != 1) + bool(self.multiple)) + + def process_value(self, ctx, value): + """Given a value and context this runs the logic to convert the + value as necessary. + """ + # If the value we were given is None we do nothing. This way + # code that calls this can easily figure out if something was + # not provided. Otherwise it would be converted into an empty + # tuple for multiple invocations which is inconvenient. 
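+        # E.g. an option that was never passed arrives here as None and is
+        # returned as None, letting full_process_value() fall back to
+        # get_default() afterwards.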
+ if value is not None: + return self.type_cast_value(ctx, value) + + def value_is_missing(self, value): + if value is None: + return True + if (self.nargs != 1 or self.multiple) and value == (): + return True + return False + + def full_process_value(self, ctx, value): + value = self.process_value(ctx, value) + + if value is None: + value = self.get_default(ctx) + + if self.required and self.value_is_missing(value): + raise MissingParameter(ctx=ctx, param=self) + + return value + + def resolve_envvar_value(self, ctx): + if self.envvar is None: + return + if isinstance(self.envvar, (tuple, list)): + for envvar in self.envvar: + rv = os.environ.get(envvar) + if rv is not None: + return rv + else: + return os.environ.get(self.envvar) + + def value_from_envvar(self, ctx): + rv = self.resolve_envvar_value(ctx) + if rv is not None and self.nargs != 1: + rv = self.type.split_envvar_value(rv) + return rv + + def handle_parse_result(self, ctx, opts, args): + with augment_usage_errors(ctx, param=self): + value = self.consume_value(ctx, opts) + try: + value = self.full_process_value(ctx, value) + except Exception: + if not ctx.resilient_parsing: + raise + value = None + if self.callback is not None: + try: + value = invoke_param_callback( + self.callback, ctx, self, value) + except Exception: + if not ctx.resilient_parsing: + raise + + if self.expose_value: + ctx.params[self.name] = value + return value, args + + def get_help_record(self, ctx): + pass + + def get_usage_pieces(self, ctx): + return [] + + +class Option(Parameter): + """Options are usually optional values on the command line and + have some extra features that arguments don't have. + + All other parameters are passed onwards to the parameter constructor. + + :param show_default: controls if the default value should be shown on the + help page. Normally, defaults are not shown. + :param prompt: if set to `True` or a non empty string then the user will + be prompted for input if not set. If set to `True` the + prompt will be the option name capitalized. + :param confirmation_prompt: if set then the value will need to be confirmed + if it was prompted for. + :param hide_input: if this is `True` then the input on the prompt will be + hidden from the user. This is useful for password + input. + :param is_flag: forces this option to act as a flag. The default is + auto detection. + :param flag_value: which value should be used for this flag if it's + enabled. This is set to a boolean automatically if + the option string contains a slash to mark two options. + :param multiple: if this is set to `True` then the argument is accepted + multiple times and recorded. This is similar to ``nargs`` + in how it works but supports arbitrary number of + arguments. + :param count: this flag makes an option increment an integer. + :param allow_from_autoenv: if this is enabled then the value of this + parameter will be pulled from an environment + variable in case a prefix is defined on the + context. + :param help: the help string. 
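+
+    A typical declaration, using only the parameters described above,
+    might look like::
+
+        @click.command()
+        @click.option('--count', default=1, show_default=True,
+                      help='Number of greetings.')
+        def hello(count):
+            pass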
+ """ + param_type_name = 'option' + + def __init__(self, param_decls=None, show_default=False, + prompt=False, confirmation_prompt=False, + hide_input=False, is_flag=None, flag_value=None, + multiple=False, count=False, allow_from_autoenv=True, + type=None, help=None, **attrs): + default_is_missing = attrs.get('default', _missing) is _missing + Parameter.__init__(self, param_decls, type=type, **attrs) + + if prompt is True: + prompt_text = self.name.replace('_', ' ').capitalize() + elif prompt is False: + prompt_text = None + else: + prompt_text = prompt + self.prompt = prompt_text + self.confirmation_prompt = confirmation_prompt + self.hide_input = hide_input + + # Flags + if is_flag is None: + if flag_value is not None: + is_flag = True + else: + is_flag = bool(self.secondary_opts) + if is_flag and default_is_missing: + self.default = False + if flag_value is None: + flag_value = not self.default + self.is_flag = is_flag + self.flag_value = flag_value + if self.is_flag and isinstance(self.flag_value, bool) \ + and type is None: + self.type = BOOL + self.is_bool_flag = True + else: + self.is_bool_flag = False + + # Counting + self.count = count + if count: + if type is None: + self.type = IntRange(min=0) + if default_is_missing: + self.default = 0 + + self.multiple = multiple + self.allow_from_autoenv = allow_from_autoenv + self.help = help + self.show_default = show_default + + # Sanity check for stuff we don't support + if __debug__: + if self.nargs < 0: + raise TypeError('Options cannot have nargs < 0') + if self.prompt and self.is_flag and not self.is_bool_flag: + raise TypeError('Cannot prompt for flags that are not bools.') + if not self.is_bool_flag and self.secondary_opts: + raise TypeError('Got secondary option for non boolean flag.') + if self.is_bool_flag and self.hide_input \ + and self.prompt is not None: + raise TypeError('Hidden input does not work with boolean ' + 'flag prompts.') + if self.count: + if self.multiple: + raise TypeError('Options cannot be multiple and count ' + 'at the same time.') + elif self.is_flag: + raise TypeError('Options cannot be count and flags at ' + 'the same time.') + + def _parse_decls(self, decls, expose_value): + opts = [] + secondary_opts = [] + name = None + possible_names = [] + + for decl in decls: + if isidentifier(decl): + if name is not None: + raise TypeError('Name defined twice') + name = decl + else: + split_char = decl[:1] == '/' and ';' or '/' + if split_char in decl: + first, second = decl.split(split_char, 1) + first = first.rstrip() + if first: + possible_names.append(split_opt(first)) + opts.append(first) + second = second.lstrip() + if second: + secondary_opts.append(second.lstrip()) + else: + possible_names.append(split_opt(decl)) + opts.append(decl) + + if name is None and possible_names: + possible_names.sort(key=lambda x: len(x[0])) + name = possible_names[-1][1].replace('-', '_').lower() + if not isidentifier(name): + name = None + + if name is None: + if not expose_value: + return None, opts, secondary_opts + raise TypeError('Could not determine name for option') + + if not opts and not secondary_opts: + raise TypeError('No options defined but a name was passed (%s). ' + 'Did you mean to declare an argument instead ' + 'of an option?' 
% name)
+
+        return name, opts, secondary_opts
+
+    def add_to_parser(self, parser, ctx):
+        kwargs = {
+            'dest': self.name,
+            'nargs': self.nargs,
+            'obj': self,
+        }
+
+        if self.multiple:
+            action = 'append'
+        elif self.count:
+            action = 'count'
+        else:
+            action = 'store'
+
+        if self.is_flag:
+            kwargs.pop('nargs', None)
+            if self.is_bool_flag and self.secondary_opts:
+                parser.add_option(self.opts, action=action + '_const',
+                                  const=True, **kwargs)
+                parser.add_option(self.secondary_opts, action=action +
+                                  '_const', const=False, **kwargs)
+            else:
+                parser.add_option(self.opts, action=action + '_const',
+                                  const=self.flag_value,
+                                  **kwargs)
+        else:
+            kwargs['action'] = action
+            parser.add_option(self.opts, **kwargs)
+
+    def get_help_record(self, ctx):
+        any_prefix_is_slash = []
+
+        def _write_opts(opts):
+            rv, any_slashes = join_options(opts)
+            if any_slashes:
+                any_prefix_is_slash[:] = [True]
+            if not self.is_flag and not self.count:
+                rv += ' ' + self.make_metavar()
+            return rv
+
+        rv = [_write_opts(self.opts)]
+        if self.secondary_opts:
+            rv.append(_write_opts(self.secondary_opts))
+
+        help = self.help or ''
+        extra = []
+        if self.default is not None and self.show_default:
+            extra.append('default: %s' % (
+                ', '.join('%s' % d for d in self.default)
+                if isinstance(self.default, (list, tuple))
+                else self.default, ))
+        if self.required:
+            extra.append('required')
+        if extra:
+            help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
+
+        return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
+
+    def get_default(self, ctx):
+        # If we're a non boolean flag our default is more complex because
+        # we need to look at all flags in the same group to figure out
+        # if we're the default one in which case we return the flag
+        # value as default.
+        if self.is_flag and not self.is_bool_flag:
+            for param in ctx.command.params:
+                if param.name == self.name and param.default:
+                    return param.flag_value
+            return None
+        return Parameter.get_default(self, ctx)
+
+    def prompt_for_value(self, ctx):
+        """This is an alternative flow that can be activated in the full
+        value processing if a value does not exist. It will prompt the
+        user until a valid value exists and then returns the processed
+        value as result.
+        """
+        # Calculate the default before prompting anything to be stable.
+        default = self.get_default(ctx)
+
+        # If this is a prompt for a flag we need to handle this
+        # differently.
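+        # A boolean flag prompt renders as a yes/no confirmation (for
+        # example `Continue? [y/N]:`) instead of a free-form prompt.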
+ if self.is_bool_flag: + return confirm(self.prompt, default) + + return prompt(self.prompt, default=default, + hide_input=self.hide_input, + confirmation_prompt=self.confirmation_prompt, + value_proc=lambda x: self.process_value(ctx, x)) + + def resolve_envvar_value(self, ctx): + rv = Parameter.resolve_envvar_value(self, ctx) + if rv is not None: + return rv + if self.allow_from_autoenv and \ + ctx.auto_envvar_prefix is not None: + envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) + return os.environ.get(envvar) + + def value_from_envvar(self, ctx): + rv = self.resolve_envvar_value(ctx) + if rv is None: + return None + value_depth = (self.nargs != 1) + bool(self.multiple) + if value_depth > 0 and rv is not None: + rv = self.type.split_envvar_value(rv) + if self.multiple and self.nargs != 1: + rv = batch(rv, self.nargs) + return rv + + def full_process_value(self, ctx, value): + if value is None and self.prompt is not None \ + and not ctx.resilient_parsing: + return self.prompt_for_value(ctx) + return Parameter.full_process_value(self, ctx, value) + + +class Argument(Parameter): + """Arguments are positional parameters to a command. They generally + provide fewer features than options but can have infinite ``nargs`` + and are required by default. + + All parameters are passed onwards to the parameter constructor. + """ + param_type_name = 'argument' + + def __init__(self, param_decls, required=None, **attrs): + if required is None: + if attrs.get('default') is not None: + required = False + else: + required = attrs.get('nargs', 1) > 0 + Parameter.__init__(self, param_decls, required=required, **attrs) + if self.default is not None and self.nargs < 0: + raise TypeError('nargs=-1 in combination with a default value ' + 'is not supported.') + + @property + def human_readable_name(self): + if self.metavar is not None: + return self.metavar + return self.name.upper() + + def make_metavar(self): + if self.metavar is not None: + return self.metavar + var = self.name.upper() + if not self.required: + var = '[%s]' % var + if self.nargs != 1: + var += '...' + return var + + def _parse_decls(self, decls, expose_value): + if not decls: + if not expose_value: + return None, [], [] + raise TypeError('Could not determine name for argument') + if len(decls) == 1: + name = arg = decls[0] + name = name.replace('-', '_').lower() + elif len(decls) == 2: + name, arg = decls + else: + raise TypeError('Arguments take exactly one or two ' + 'parameter declarations, got %d' % len(decls)) + return name, [arg], [] + + def get_usage_pieces(self, ctx): + return [self.make_metavar()] + + def add_to_parser(self, parser, ctx): + parser.add_argument(dest=self.name, nargs=self.nargs, + obj=self) + + +# Circular dependency between decorators and core +from .decorators import command, group diff --git a/venv/lib/python2.7/site-packages/click/decorators.py b/venv/lib/python2.7/site-packages/click/decorators.py new file mode 100644 index 0000000..9893452 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/decorators.py @@ -0,0 +1,304 @@ +import sys +import inspect + +from functools import update_wrapper + +from ._compat import iteritems +from ._unicodefun import _check_for_unicode_literals +from .utils import echo +from .globals import get_current_context + + +def pass_context(f): + """Marks a callback as wanting to receive the current context + object as first argument. 
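+
+    For example::
+
+        @click.command()
+        @click.pass_context
+        def cli(ctx):
+            click.echo(ctx.info_name)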
+ """ + def new_func(*args, **kwargs): + return f(get_current_context(), *args, **kwargs) + return update_wrapper(new_func, f) + + +def pass_obj(f): + """Similar to :func:`pass_context`, but only pass the object on the + context onwards (:attr:`Context.obj`). This is useful if that object + represents the state of a nested system. + """ + def new_func(*args, **kwargs): + return f(get_current_context().obj, *args, **kwargs) + return update_wrapper(new_func, f) + + +def make_pass_decorator(object_type, ensure=False): + """Given an object type this creates a decorator that will work + similar to :func:`pass_obj` but instead of passing the object of the + current context, it will find the innermost context of type + :func:`object_type`. + + This generates a decorator that works roughly like this:: + + from functools import update_wrapper + + def decorator(f): + @pass_context + def new_func(ctx, *args, **kwargs): + obj = ctx.find_object(object_type) + return ctx.invoke(f, obj, *args, **kwargs) + return update_wrapper(new_func, f) + return decorator + + :param object_type: the type of the object to pass. + :param ensure: if set to `True`, a new object will be created and + remembered on the context if it's not there yet. + """ + def decorator(f): + def new_func(*args, **kwargs): + ctx = get_current_context() + if ensure: + obj = ctx.ensure_object(object_type) + else: + obj = ctx.find_object(object_type) + if obj is None: + raise RuntimeError('Managed to invoke callback without a ' + 'context object of type %r existing' + % object_type.__name__) + return ctx.invoke(f, obj, *args[1:], **kwargs) + return update_wrapper(new_func, f) + return decorator + + +def _make_command(f, name, attrs, cls): + if isinstance(f, Command): + raise TypeError('Attempted to convert a callback into a ' + 'command twice.') + try: + params = f.__click_params__ + params.reverse() + del f.__click_params__ + except AttributeError: + params = [] + help = attrs.get('help') + if help is None: + help = inspect.getdoc(f) + if isinstance(help, bytes): + help = help.decode('utf-8') + else: + help = inspect.cleandoc(help) + attrs['help'] = help + _check_for_unicode_literals() + return cls(name=name or f.__name__.lower(), + callback=f, params=params, **attrs) + + +def command(name=None, cls=None, **attrs): + """Creates a new :class:`Command` and uses the decorated function as + callback. This will also automatically attach all decorated + :func:`option`\s and :func:`argument`\s as parameters to the command. + + The name of the command defaults to the name of the function. If you + want to change that, you can pass the intended name as the first + argument. + + All keyword arguments are forwarded to the underlying command class. + + Once decorated the function turns into a :class:`Command` instance + that can be invoked as a command line utility or be attached to a + command :class:`Group`. + + :param name: the name of the command. This defaults to the function + name. + :param cls: the command class to instantiate. This defaults to + :class:`Command`. + """ + if cls is None: + cls = Command + def decorator(f): + cmd = _make_command(f, name, attrs, cls) + cmd.__doc__ = f.__doc__ + return cmd + return decorator + + +def group(name=None, **attrs): + """Creates a new :class:`Group` with a function as callback. This + works otherwise the same as :func:`command` just that the `cls` + parameter is set to :class:`Group`. 
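+
+    A minimal usage sketch (names are illustrative)::
+
+        @click.group()
+        def cli():
+            pass
+
+        @cli.command()
+        def build():
+            pass
+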
+ """ + attrs.setdefault('cls', Group) + return command(name, **attrs) + + +def _param_memo(f, param): + if isinstance(f, Command): + f.params.append(param) + else: + if not hasattr(f, '__click_params__'): + f.__click_params__ = [] + f.__click_params__.append(param) + + +def argument(*param_decls, **attrs): + """Attaches an argument to the command. All positional arguments are + passed as parameter declarations to :class:`Argument`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Argument` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the argument class to instantiate. This defaults to + :class:`Argument`. + """ + def decorator(f): + ArgumentClass = attrs.pop('cls', Argument) + _param_memo(f, ArgumentClass(param_decls, **attrs)) + return f + return decorator + + +def option(*param_decls, **attrs): + """Attaches an option to the command. All positional arguments are + passed as parameter declarations to :class:`Option`; all keyword + arguments are forwarded unchanged (except ``cls``). + This is equivalent to creating an :class:`Option` instance manually + and attaching it to the :attr:`Command.params` list. + + :param cls: the option class to instantiate. This defaults to + :class:`Option`. + """ + def decorator(f): + if 'help' in attrs: + attrs['help'] = inspect.cleandoc(attrs['help']) + OptionClass = attrs.pop('cls', Option) + _param_memo(f, OptionClass(param_decls, **attrs)) + return f + return decorator + + +def confirmation_option(*param_decls, **attrs): + """Shortcut for confirmation prompts that can be ignored by passing + ``--yes`` as parameter. + + This is equivalent to decorating a function with :func:`option` with + the following parameters:: + + def callback(ctx, param, value): + if not value: + ctx.abort() + + @click.command() + @click.option('--yes', is_flag=True, callback=callback, + expose_value=False, prompt='Do you want to continue?') + def dropdb(): + pass + """ + def decorator(f): + def callback(ctx, param, value): + if not value: + ctx.abort() + attrs.setdefault('is_flag', True) + attrs.setdefault('callback', callback) + attrs.setdefault('expose_value', False) + attrs.setdefault('prompt', 'Do you want to continue?') + attrs.setdefault('help', 'Confirm the action without prompting.') + return option(*(param_decls or ('--yes',)), **attrs)(f) + return decorator + + +def password_option(*param_decls, **attrs): + """Shortcut for password prompts. + + This is equivalent to decorating a function with :func:`option` with + the following parameters:: + + @click.command() + @click.option('--password', prompt=True, confirmation_prompt=True, + hide_input=True) + def changeadmin(password): + pass + """ + def decorator(f): + attrs.setdefault('prompt', True) + attrs.setdefault('confirmation_prompt', True) + attrs.setdefault('hide_input', True) + return option(*(param_decls or ('--password',)), **attrs)(f) + return decorator + + +def version_option(version=None, *param_decls, **attrs): + """Adds a ``--version`` option which immediately ends the program + printing out the version number. This is implemented as an eager + option that prints the version and exits the program in the callback. + + :param version: the version number to show. If not provided Click + attempts an auto discovery via setuptools. 
+ :param prog_name: the name of the program (defaults to autodetection) + :param message: custom message to show instead of the default + (``'%(prog)s, version %(version)s'``) + :param others: everything else is forwarded to :func:`option`. + """ + if version is None: + module = sys._getframe(1).f_globals.get('__name__') + def decorator(f): + prog_name = attrs.pop('prog_name', None) + message = attrs.pop('message', '%(prog)s, version %(version)s') + + def callback(ctx, param, value): + if not value or ctx.resilient_parsing: + return + prog = prog_name + if prog is None: + prog = ctx.find_root().info_name + ver = version + if ver is None: + try: + import pkg_resources + except ImportError: + pass + else: + for dist in pkg_resources.working_set: + scripts = dist.get_entry_map().get('console_scripts') or {} + for script_name, entry_point in iteritems(scripts): + if entry_point.module_name == module: + ver = dist.version + break + if ver is None: + raise RuntimeError('Could not determine version') + echo(message % { + 'prog': prog, + 'version': ver, + }, color=ctx.color) + ctx.exit() + + attrs.setdefault('is_flag', True) + attrs.setdefault('expose_value', False) + attrs.setdefault('is_eager', True) + attrs.setdefault('help', 'Show the version and exit.') + attrs['callback'] = callback + return option(*(param_decls or ('--version',)), **attrs)(f) + return decorator + + +def help_option(*param_decls, **attrs): + """Adds a ``--help`` option which immediately ends the program + printing out the help page. This is usually unnecessary to add as + this is added by default to all commands unless suppressed. + + Like :func:`version_option`, this is implemented as eager option that + prints in the callback and exits. + + All arguments are forwarded to :func:`option`. + """ + def decorator(f): + def callback(ctx, param, value): + if value and not ctx.resilient_parsing: + echo(ctx.get_help(), color=ctx.color) + ctx.exit() + attrs.setdefault('is_flag', True) + attrs.setdefault('expose_value', False) + attrs.setdefault('help', 'Show this message and exit.') + attrs.setdefault('is_eager', True) + attrs['callback'] = callback + return option(*(param_decls or ('--help',)), **attrs)(f) + return decorator + + +# Circular dependencies between core and decorators +from .core import Command, Group, Argument, Option diff --git a/venv/lib/python2.7/site-packages/click/exceptions.py b/venv/lib/python2.7/site-packages/click/exceptions.py new file mode 100644 index 0000000..74a4542 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/exceptions.py @@ -0,0 +1,201 @@ +from ._compat import PY2, filename_to_ui, get_text_stderr +from .utils import echo + + +class ClickException(Exception): + """An exception that Click can handle and show to the user.""" + + #: The exit code for this exception + exit_code = 1 + + def __init__(self, message): + if PY2: + if message is not None: + message = message.encode('utf-8') + Exception.__init__(self, message) + self.message = message + + def format_message(self): + return self.message + + def show(self, file=None): + if file is None: + file = get_text_stderr() + echo('Error: %s' % self.format_message(), file=file) + + +class UsageError(ClickException): + """An internal exception that signals a usage error. This typically + aborts any further handling. + + :param message: the error message to display. + :param ctx: optionally the context that caused this error. Click will + fill in the context automatically in some situations. 
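+
+    A sketch of typical use (the message is illustrative)::
+
+        raise UsageError('Exactly one of --json or --csv may be given')
+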
+ """ + exit_code = 2 + + def __init__(self, message, ctx=None): + ClickException.__init__(self, message) + self.ctx = ctx + + def show(self, file=None): + if file is None: + file = get_text_stderr() + color = None + if self.ctx is not None: + color = self.ctx.color + echo(self.ctx.get_usage() + '\n', file=file, color=color) + echo('Error: %s' % self.format_message(), file=file, color=color) + + +class BadParameter(UsageError): + """An exception that formats out a standardized error message for a + bad parameter. This is useful when thrown from a callback or type as + Click will attach contextual information to it (for instance, which + parameter it is). + + .. versionadded:: 2.0 + + :param param: the parameter object that caused this error. This can + be left out, and Click will attach this info itself + if possible. + :param param_hint: a string that shows up as parameter name. This + can be used as alternative to `param` in cases + where custom validation should happen. If it is + a string it's used as such, if it's a list then + each item is quoted and separated. + """ + + def __init__(self, message, ctx=None, param=None, + param_hint=None): + UsageError.__init__(self, message, ctx) + self.param = param + self.param_hint = param_hint + + def format_message(self): + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.opts or [self.param.human_readable_name] + else: + return 'Invalid value: %s' % self.message + if isinstance(param_hint, (tuple, list)): + param_hint = ' / '.join('"%s"' % x for x in param_hint) + return 'Invalid value for %s: %s' % (param_hint, self.message) + + +class MissingParameter(BadParameter): + """Raised if click required an option or argument but it was not + provided when invoking the script. + + .. versionadded:: 4.0 + + :param param_type: a string that indicates the type of the parameter. + The default is to inherit the parameter type from + the given `param`. Valid values are ``'parameter'``, + ``'option'`` or ``'argument'``. + """ + + def __init__(self, message=None, ctx=None, param=None, + param_hint=None, param_type=None): + BadParameter.__init__(self, message, ctx, param, param_hint) + self.param_type = param_type + + def format_message(self): + if self.param_hint is not None: + param_hint = self.param_hint + elif self.param is not None: + param_hint = self.param.opts or [self.param.human_readable_name] + else: + param_hint = None + if isinstance(param_hint, (tuple, list)): + param_hint = ' / '.join('"%s"' % x for x in param_hint) + + param_type = self.param_type + if param_type is None and self.param is not None: + param_type = self.param.param_type_name + + msg = self.message + if self.param is not None: + msg_extra = self.param.type.get_missing_message(self.param) + if msg_extra: + if msg: + msg += '. ' + msg_extra + else: + msg = msg_extra + + return 'Missing %s%s%s%s' % ( + param_type, + param_hint and ' %s' % param_hint or '', + msg and '. ' or '.', + msg or '', + ) + + +class NoSuchOption(UsageError): + """Raised if click attempted to handle an option that does not + exist. + + .. 
versionadded:: 4.0 + """ + + def __init__(self, option_name, message=None, possibilities=None, + ctx=None): + if message is None: + message = 'no such option: %s' % option_name + UsageError.__init__(self, message, ctx) + self.option_name = option_name + self.possibilities = possibilities + + def format_message(self): + bits = [self.message] + if self.possibilities: + if len(self.possibilities) == 1: + bits.append('Did you mean %s?' % self.possibilities[0]) + else: + possibilities = sorted(self.possibilities) + bits.append('(Possible options: %s)' % ', '.join(possibilities)) + return ' '.join(bits) + + +class BadOptionUsage(UsageError): + """Raised if an option is generally supplied but the use of the option + was incorrect. This is for instance raised if the number of arguments + for an option is not correct. + + .. versionadded:: 4.0 + """ + + def __init__(self, message, ctx=None): + UsageError.__init__(self, message, ctx) + + +class BadArgumentUsage(UsageError): + """Raised if an argument is generally supplied but the use of the argument + was incorrect. This is for instance raised if the number of values + for an argument is not correct. + + .. versionadded:: 6.0 + """ + + def __init__(self, message, ctx=None): + UsageError.__init__(self, message, ctx) + + +class FileError(ClickException): + """Raised if a file cannot be opened.""" + + def __init__(self, filename, hint=None): + ui_filename = filename_to_ui(filename) + if hint is None: + hint = 'unknown error' + ClickException.__init__(self, hint) + self.ui_filename = ui_filename + self.filename = filename + + def format_message(self): + return 'Could not open file %s: %s' % (self.ui_filename, self.message) + + +class Abort(RuntimeError): + """An internal signalling exception that signals Click to abort.""" diff --git a/venv/lib/python2.7/site-packages/click/formatting.py b/venv/lib/python2.7/site-packages/click/formatting.py new file mode 100644 index 0000000..a3d6a4d --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/formatting.py @@ -0,0 +1,256 @@ +from contextlib import contextmanager +from .termui import get_terminal_size +from .parser import split_opt +from ._compat import term_len + + +# Can force a width. This is used by the test system +FORCED_WIDTH = None + + +def measure_table(rows): + widths = {} + for row in rows: + for idx, col in enumerate(row): + widths[idx] = max(widths.get(idx, 0), term_len(col)) + return tuple(y for x, y in sorted(widths.items())) + + +def iter_rows(rows, col_count): + for row in rows: + row = tuple(row) + yield row + ('',) * (col_count - len(row)) + + +def wrap_text(text, width=78, initial_indent='', subsequent_indent='', + preserve_paragraphs=False): + """A helper function that intelligently wraps text. By default, it + assumes that it operates on a single paragraph of text but if the + `preserve_paragraphs` parameter is provided it will intelligently + handle paragraphs (defined by two empty lines). + + If paragraphs are handled, a paragraph can be prefixed with an empty + line containing the ``\\b`` character (``\\x08``) to indicate that + no rewrapping should happen in that block. + + :param text: the text that should be rewrapped. + :param width: the maximum width for the text. + :param initial_indent: the initial indent that should be placed on the + first line as a string. + :param subsequent_indent: the indent string that should be placed on + each consecutive line. + :param preserve_paragraphs: if this flag is set then the wrapping will + intelligently handle paragraphs. 
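+
+    A small sketch of a typical call::
+
+        wrap_text('some long help text', width=40,
+                  initial_indent='  ', subsequent_indent='  ')
+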
+ """ + from ._textwrap import TextWrapper + text = text.expandtabs() + wrapper = TextWrapper(width, initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + replace_whitespace=False) + if not preserve_paragraphs: + return wrapper.fill(text) + + p = [] + buf = [] + indent = None + + def _flush_par(): + if not buf: + return + if buf[0].strip() == '\b': + p.append((indent or 0, True, '\n'.join(buf[1:]))) + else: + p.append((indent or 0, False, ' '.join(buf))) + del buf[:] + + for line in text.splitlines(): + if not line: + _flush_par() + indent = None + else: + if indent is None: + orig_len = term_len(line) + line = line.lstrip() + indent = orig_len - term_len(line) + buf.append(line) + _flush_par() + + rv = [] + for indent, raw, text in p: + with wrapper.extra_indent(' ' * indent): + if raw: + rv.append(wrapper.indent_only(text)) + else: + rv.append(wrapper.fill(text)) + + return '\n\n'.join(rv) + + +class HelpFormatter(object): + """This class helps with formatting text-based help pages. It's + usually just needed for very special internal cases, but it's also + exposed so that developers can write their own fancy outputs. + + At present, it always writes into memory. + + :param indent_increment: the additional increment for each level. + :param width: the width for the text. This defaults to the terminal + width clamped to a maximum of 78. + """ + + def __init__(self, indent_increment=2, width=None, max_width=None): + self.indent_increment = indent_increment + if max_width is None: + max_width = 80 + if width is None: + width = FORCED_WIDTH + if width is None: + width = max(min(get_terminal_size()[0], max_width) - 2, 50) + self.width = width + self.current_indent = 0 + self.buffer = [] + + def write(self, string): + """Writes a unicode string into the internal buffer.""" + self.buffer.append(string) + + def indent(self): + """Increases the indentation.""" + self.current_indent += self.indent_increment + + def dedent(self): + """Decreases the indentation.""" + self.current_indent -= self.indent_increment + + def write_usage(self, prog, args='', prefix='Usage: '): + """Writes a usage line into the buffer. + + :param prog: the program name. + :param args: whitespace separated list of arguments. + :param prefix: the prefix for the first line. + """ + usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) + text_width = self.width - self.current_indent + + if text_width >= (term_len(usage_prefix) + 20): + # The arguments will fit to the right of the prefix. + indent = ' ' * term_len(usage_prefix) + self.write(wrap_text(args, text_width, + initial_indent=usage_prefix, + subsequent_indent=indent)) + else: + # The prefix is too long, put the arguments on the next line. + self.write(usage_prefix) + self.write('\n') + indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) + self.write(wrap_text(args, text_width, + initial_indent=indent, + subsequent_indent=indent)) + + self.write('\n') + + def write_heading(self, heading): + """Writes a heading into the buffer.""" + self.write('%*s%s:\n' % (self.current_indent, '', heading)) + + def write_paragraph(self): + """Writes a paragraph into the buffer.""" + if self.buffer: + self.write('\n') + + def write_text(self, text): + """Writes re-indented text into the buffer. This rewraps and + preserves paragraphs. 
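+
+        A small sketch (the formatter instance is illustrative)::
+
+            formatter = HelpFormatter()
+            formatter.write_text('A long description that will be rewrapped.')
+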
+        """
+        text_width = max(self.width - self.current_indent, 11)
+        indent = ' ' * self.current_indent
+        self.write(wrap_text(text, text_width,
+                             initial_indent=indent,
+                             subsequent_indent=indent,
+                             preserve_paragraphs=True))
+        self.write('\n')
+
+    def write_dl(self, rows, col_max=30, col_spacing=2):
+        """Writes a definition list into the buffer. This is how options
+        and commands are usually formatted.
+
+        :param rows: a list of two item tuples for the terms and values.
+        :param col_max: the maximum width of the first column.
+        :param col_spacing: the number of spaces between the first and
+                            second column.
+        """
+        rows = list(rows)
+        widths = measure_table(rows)
+        if len(widths) != 2:
+            raise TypeError('Expected two columns for definition list')
+
+        first_col = min(widths[0], col_max) + col_spacing
+
+        for first, second in iter_rows(rows, len(widths)):
+            self.write('%*s%s' % (self.current_indent, '', first))
+            if not second:
+                self.write('\n')
+                continue
+            if term_len(first) <= first_col - col_spacing:
+                self.write(' ' * (first_col - term_len(first)))
+            else:
+                self.write('\n')
+                self.write(' ' * (first_col + self.current_indent))
+
+            text_width = max(self.width - first_col - 2, 10)
+            lines = iter(wrap_text(second, text_width).splitlines())
+            if lines:
+                self.write(next(lines) + '\n')
+                for line in lines:
+                    self.write('%*s%s\n' % (
+                        first_col + self.current_indent, '', line))
+            else:
+                self.write('\n')
+
+    @contextmanager
+    def section(self, name):
+        """Helpful context manager that writes a paragraph, a heading,
+        and the indents.
+
+        :param name: the section name that is written as heading.
+        """
+        self.write_paragraph()
+        self.write_heading(name)
+        self.indent()
+        try:
+            yield
+        finally:
+            self.dedent()
+
+    @contextmanager
+    def indentation(self):
+        """A context manager that increases the indentation."""
+        self.indent()
+        try:
+            yield
+        finally:
+            self.dedent()
+
+    def getvalue(self):
+        """Returns the buffer contents."""
+        return ''.join(self.buffer)
+
+
+def join_options(options):
+    """Given a list of option strings this joins them in the most appropriate
+    way and returns them in the form ``(formatted_string,
+    any_prefix_is_slash)`` where the second item in the tuple is a flag that
+    indicates if any of the option prefixes was a slash.
+    """
+    rv = []
+    any_prefix_is_slash = False
+    for opt in options:
+        prefix = split_opt(opt)[0]
+        if prefix == '/':
+            any_prefix_is_slash = True
+        rv.append((len(prefix), opt))
+
+    rv.sort(key=lambda x: x[0])
+
+    rv = ', '.join(x[1] for x in rv)
+    return rv, any_prefix_is_slash
diff --git a/venv/lib/python2.7/site-packages/click/globals.py b/venv/lib/python2.7/site-packages/click/globals.py
new file mode 100644
index 0000000..14338e6
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/click/globals.py
@@ -0,0 +1,48 @@
+from threading import local
+
+
+_local = local()
+
+
+def get_current_context(silent=False):
+    """Returns the current click context. This can be used as a way to
+    access the current context object from anywhere. This is a more implicit
+    alternative to the :func:`pass_context` decorator. This function is
+    primarily useful for helpers such as :func:`echo` which might be
+    interested in changing its behavior based on the current context.
+
+    To push the current context, :meth:`Context.scope` can be used.
+
+    .. versionadded:: 5.0
+
+    :param silent: if set to `True` the return value is `None` if no context
+                   is available. The default behavior is to raise a
+                   :exc:`RuntimeError`.
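+
+    A small sketch (the `info_name` access is illustrative)::
+
+        ctx = get_current_context(silent=True)
+        if ctx is not None:
+            click.echo('running %s' % ctx.info_name, color=ctx.color)
+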
+    """
+    try:
+        return getattr(_local, 'stack')[-1]
+    except (AttributeError, IndexError):
+        if not silent:
+            raise RuntimeError('There is no active click context.')
+
+
+def push_context(ctx):
+    """Pushes a new context to the current stack."""
+    _local.__dict__.setdefault('stack', []).append(ctx)
+
+
+def pop_context():
+    """Removes the top level from the stack."""
+    _local.stack.pop()
+
+
+def resolve_color_default(color=None):
+    """Internal helper to get the default value of the color flag. If a
+    value is passed it's returned unchanged, otherwise it's looked up from
+    the current context.
+    """
+    if color is not None:
+        return color
+    ctx = get_current_context(silent=True)
+    if ctx is not None:
+        return ctx.color
diff --git a/venv/lib/python2.7/site-packages/click/parser.py b/venv/lib/python2.7/site-packages/click/parser.py
new file mode 100644
index 0000000..9775c9f
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/click/parser.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+"""
+    click.parser
+    ~~~~~~~~~~~~
+
+    This module started out as largely a copy paste from the stdlib's
+    optparse module with the features removed that we do not need from
+    optparse because we implement them in Click on a higher level (for
+    instance type handling, help formatting and a lot more).
+
+    The plan is to remove more and more from here over time.
+
+    The reason this is a different module and not optparse from the stdlib
+    is that there are differences in 2.x and 3.x about the error messages
+    generated and optparse in the stdlib uses gettext for no good reason
+    and might cause us issues.
+"""
+import re
+from collections import deque
+from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
+     BadArgumentUsage
+
+
+def _unpack_args(args, nargs_spec):
+    """Given an iterable of arguments and an iterable of nargs specifications,
+    it returns a tuple with all the unpacked arguments at the first index
+    and all remaining arguments as the second.
+
+    The nargs specification is the number of arguments that should be consumed
+    or `-1` to indicate that this position should eat up all the remainders.
+
+    Missing items are filled with `None`.
+    """
+    args = deque(args)
+    nargs_spec = deque(nargs_spec)
+    rv = []
+    spos = None
+
+    def _fetch(c):
+        try:
+            if spos is None:
+                return c.popleft()
+            else:
+                return c.pop()
+        except IndexError:
+            return None
+
+    while nargs_spec:
+        nargs = _fetch(nargs_spec)
+        if nargs == 1:
+            rv.append(_fetch(args))
+        elif nargs > 1:
+            x = [_fetch(args) for _ in range(nargs)]
+            # If we're reversed, we're pulling in the arguments in reverse,
+            # so we need to turn them around.
+            if spos is not None:
+                x.reverse()
+            rv.append(tuple(x))
+        elif nargs < 0:
+            if spos is not None:
+                raise TypeError('Cannot have two nargs < 0')
+            spos = len(rv)
+            rv.append(None)
+
+    # spos is the position of the wildcard (star). If it's not `None`,
+    # we fill it with the remainder.
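+    # For example, nargs_spec (1, -1, 1) applied to args ('a', 'b', 'c', 'd')
+    # unpacks to ('a', ('b', 'c'), 'd') with no leftover arguments.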
+ if spos is not None: + rv[spos] = tuple(args) + args = [] + rv[spos + 1:] = reversed(rv[spos + 1:]) + + return tuple(rv), list(args) + + +def _error_opt_args(nargs, opt): + if nargs == 1: + raise BadOptionUsage('%s option requires an argument' % opt) + raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs)) + + +def split_opt(opt): + first = opt[:1] + if first.isalnum(): + return '', opt + if opt[1:2] == first: + return opt[:2], opt[2:] + return first, opt[1:] + + +def normalize_opt(opt, ctx): + if ctx is None or ctx.token_normalize_func is None: + return opt + prefix, opt = split_opt(opt) + return prefix + ctx.token_normalize_func(opt) + + +def split_arg_string(string): + """Given an argument string this attempts to split it into small parts.""" + rv = [] + for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" + r'|"([^"\\]*(?:\\.[^"\\]*)*)"' + r'|\S+)\s*', string, re.S): + arg = match.group().strip() + if arg[:1] == arg[-1:] and arg[:1] in '"\'': + arg = arg[1:-1].encode('ascii', 'backslashreplace') \ + .decode('unicode-escape') + try: + arg = type(string)(arg) + except UnicodeError: + pass + rv.append(arg) + return rv + + +class Option(object): + + def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): + self._short_opts = [] + self._long_opts = [] + self.prefixes = set() + + for opt in opts: + prefix, value = split_opt(opt) + if not prefix: + raise ValueError('Invalid start character for option (%s)' + % opt) + self.prefixes.add(prefix[0]) + if len(prefix) == 1 and len(value) == 1: + self._short_opts.append(opt) + else: + self._long_opts.append(opt) + self.prefixes.add(prefix) + + if action is None: + action = 'store' + + self.dest = dest + self.action = action + self.nargs = nargs + self.const = const + self.obj = obj + + @property + def takes_value(self): + return self.action in ('store', 'append') + + def process(self, value, state): + if self.action == 'store': + state.opts[self.dest] = value + elif self.action == 'store_const': + state.opts[self.dest] = self.const + elif self.action == 'append': + state.opts.setdefault(self.dest, []).append(value) + elif self.action == 'append_const': + state.opts.setdefault(self.dest, []).append(self.const) + elif self.action == 'count': + state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 + else: + raise ValueError('unknown action %r' % self.action) + state.order.append(self.obj) + + +class Argument(object): + + def __init__(self, dest, nargs=1, obj=None): + self.dest = dest + self.nargs = nargs + self.obj = obj + + def process(self, value, state): + if self.nargs > 1: + holes = sum(1 for x in value if x is None) + if holes == len(value): + value = None + elif holes != 0: + raise BadArgumentUsage('argument %s takes %d values' + % (self.dest, self.nargs)) + state.opts[self.dest] = value + state.order.append(self.obj) + + +class ParsingState(object): + + def __init__(self, rargs): + self.opts = {} + self.largs = [] + self.rargs = rargs + self.order = [] + + +class OptionParser(object): + """The option parser is an internal class that is ultimately used to + parse options and arguments. It's modelled after optparse and brings + a similar but vastly simplified API. It should generally not be used + directly as the high level Click classes wrap it for you. + + It's not nearly as extensible as optparse or argparse as it does not + implement features that are implemented on a higher level (such as + types or defaults). 
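+
+    A minimal sketch of direct use (normally Click drives this for you;
+    the option and argument strings are illustrative)::
+
+        parser = OptionParser()
+        parser.add_option(['-n', '--name'], dest='name')
+        opts, largs, order = parser.parse_args(['--name', 'John', 'extra'])
+        # opts == {'name': 'John'}, largs == ['extra']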
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                belongs to.
+    """
+
+    def __init__(self, ctx=None):
+        #: The :class:`~click.Context` for this parser. This might be
+        #: `None` for some advanced use cases.
+        self.ctx = ctx
+        #: This controls how the parser deals with interspersed arguments.
+        #: If this is set to `False`, the parser will stop on the first
+        #: non-option. Click uses this to implement nested subcommands
+        #: safely.
+        self.allow_interspersed_args = True
+        #: This tells the parser how to deal with unknown options. By
+        #: default it will error out (which is sensible), but there is a
+        #: second mode where it will ignore it and continue processing
+        #: after shifting all the unknown options into the resulting args.
+        self.ignore_unknown_options = False
+        if ctx is not None:
+            self.allow_interspersed_args = ctx.allow_interspersed_args
+            self.ignore_unknown_options = ctx.ignore_unknown_options
+        self._short_opt = {}
+        self._long_opt = {}
+        self._opt_prefixes = set(['-', '--'])
+        self._args = []
+
+    def add_option(self, opts, dest, action=None, nargs=1, const=None,
+                   obj=None):
+        """Adds a new option named `dest` to the parser. The destination
+        is not inferred (unlike with optparse) and needs to be explicitly
+        provided. Action can be any of ``store``, ``store_const``,
+        ``append``, ``append_const`` or ``count``.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        if obj is None:
+            obj = dest
+        opts = [normalize_opt(opt, self.ctx) for opt in opts]
+        option = Option(opts, dest, action=action, nargs=nargs,
+                        const=const, obj=obj)
+        self._opt_prefixes.update(option.prefixes)
+        for opt in option._short_opts:
+            self._short_opt[opt] = option
+        for opt in option._long_opts:
+            self._long_opt[opt] = option
+
+    def add_argument(self, dest, nargs=1, obj=None):
+        """Adds a positional argument named `dest` to the parser.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        if obj is None:
+            obj = dest
+        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+    def parse_args(self, args):
+        """Parses positional arguments and returns ``(values, args, order)``
+        for the parsed options and arguments as well as the leftover
+        arguments if there are any. The order is a list of objects as they
+        appear on the command line. If arguments appear multiple times they
+        will be memorized multiple times as well.
+        """
+        state = ParsingState(args)
+        try:
+            self._process_args_for_options(state)
+            self._process_args_for_args(state)
+        except UsageError:
+            if self.ctx is None or not self.ctx.resilient_parsing:
+                raise
+        return state.opts, state.largs, state.order
+
+    def _process_args_for_args(self, state):
+        pargs, args = _unpack_args(state.largs + state.rargs,
+                                   [x.nargs for x in self._args])
+
+        for idx, arg in enumerate(self._args):
+            arg.process(pargs[idx], state)
+
+        state.largs = args
+        state.rargs = []
+
+    def _process_args_for_options(self, state):
+        while state.rargs:
+            arg = state.rargs.pop(0)
+            arglen = len(arg)
+            # Double dashes always handled explicitly regardless of what
+            # prefixes are valid.
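+            # For example, given ['--verbose', '--', '-x'] everything after
+            # '--' is left for positional processing, so '-x' is never
+            # treated as an option.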
+ if arg == '--': + return + elif arg[:1] in self._opt_prefixes and arglen > 1: + self._process_opts(arg, state) + elif self.allow_interspersed_args: + state.largs.append(arg) + else: + state.rargs.insert(0, arg) + return + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt(self, opt, explicit_value, state): + if opt not in self._long_opt: + possibilities = [word for word in self._long_opt + if word.startswith(opt)] + raise NoSuchOption(opt, possibilities=possibilities) + + option = self._long_opt[opt] + if option.takes_value: + # At this point it's safe to modify rargs by injecting the + # explicit value, because no exception is raised in this + # branch. This means that the inserted value will be fully + # consumed. + if explicit_value is not None: + state.rargs.insert(0, explicit_value) + + nargs = option.nargs + if len(state.rargs) < nargs: + _error_opt_args(nargs, opt) + elif nargs == 1: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + elif explicit_value is not None: + raise BadOptionUsage('%s option does not take a value' % opt) + + else: + value = None + + option.process(value, state) + + def _match_short_opt(self, arg, state): + stop = False + i = 1 + prefix = arg[0] + unknown_options = [] + + for ch in arg[1:]: + opt = normalize_opt(prefix + ch, self.ctx) + option = self._short_opt.get(opt) + i += 1 + + if not option: + if self.ignore_unknown_options: + unknown_options.append(ch) + continue + raise NoSuchOption(opt) + if option.takes_value: + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. + if i < len(arg): + state.rargs.insert(0, arg[i:]) + stop = True + + nargs = option.nargs + if len(state.rargs) < nargs: + _error_opt_args(nargs, opt) + elif nargs == 1: + value = state.rargs.pop(0) + else: + value = tuple(state.rargs[:nargs]) + del state.rargs[:nargs] + + else: + value = None + + option.process(value, state) + + if stop: + break + + # If we got any unknown options we re-combinate the string of the + # remaining options and re-attach the prefix, then report that + # to the state as new larg. This way there is basic combinatorics + # that can be achieved while still ignoring unknown arguments. + if self.ignore_unknown_options and unknown_options: + state.largs.append(prefix + ''.join(unknown_options)) + + def _process_opts(self, arg, state): + explicit_value = None + # Long option handling happens in two parts. The first part is + # supporting explicitly attached values. In any case, we will try + # to long match the option first. 
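+        # For example, '--name=John' is split into the long option
+        # '--name' and the explicit value 'John' before matching.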
+ if '=' in arg: + long_opt, explicit_value = arg.split('=', 1) + else: + long_opt = arg + norm_long_opt = normalize_opt(long_opt, self.ctx) + + # At this point we will match the (assumed) long option through + # the long option matching code. Note that this allows options + # like "-foo" to be matched as long options. + try: + self._match_long_opt(norm_long_opt, explicit_value, state) + except NoSuchOption: + # At this point the long option matching failed, and we need + # to try with short options. However there is a special rule + # which says, that if we have a two character options prefix + # (applies to "--foo" for instance), we do not dispatch to the + # short option code and will instead raise the no option + # error. + if arg[:2] not in self._opt_prefixes: + return self._match_short_opt(arg, state) + if not self.ignore_unknown_options: + raise + state.largs.append(arg) diff --git a/venv/lib/python2.7/site-packages/click/termui.py b/venv/lib/python2.7/site-packages/click/termui.py new file mode 100644 index 0000000..d9fba52 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/termui.py @@ -0,0 +1,539 @@ +import os +import sys +import struct + +from ._compat import raw_input, text_type, string_types, \ + isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN +from .utils import echo +from .exceptions import Abort, UsageError +from .types import convert_type +from .globals import resolve_color_default + + +# The prompt functions to use. The doc tools currently override these +# functions to customize how they work. +visible_prompt_func = raw_input + +_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', + 'cyan', 'white', 'reset') +_ansi_reset_all = '\033[0m' + + +def hidden_prompt_func(prompt): + import getpass + return getpass.getpass(prompt) + + +def _build_prompt(text, suffix, show_default=False, default=None): + prompt = text + if default is not None and show_default: + prompt = '%s [%s]' % (prompt, default) + return prompt + suffix + + +def prompt(text, default=None, hide_input=False, + confirmation_prompt=False, type=None, + value_proc=None, prompt_suffix=': ', + show_default=True, err=False): + """Prompts a user for input. This is a convenience function that can + be used to prompt a user for input later. + + If the user aborts the input by sending a interrupt signal, this + function will catch it and raise a :exc:`Abort` exception. + + .. versionadded:: 6.0 + Added unicode support for cmd.exe on Windows. + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param text: the text to show for the prompt. + :param default: the default value to use if no input happens. If this + is not given it will prompt until it's aborted. + :param hide_input: if this is set to true then the input value will + be hidden. + :param confirmation_prompt: asks for confirmation for the value. + :param type: the type to use to check the value against. + :param value_proc: if this parameter is provided it's a function that + is invoked instead of the type conversion to + convert a value. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. 
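+
+    A small usage sketch (prompt texts and defaults are illustrative)::
+
+        name = prompt('Your name', default='World')
+        password = prompt('Password', hide_input=True,
+                          confirmation_prompt=True)
+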
+ """ + result = None + + def prompt_func(text): + f = hide_input and hidden_prompt_func or visible_prompt_func + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(text, nl=False, err=err) + return f('') + except (KeyboardInterrupt, EOFError): + # getpass doesn't print a newline if the user aborts input with ^C. + # Allegedly this behavior is inherited from getpass(3). + # A doc bug has been filed at https://bugs.python.org/issue24711 + if hide_input: + echo(None, err=err) + raise Abort() + + if value_proc is None: + value_proc = convert_type(type, default) + + prompt = _build_prompt(text, prompt_suffix, show_default, default) + + while 1: + while 1: + value = prompt_func(prompt) + if value: + break + # If a default is set and used, then the confirmation + # prompt is always skipped because that's the only thing + # that really makes sense. + elif default is not None: + return default + try: + result = value_proc(value) + except UsageError as e: + echo('Error: %s' % e.message, err=err) + continue + if not confirmation_prompt: + return result + while 1: + value2 = prompt_func('Repeat for confirmation: ') + if value2: + break + if value == value2: + return result + echo('Error: the two entered values do not match', err=err) + + +def confirm(text, default=False, abort=False, prompt_suffix=': ', + show_default=True, err=False): + """Prompts for confirmation (yes/no question). + + If the user aborts the input by sending a interrupt signal this + function will catch it and raise a :exc:`Abort` exception. + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param text: the question to ask. + :param default: the default for the prompt. + :param abort: if this is set to `True` a negative answer aborts the + exception by raising :exc:`Abort`. + :param prompt_suffix: a suffix that should be added to the prompt. + :param show_default: shows or hides the default value in the prompt. + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + prompt = _build_prompt(text, prompt_suffix, show_default, + default and 'Y/n' or 'y/N') + while 1: + try: + # Write the prompt separately so that we get nice + # coloring through colorama on Windows + echo(prompt, nl=False, err=err) + value = visible_prompt_func('').lower().strip() + except (KeyboardInterrupt, EOFError): + raise Abort() + if value in ('y', 'yes'): + rv = True + elif value in ('n', 'no'): + rv = False + elif value == '': + rv = default + else: + echo('Error: invalid input', err=err) + continue + break + if abort and not rv: + raise Abort() + return rv + + +def get_terminal_size(): + """Returns the current size of the terminal as tuple in the form + ``(width, height)`` in columns and rows. 
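+
+    For example::
+
+        width, height = get_terminal_size()
+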
+ """ + # If shutil has get_terminal_size() (Python 3.3 and later) use that + if sys.version_info >= (3, 3): + import shutil + shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) + if shutil_get_terminal_size: + sz = shutil_get_terminal_size() + return sz.columns, sz.lines + + if get_winterm_size is not None: + return get_winterm_size() + + def ioctl_gwinsz(fd): + try: + import fcntl + import termios + cr = struct.unpack( + 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) + except Exception: + return + return cr + + cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) + if not cr: + try: + fd = os.open(os.ctermid(), os.O_RDONLY) + try: + cr = ioctl_gwinsz(fd) + finally: + os.close(fd) + except Exception: + pass + if not cr or not cr[0] or not cr[1]: + cr = (os.environ.get('LINES', 25), + os.environ.get('COLUMNS', DEFAULT_COLUMNS)) + return int(cr[1]), int(cr[0]) + + +def echo_via_pager(text, color=None): + """This function takes a text and shows it via an environment specific + pager on stdout. + + .. versionchanged:: 3.0 + Added the `color` flag. + + :param text: the text to page. + :param color: controls if the pager supports ANSI colors or not. The + default is autodetection. + """ + color = resolve_color_default(color) + if not isinstance(text, string_types): + text = text_type(text) + from ._termui_impl import pager + return pager(text + '\n', color) + + +def progressbar(iterable=None, length=None, label=None, show_eta=True, + show_percent=None, show_pos=False, + item_show_func=None, fill_char='#', empty_char='-', + bar_template='%(label)s [%(bar)s] %(info)s', + info_sep=' ', width=36, file=None, color=None): + """This function creates an iterable context manager that can be used + to iterate over something while showing a progress bar. It will + either iterate over the `iterable` or `length` items (that are counted + up). While iteration happens, this function will print a rendered + progress bar to the given `file` (defaults to stdout) and will attempt + to calculate remaining time and more. By default, this progress bar + will not be rendered if the file is not a terminal. + + The context manager creates the progress bar. When the context + manager is entered the progress bar is already displayed. With every + iteration over the progress bar, the iterable passed to the bar is + advanced and the bar is updated. When the context manager exits, + a newline is printed and the progress bar is finalized on screen. + + No printing must happen or the progress bar will be unintentionally + destroyed. + + Example usage:: + + with progressbar(items) as bar: + for item in bar: + do_something_with(item) + + Alternatively, if no iterable is specified, one can manually update the + progress bar through the `update()` method instead of directly + iterating over the progress bar. The update method accepts the number + of steps to increment the bar with:: + + with progressbar(length=chunks.total_bytes) as bar: + for chunk in chunks: + process_chunk(chunk) + bar.update(chunks.bytes) + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `color` parameter. Added a `update` method to the + progressbar object. + + :param iterable: an iterable to iterate over. If not provided the length + is required. + :param length: the number of items to iterate over. By default the + progressbar will attempt to ask the iterator about its + length, which might or might not work. If an iterable is + also provided this parameter can be used to override the + length. 
If an iterable is not provided the progress bar + will iterate over a range of that length. + :param label: the label to show next to the progress bar. + :param show_eta: enables or disables the estimated time display. This is + automatically disabled if the length cannot be + determined. + :param show_percent: enables or disables the percentage display. The + default is `True` if the iterable has a length or + `False` if not. + :param show_pos: enables or disables the absolute position display. The + default is `False`. + :param item_show_func: a function called with the current item which + can return a string to show the current item + next to the progress bar. Note that the current + item can be `None`! + :param fill_char: the character to use to show the filled part of the + progress bar. + :param empty_char: the character to use to show the non-filled part of + the progress bar. + :param bar_template: the format string to use as template for the bar. + The parameters in it are ``label`` for the label, + ``bar`` for the progress bar and ``info`` for the + info section. + :param info_sep: the separator between multiple info items (eta etc.) + :param width: the width of the progress bar in characters, 0 means full + terminal width + :param file: the file to write to. If this is not a terminal then + only the label is printed. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. This is only needed if ANSI + codes are included anywhere in the progress bar output + which is not the case by default. + """ + from ._termui_impl import ProgressBar + color = resolve_color_default(color) + return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, + show_percent=show_percent, show_pos=show_pos, + item_show_func=item_show_func, fill_char=fill_char, + empty_char=empty_char, bar_template=bar_template, + info_sep=info_sep, file=file, label=label, + width=width, color=color) + + +def clear(): + """Clears the terminal screen. This will have the effect of clearing + the whole visible space of the terminal and moving the cursor to the + top left. This does not do anything if not connected to a terminal. + + .. versionadded:: 2.0 + """ + if not isatty(sys.stdout): + return + # If we're on Windows and we don't have colorama available, then we + # clear the screen by shelling out. Otherwise we can use an escape + # sequence. + if WIN: + os.system('cls') + else: + sys.stdout.write('\033[2J\033[1;1H') + + +def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, + blink=None, reverse=None, reset=True): + """Styles a text with ANSI styles and returns the new string. By + default the styling is self contained which means that at the end + of the string a reset code is issued. This can be prevented by + passing ``reset=False``. + + Examples:: + + click.echo(click.style('Hello World!', fg='green')) + click.echo(click.style('ATTENTION!', blink=True)) + click.echo(click.style('Some things', reverse=True, fg='cyan')) + + Supported color names: + + * ``black`` (might be a gray) + * ``red`` + * ``green`` + * ``yellow`` (might be an orange) + * ``blue`` + * ``magenta`` + * ``cyan`` + * ``white`` (might be light gray) + * ``reset`` (reset the color code only) + + .. versionadded:: 2.0 + + :param text: the string to style with ansi codes. + :param fg: if provided this will become the foreground color. + :param bg: if provided this will become the background color. + :param bold: if provided this will enable or disable bold mode. 
+ :param dim: if provided this will enable or disable dim mode. This is + badly supported. + :param underline: if provided this will enable or disable underline. + :param blink: if provided this will enable or disable blinking. + :param reverse: if provided this will enable or disable inverse + rendering (foreground becomes background and the + other way round). + :param reset: by default a reset-all code is added at the end of the + string which means that styles do not carry over. This + can be disabled to compose styles. + """ + bits = [] + if fg: + try: + bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30)) + except ValueError: + raise TypeError('Unknown color %r' % fg) + if bg: + try: + bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40)) + except ValueError: + raise TypeError('Unknown color %r' % bg) + if bold is not None: + bits.append('\033[%dm' % (1 if bold else 22)) + if dim is not None: + bits.append('\033[%dm' % (2 if dim else 22)) + if underline is not None: + bits.append('\033[%dm' % (4 if underline else 24)) + if blink is not None: + bits.append('\033[%dm' % (5 if blink else 25)) + if reverse is not None: + bits.append('\033[%dm' % (7 if reverse else 27)) + bits.append(text) + if reset: + bits.append(_ansi_reset_all) + return ''.join(bits) + + +def unstyle(text): + """Removes ANSI styling information from a string. Usually it's not + necessary to use this function as Click's echo function will + automatically remove styling if necessary. + + .. versionadded:: 2.0 + + :param text: the text to remove style information from. + """ + return strip_ansi(text) + + +def secho(text, file=None, nl=True, err=False, color=None, **styles): + """This function combines :func:`echo` and :func:`style` into one + call. As such the following two calls are the same:: + + click.secho('Hello World!', fg='green') + click.echo(click.style('Hello World!', fg='green')) + + All keyword arguments are forwarded to the underlying functions + depending on which one they go with. + + .. versionadded:: 2.0 + """ + return echo(style(text, **styles), file=file, nl=nl, err=err, color=color) + + +def edit(text=None, editor=None, env=None, require_save=True, + extension='.txt', filename=None): + r"""Edits the given text in the defined editor. If an editor is given + (should be the full path to the executable but the regular operating + system search path is used for finding the executable) it overrides + the detected editor. Optionally, some environment variables can be + used. If the editor is closed without changes, `None` is returned. In + case a file is edited directly the return value is always `None` and + `require_save` and `extension` are ignored. + + If the editor cannot be opened a :exc:`UsageError` is raised. + + Note for Windows: to simplify cross-platform usage, the newlines are + automatically converted from POSIX to Windows and vice versa. As such, + the message here will have ``\n`` as newline markers. + + :param text: the text to edit. + :param editor: optionally the editor to use. Defaults to automatic + detection. + :param env: environment variables to forward to the editor. + :param require_save: if this is true, then not saving in the editor + will make the return value become `None`. + :param extension: the extension to tell the editor about. This defaults + to `.txt` but changing this might change syntax + highlighting. + :param filename: if provided it will edit this file instead of the + provided text contents. It will not use a temporary + file as an indirection in that case. 
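+
+    A small sketch::
+
+        new_text = edit('original text')
+        if new_text is None:
+            click.echo('no changes were saved')
+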
+ """ + from ._termui_impl import Editor + editor = Editor(editor=editor, env=env, require_save=require_save, + extension=extension) + if filename is None: + return editor.edit(text) + editor.edit_file(filename) + + +def launch(url, wait=False, locate=False): + """This function launches the given URL (or filename) in the default + viewer application for this file type. If this is an executable, it + might launch the executable in a new session. The return value is + the exit code of the launched application. Usually, ``0`` indicates + success. + + Examples:: + + click.launch('http://click.pocoo.org/') + click.launch('/my/downloaded/file', locate=True) + + .. versionadded:: 2.0 + + :param url: URL or filename of the thing to launch. + :param wait: waits for the program to stop. + :param locate: if this is set to `True` then instead of launching the + application associated with the URL it will attempt to + launch a file manager with the file located. This + might have weird effects if the URL does not point to + the filesystem. + """ + from ._termui_impl import open_url + return open_url(url, wait=wait, locate=locate) + + +# If this is provided, getchar() calls into this instead. This is used +# for unittesting purposes. +_getchar = None + + +def getchar(echo=False): + """Fetches a single character from the terminal and returns it. This + will always return a unicode character and under certain rare + circumstances this might return more than one character. The + situations which more than one character is returned is when for + whatever reason multiple characters end up in the terminal buffer or + standard input was not actually a terminal. + + Note that this will always read from the terminal, even if something + is piped into the standard input. + + .. versionadded:: 2.0 + + :param echo: if set to `True`, the character read will also show up on + the terminal. The default is to not show it. + """ + f = _getchar + if f is None: + from ._termui_impl import getchar as f + return f(echo) + + +def pause(info='Press any key to continue ...', err=False): + """This command stops execution and waits for the user to press any + key to continue. This is similar to the Windows batch "pause" + command. If the program is not run through a terminal, this command + will instead do nothing. + + .. versionadded:: 2.0 + + .. versionadded:: 4.0 + Added the `err` parameter. + + :param info: the info string to print before pausing. + :param err: if set to message goes to ``stderr`` instead of + ``stdout``, the same as with echo. + """ + if not isatty(sys.stdin) or not isatty(sys.stdout): + return + try: + if info: + echo(info, nl=False, err=err) + try: + getchar() + except (KeyboardInterrupt, EOFError): + pass + finally: + if info: + echo(err=err) diff --git a/venv/lib/python2.7/site-packages/click/testing.py b/venv/lib/python2.7/site-packages/click/testing.py new file mode 100644 index 0000000..d43581f --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/testing.py @@ -0,0 +1,322 @@ +import os +import sys +import shutil +import tempfile +import contextlib + +from ._compat import iteritems, PY2 + + +# If someone wants to vendor click, we want to ensure the +# correct package is discovered. Ideally we could use a +# relative import here but unfortunately Python does not +# support that. 
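+# For example, when __name__ is 'click.testing' the expression below
+# resolves to the already-imported 'click' package object.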
+clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+if PY2:
+    from cStringIO import StringIO
+else:
+    import io
+    from ._compat import _find_binary_reader
+
+
+class EchoingStdin(object):
+
+    def __init__(self, input, output):
+        self._input = input
+        self._output = output
+
+    def __getattr__(self, x):
+        return getattr(self._input, x)
+
+    def _echo(self, rv):
+        self._output.write(rv)
+        return rv
+
+    def read(self, n=-1):
+        return self._echo(self._input.read(n))
+
+    def readline(self, n=-1):
+        return self._echo(self._input.readline(n))
+
+    def readlines(self):
+        return [self._echo(x) for x in self._input.readlines()]
+
+    def __iter__(self):
+        return iter(self._echo(x) for x in self._input)
+
+    def __repr__(self):
+        return repr(self._input)
+
+
+def make_input_stream(input, charset):
+    # Is already an input stream.
+    if hasattr(input, 'read'):
+        if PY2:
+            return input
+        rv = _find_binary_reader(input)
+        if rv is not None:
+            return rv
+        raise TypeError('Could not find binary reader for input stream.')
+
+    if input is None:
+        input = b''
+    elif not isinstance(input, bytes):
+        input = input.encode(charset)
+    if PY2:
+        return StringIO(input)
+    return io.BytesIO(input)
+
+
+class Result(object):
+    """Holds the captured result of an invoked CLI script."""
+
+    def __init__(self, runner, output_bytes, exit_code, exception,
+                 exc_info=None):
+        #: The runner that created the result
+        self.runner = runner
+        #: The output as bytes.
+        self.output_bytes = output_bytes
+        #: The exit code as integer.
+        self.exit_code = exit_code
+        #: The exception that happened if one did.
+        self.exception = exception
+        #: The traceback
+        self.exc_info = exc_info
+
+    @property
+    def output(self):
+        """The output as unicode string."""
+        return self.output_bytes.decode(self.runner.charset, 'replace') \
+            .replace('\r\n', '\n')
+
+    def __repr__(self):
+        return '<Result %s>' % (
+            self.exception and repr(self.exception) or 'okay',
+        )
+
+
+class CliRunner(object):
+    """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in an isolated environment. This only
+    works in single-threaded systems without any concurrency as it changes the
+    global interpreter state.
+
+    :param charset: the character set for the input and output data. This is
+                    UTF-8 by default and should not be changed currently as
+                    the reporting to Click only works in Python 2 properly.
+    :param env: a dictionary with environment variables for overriding.
+    :param echo_stdin: if this is set to `True`, then reading from stdin writes
+                       to stdout. This is useful for showing examples in
+                       some circumstances. Note that regular prompts
+                       will automatically echo the input.
+    """
+
+    def __init__(self, charset=None, env=None, echo_stdin=False):
+        if charset is None:
+            charset = 'utf-8'
+        self.charset = charset
+        self.env = env or {}
+        self.echo_stdin = echo_stdin
+
+    def get_default_prog_name(self, cli):
+        """Given a command object it will return the default program name
+        for it. The default is the `name` attribute or ``"root"`` if not
+        set.
+        """
+        return cli.name or 'root'
+
+    def make_env(self, overrides=None):
+        """Returns the environment overrides for invoking a script."""
+        rv = dict(self.env)
+        if overrides:
+            rv.update(overrides)
+        return rv
+
+    @contextlib.contextmanager
+    def isolation(self, input=None, env=None, color=False):
+        """A context manager that sets up the isolation for invoking of a
+        command line tool. 
This sets up stdin with the given input data + and `os.environ` with the overrides from the given dictionary. + This also rebinds some internals in Click to be mocked (like the + prompt functionality). + + This is automatically done in the :meth:`invoke` method. + + .. versionadded:: 4.0 + The ``color`` parameter was added. + + :param input: the input stream to put into sys.stdin. + :param env: the environment overrides as dictionary. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + """ + input = make_input_stream(input, self.charset) + + old_stdin = sys.stdin + old_stdout = sys.stdout + old_stderr = sys.stderr + old_forced_width = clickpkg.formatting.FORCED_WIDTH + clickpkg.formatting.FORCED_WIDTH = 80 + + env = self.make_env(env) + + if PY2: + sys.stdout = sys.stderr = bytes_output = StringIO() + if self.echo_stdin: + input = EchoingStdin(input, bytes_output) + else: + bytes_output = io.BytesIO() + if self.echo_stdin: + input = EchoingStdin(input, bytes_output) + input = io.TextIOWrapper(input, encoding=self.charset) + sys.stdout = sys.stderr = io.TextIOWrapper( + bytes_output, encoding=self.charset) + + sys.stdin = input + + def visible_input(prompt=None): + sys.stdout.write(prompt or '') + val = input.readline().rstrip('\r\n') + sys.stdout.write(val + '\n') + sys.stdout.flush() + return val + + def hidden_input(prompt=None): + sys.stdout.write((prompt or '') + '\n') + sys.stdout.flush() + return input.readline().rstrip('\r\n') + + def _getchar(echo): + char = sys.stdin.read(1) + if echo: + sys.stdout.write(char) + sys.stdout.flush() + return char + + default_color = color + def should_strip_ansi(stream=None, color=None): + if color is None: + return not default_color + return not color + + old_visible_prompt_func = clickpkg.termui.visible_prompt_func + old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func + old__getchar_func = clickpkg.termui._getchar + old_should_strip_ansi = clickpkg.utils.should_strip_ansi + clickpkg.termui.visible_prompt_func = visible_input + clickpkg.termui.hidden_prompt_func = hidden_input + clickpkg.termui._getchar = _getchar + clickpkg.utils.should_strip_ansi = should_strip_ansi + + old_env = {} + try: + for key, value in iteritems(env): + old_env[key] = os.environ.get(value) + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + yield bytes_output + finally: + for key, value in iteritems(old_env): + if value is None: + try: + del os.environ[key] + except Exception: + pass + else: + os.environ[key] = value + sys.stdout = old_stdout + sys.stderr = old_stderr + sys.stdin = old_stdin + clickpkg.termui.visible_prompt_func = old_visible_prompt_func + clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func + clickpkg.termui._getchar = old__getchar_func + clickpkg.utils.should_strip_ansi = old_should_strip_ansi + clickpkg.formatting.FORCED_WIDTH = old_forced_width + + def invoke(self, cli, args=None, input=None, env=None, + catch_exceptions=True, color=False, **extra): + """Invokes a command in an isolated environment. The arguments are + forwarded directly to the command line script, the `extra` keyword + arguments are passed to the :meth:`~clickpkg.Command.main` function of + the command. + + This returns a :class:`Result` object. + + .. versionadded:: 3.0 + The ``catch_exceptions`` parameter was added. + + .. versionchanged:: 3.0 + The result object now has an `exc_info` attribute with the + traceback if available. + + .. 
versionadded:: 4.0 + The ``color`` parameter was added. + + :param cli: the command to invoke + :param args: the arguments to invoke + :param input: the input data for `sys.stdin`. + :param env: the environment overrides. + :param catch_exceptions: Whether to catch any other exceptions than + ``SystemExit``. + :param extra: the keyword arguments to pass to :meth:`main`. + :param color: whether the output should contain color codes. The + application can still override this explicitly. + """ + exc_info = None + with self.isolation(input=input, env=env, color=color) as out: + exception = None + exit_code = 0 + + try: + cli.main(args=args or (), + prog_name=self.get_default_prog_name(cli), **extra) + except SystemExit as e: + if e.code != 0: + exception = e + + exc_info = sys.exc_info() + + exit_code = e.code + if not isinstance(exit_code, int): + sys.stdout.write(str(exit_code)) + sys.stdout.write('\n') + exit_code = 1 + except Exception as e: + if not catch_exceptions: + raise + exception = e + exit_code = -1 + exc_info = sys.exc_info() + finally: + sys.stdout.flush() + output = out.getvalue() + + return Result(runner=self, + output_bytes=output, + exit_code=exit_code, + exception=exception, + exc_info=exc_info) + + @contextlib.contextmanager + def isolated_filesystem(self): + """A context manager that creates a temporary folder and changes + the current working directory to it for isolated filesystem tests. + """ + cwd = os.getcwd() + t = tempfile.mkdtemp() + os.chdir(t) + try: + yield t + finally: + os.chdir(cwd) + try: + shutil.rmtree(t) + except (OSError, IOError): + pass diff --git a/venv/lib/python2.7/site-packages/click/types.py b/venv/lib/python2.7/site-packages/click/types.py new file mode 100644 index 0000000..3639002 --- /dev/null +++ b/venv/lib/python2.7/site-packages/click/types.py @@ -0,0 +1,550 @@ +import os +import stat + +from ._compat import open_stream, text_type, filename_to_ui, \ + get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 +from .exceptions import BadParameter +from .utils import safecall, LazyFile + + +class ParamType(object): + """Helper for converting values through types. The following is + necessary for a valid type: + + * it needs a name + * it needs to pass through None unchanged + * it needs to convert from a string + * it needs to convert its result type through unchanged + (eg: needs to be idempotent) + * it needs to be able to deal with param and context being `None`. + This can be the case when the object is used with prompt + inputs. + """ + is_composite = False + + #: the descriptive name of this type + name = None + + #: if a list of this type is expected and the value is pulled from a + #: string environment variable, this is what splits it up. `None` + #: means any whitespace. For all parameters the general rule is that + #: whitespace splits them up. The exception are paths and files which + #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on + #: Windows). + envvar_list_splitter = None + + def __call__(self, value, param=None, ctx=None): + if value is not None: + return self.convert(value, param, ctx) + + def get_metavar(self, param): + """Returns the metavar default for this param if it provides one.""" + + def get_missing_message(self, param): + """Optionally might return extra information about a missing + parameter. + + .. versionadded:: 2.0 + """ + + def convert(self, value, param, ctx): + """Converts the value. This is not invoked for values that are + `None` (the missing value). 
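    For illustration (an editorial sketch with a hypothetical subclass, not
    part of this module), the checklist in the :class:`ParamType` docstring
    above is typically satisfied like this::

        class CommaSeparated(ParamType):
            name = 'comma-separated'

            def convert(self, value, param, ctx):
                # Idempotent: pass through values that are already converted.
                if isinstance(value, list):
                    return value
                return [item.strip() for item in value.split(',')]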
+ """ + return value + + def split_envvar_value(self, rv): + """Given a value from an environment variable this splits it up + into small chunks depending on the defined envvar list splitter. + + If the splitter is set to `None`, which means that whitespace splits, + then leading and trailing whitespace is ignored. Otherwise, leading + and trailing splitters usually lead to empty items being included. + """ + return (rv or '').split(self.envvar_list_splitter) + + def fail(self, message, param=None, ctx=None): + """Helper method to fail with an invalid value message.""" + raise BadParameter(message, ctx=ctx, param=param) + + +class CompositeParamType(ParamType): + is_composite = True + + @property + def arity(self): + raise NotImplementedError() + + +class FuncParamType(ParamType): + + def __init__(self, func): + self.name = func.__name__ + self.func = func + + def convert(self, value, param, ctx): + try: + return self.func(value) + except ValueError: + try: + value = text_type(value) + except UnicodeError: + value = str(value).decode('utf-8', 'replace') + self.fail(value, param, ctx) + + +class UnprocessedParamType(ParamType): + name = 'text' + + def convert(self, value, param, ctx): + return value + + def __repr__(self): + return 'UNPROCESSED' + + +class StringParamType(ParamType): + name = 'text' + + def convert(self, value, param, ctx): + if isinstance(value, bytes): + enc = _get_argv_encoding() + try: + value = value.decode(enc) + except UnicodeError: + fs_enc = get_filesystem_encoding() + if fs_enc != enc: + try: + value = value.decode(fs_enc) + except UnicodeError: + value = value.decode('utf-8', 'replace') + return value + return value + + def __repr__(self): + return 'STRING' + + +class Choice(ParamType): + """The choice type allows a value to be checked against a fixed set of + supported values. All of these values have to be strings. + + See :ref:`choice-opts` for an example. + """ + name = 'choice' + + def __init__(self, choices): + self.choices = choices + + def get_metavar(self, param): + return '[%s]' % '|'.join(self.choices) + + def get_missing_message(self, param): + return 'Choose from %s.' % ', '.join(self.choices) + + def convert(self, value, param, ctx): + # Exact match + if value in self.choices: + return value + + # Match through normalization + if ctx is not None and \ + ctx.token_normalize_func is not None: + value = ctx.token_normalize_func(value) + for choice in self.choices: + if ctx.token_normalize_func(choice) == value: + return choice + + self.fail('invalid choice: %s. (choose from %s)' % + (value, ', '.join(self.choices)), param, ctx) + + def __repr__(self): + return 'Choice(%r)' % list(self.choices) + + +class IntParamType(ParamType): + name = 'integer' + + def convert(self, value, param, ctx): + try: + return int(value) + except (ValueError, UnicodeError): + self.fail('%s is not a valid integer' % value, param, ctx) + + def __repr__(self): + return 'INT' + + +class IntRange(IntParamType): + """A parameter that works similar to :data:`click.INT` but restricts + the value to fit into a range. The default behavior is to fail if the + value falls outside the range, but it can also be silently clamped + between the two edges. + + See :ref:`ranges` for an example. 
+ """ + name = 'integer range' + + def __init__(self, min=None, max=None, clamp=False): + self.min = min + self.max = max + self.clamp = clamp + + def convert(self, value, param, ctx): + rv = IntParamType.convert(self, value, param, ctx) + if self.clamp: + if self.min is not None and rv < self.min: + return self.min + if self.max is not None and rv > self.max: + return self.max + if self.min is not None and rv < self.min or \ + self.max is not None and rv > self.max: + if self.min is None: + self.fail('%s is bigger than the maximum valid value ' + '%s.' % (rv, self.max), param, ctx) + elif self.max is None: + self.fail('%s is smaller than the minimum valid value ' + '%s.' % (rv, self.min), param, ctx) + else: + self.fail('%s is not in the valid range of %s to %s.' + % (rv, self.min, self.max), param, ctx) + return rv + + def __repr__(self): + return 'IntRange(%r, %r)' % (self.min, self.max) + + +class BoolParamType(ParamType): + name = 'boolean' + + def convert(self, value, param, ctx): + if isinstance(value, bool): + return bool(value) + value = value.lower() + if value in ('true', '1', 'yes', 'y'): + return True + elif value in ('false', '0', 'no', 'n'): + return False + self.fail('%s is not a valid boolean' % value, param, ctx) + + def __repr__(self): + return 'BOOL' + + +class FloatParamType(ParamType): + name = 'float' + + def convert(self, value, param, ctx): + try: + return float(value) + except (UnicodeError, ValueError): + self.fail('%s is not a valid floating point value' % + value, param, ctx) + + def __repr__(self): + return 'FLOAT' + + +class UUIDParameterType(ParamType): + name = 'uuid' + + def convert(self, value, param, ctx): + import uuid + try: + if PY2 and isinstance(value, text_type): + value = value.encode('ascii') + return uuid.UUID(value) + except (UnicodeError, ValueError): + self.fail('%s is not a valid UUID value' % value, param, ctx) + + def __repr__(self): + return 'UUID' + + +class File(ParamType): + """Declares a parameter to be a file for reading or writing. The file + is automatically closed once the context tears down (after the command + finished working). + + Files can be opened for reading or writing. The special value ``-`` + indicates stdin or stdout depending on the mode. + + By default, the file is opened for reading text data, but it can also be + opened in binary mode or for writing. The encoding parameter can be used + to force a specific encoding. + + The `lazy` flag controls if the file should be opened immediately or + upon first IO. The default is to be non lazy for standard input and + output streams as well as files opened for reading, lazy otherwise. + + Starting with Click 2.0, files can also be opened atomically in which + case all writes go into a separate file in the same folder and upon + completion the file will be moved over to the original location. This + is useful if a file regularly read by other users is modified. + + See :ref:`file-args` for more information. 
+ """ + name = 'filename' + envvar_list_splitter = os.path.pathsep + + def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, + atomic=False): + self.mode = mode + self.encoding = encoding + self.errors = errors + self.lazy = lazy + self.atomic = atomic + + def resolve_lazy_flag(self, value): + if self.lazy is not None: + return self.lazy + if value == '-': + return False + elif 'w' in self.mode: + return True + return False + + def convert(self, value, param, ctx): + try: + if hasattr(value, 'read') or hasattr(value, 'write'): + return value + + lazy = self.resolve_lazy_flag(value) + + if lazy: + f = LazyFile(value, self.mode, self.encoding, self.errors, + atomic=self.atomic) + if ctx is not None: + ctx.call_on_close(f.close_intelligently) + return f + + f, should_close = open_stream(value, self.mode, + self.encoding, self.errors, + atomic=self.atomic) + # If a context is provided, we automatically close the file + # at the end of the context execution (or flush out). If a + # context does not exist, it's the caller's responsibility to + # properly close the file. This for instance happens when the + # type is used with prompts. + if ctx is not None: + if should_close: + ctx.call_on_close(safecall(f.close)) + else: + ctx.call_on_close(safecall(f.flush)) + return f + except (IOError, OSError) as e: + self.fail('Could not open file: %s: %s' % ( + filename_to_ui(value), + get_streerror(e), + ), param, ctx) + + +class Path(ParamType): + """The path type is similar to the :class:`File` type but it performs + different checks. First of all, instead of returning an open file + handle it returns just the filename. Secondly, it can perform various + basic checks about what the file or directory should be. + + .. versionchanged:: 6.0 + `allow_dash` was added. + + :param exists: if set to true, the file or directory needs to exist for + this value to be valid. If this is not required and a + file does indeed not exist, then all further checks are + silently skipped. + :param file_okay: controls if a file is a possible value. + :param dir_okay: controls if a directory is a possible value. + :param writable: if true, a writable check is performed. + :param readable: if true, a readable check is performed. + :param resolve_path: if this is true, then the path is fully resolved + before the value is passed onwards. This means + that it's absolute and symlinks are resolved. + :param allow_dash: If this is set to `True`, a single dash to indicate + standard streams is permitted. + :param type: optionally a string type that should be used to + represent the path. The default is `None` which + means the return value will be either bytes or + unicode depending on what makes most sense given the + input data Click deals with. 
+ """ + envvar_list_splitter = os.path.pathsep + + def __init__(self, exists=False, file_okay=True, dir_okay=True, + writable=False, readable=True, resolve_path=False, + allow_dash=False, path_type=None): + self.exists = exists + self.file_okay = file_okay + self.dir_okay = dir_okay + self.writable = writable + self.readable = readable + self.resolve_path = resolve_path + self.allow_dash = allow_dash + self.type = path_type + + if self.file_okay and not self.dir_okay: + self.name = 'file' + self.path_type = 'File' + if self.dir_okay and not self.file_okay: + self.name = 'directory' + self.path_type = 'Directory' + else: + self.name = 'path' + self.path_type = 'Path' + + def coerce_path_result(self, rv): + if self.type is not None and not isinstance(rv, self.type): + if self.type is text_type: + rv = rv.decode(get_filesystem_encoding()) + else: + rv = rv.encode(get_filesystem_encoding()) + return rv + + def convert(self, value, param, ctx): + rv = value + + is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') + + if not is_dash: + if self.resolve_path: + rv = os.path.realpath(rv) + + try: + st = os.stat(rv) + except OSError: + if not self.exists: + return self.coerce_path_result(rv) + self.fail('%s "%s" does not exist.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + + if not self.file_okay and stat.S_ISREG(st.st_mode): + self.fail('%s "%s" is a file.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if not self.dir_okay and stat.S_ISDIR(st.st_mode): + self.fail('%s "%s" is a directory.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if self.writable and not os.access(value, os.W_OK): + self.fail('%s "%s" is not writable.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + if self.readable and not os.access(value, os.R_OK): + self.fail('%s "%s" is not readable.' % ( + self.path_type, + filename_to_ui(value) + ), param, ctx) + + return self.coerce_path_result(rv) + + +class Tuple(CompositeParamType): + """The default behavior of Click is to apply a type on a value directly. + This works well in most cases, except for when `nargs` is set to a fixed + count and different types should be used for different items. In this + case the :class:`Tuple` type can be used. This type can only be used + if `nargs` is set to a fixed number. + + For more information see :ref:`tuple-type`. + + This can be selected by using a Python tuple literal as a type. + + :param types: a list of types that should be used for the tuple items. + """ + + def __init__(self, types): + self.types = [convert_type(ty) for ty in types] + + @property + def name(self): + return "<" + " ".join(ty.name for ty in self.types) + ">" + + @property + def arity(self): + return len(self.types) + + def convert(self, value, param, ctx): + if len(value) != len(self.types): + raise TypeError('It would appear that nargs is set to conflict ' + 'with the composite type arity.') + return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) + + +def convert_type(ty, default=None): + """Converts a callable or python ty into the most appropriate param + ty. 
+ """ + guessed_type = False + if ty is None and default is not None: + if isinstance(default, tuple): + ty = tuple(map(type, default)) + else: + ty = type(default) + guessed_type = True + + if isinstance(ty, tuple): + return Tuple(ty) + if isinstance(ty, ParamType): + return ty + if ty is text_type or ty is str or ty is None: + return STRING + if ty is int: + return INT + # Booleans are only okay if not guessed. This is done because for + # flags the default value is actually a bit of a lie in that it + # indicates which of the flags is the one we want. See get_default() + # for more information. + if ty is bool and not guessed_type: + return BOOL + if ty is float: + return FLOAT + if guessed_type: + return STRING + + # Catch a common mistake + if __debug__: + try: + if issubclass(ty, ParamType): + raise AssertionError('Attempted to use an uninstantiated ' + 'parameter type (%s).' % ty) + except TypeError: + pass + return FuncParamType(ty) + + +#: A dummy parameter type that just does nothing. From a user's +#: perspective this appears to just be the same as `STRING` but internally +#: no string conversion takes place. This is necessary to achieve the +#: same bytes/unicode behavior on Python 2/3 in situations where you want +#: to not convert argument types. This is usually useful when working +#: with file paths as they can appear in bytes and unicode. +#: +#: For path related uses the :class:`Path` type is a better choice but +#: there are situations where an unprocessed type is useful which is why +#: it is is provided. +#: +#: .. versionadded:: 4.0 +UNPROCESSED = UnprocessedParamType() + +#: A unicode string parameter type which is the implicit default. This +#: can also be selected by using ``str`` as type. +STRING = StringParamType() + +#: An integer parameter. This can also be selected by using ``int`` as +#: type. +INT = IntParamType() + +#: A floating point value parameter. This can also be selected by using +#: ``float`` as type. +FLOAT = FloatParamType() + +#: A boolean parameter. This is the default for boolean flags. This can +#: also be selected by using ``bool`` as a type. +BOOL = BoolParamType() + +#: A UUID parameter. 
+UUID = UUIDParameterType()
diff --git a/venv/lib/python2.7/site-packages/click/utils.py b/venv/lib/python2.7/site-packages/click/utils.py
new file mode 100644
index 0000000..eee626d
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/click/utils.py
@@ -0,0 +1,415 @@
+import os
+import sys
+
+from .globals import resolve_color_default
+
+from ._compat import text_type, open_stream, get_filesystem_encoding, \
+    get_streerror, string_types, PY2, binary_streams, text_streams, \
+    filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
+    _default_text_stdout, _default_text_stderr, is_bytes, WIN
+
+if not PY2:
+    from ._compat import _find_binary_writer
+elif WIN:
+    from ._winconsole import _get_windows_argv, \
+        _hash_py_argv, _initial_argv_hash
+
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+    return '-'.join(name.split()).lower()
+
+
+def safecall(func):
+    """Wraps a function so that it swallows exceptions."""
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            pass
+    return wrapper
+
+
+def make_str(value):
+    """Converts a value into a valid string."""
+    if isinstance(value, bytes):
+        try:
+            return value.decode(get_filesystem_encoding())
+        except UnicodeError:
+            return value.decode('utf-8', 'replace')
+    return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+    words = help.split()
+    total_length = 0
+    result = []
+    done = False
+
+    for word in words:
+        if word[-1:] == '.':
+            done = True
+        new_length = result and 1 + len(word) or len(word)
+        if total_length + new_length > max_length:
+            result.append('...')
+            done = True
+        else:
+            if result:
+                result.append(' ')
+            result.append(word)
+        if done:
+            break
+        total_length += new_length
+
+    return ''.join(result)
+
+
+class LazyFile(object):
+    """A lazy file works like a regular file but it does not fully open
+    the file; it does, however, perform some basic checks early to see if
+    the filename parameter makes sense. This is useful for safely opening
+    files for writing.
+    """
+
+    def __init__(self, filename, mode='r', encoding=None, errors='strict',
+                 atomic=False):
+        self.name = filename
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.atomic = atomic
+
+        if filename == '-':
+            self._f, self.should_close = open_stream(filename, mode,
+                                                     encoding, errors)
+        else:
+            if 'r' in mode:
+                # Open and close the file in case we're opening it for
+                # reading so that we can catch at least some errors in
+                # some cases early.
+                open(filename, mode).close()
+            self._f = None
+            self.should_close = True
+
+    def __getattr__(self, name):
+        return getattr(self.open(), name)
+
+    def __repr__(self):
+        if self._f is not None:
+            return repr(self._f)
+        return '<unopened file %r %s>' % (self.name, self.mode)
+
+    def open(self):
+        """Opens the file if it's not yet open. This call might fail with
+        a :exc:`FileError`. Not handling this error will produce an error
+        that Click shows.
+        """
+        if self._f is not None:
+            return self._f
+        try:
+            rv, self.should_close = open_stream(self.name, self.mode,
+                                                self.encoding,
+                                                self.errors,
+                                                atomic=self.atomic)
+        except (IOError, OSError) as e:
+            from .exceptions import FileError
+            raise FileError(self.name, hint=get_streerror(e))
+        self._f = rv
+        return rv
+
+    def close(self):
+        """Closes the underlying file, no matter what."""
+        if self._f is not None:
+            self._f.close()
+
+    def close_intelligently(self):
+        """This function only closes the file if it was opened by the lazy
+        file wrapper.
For instance this will never close stdin. + """ + if self.should_close: + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + self.close_intelligently() + + def __iter__(self): + self.open() + return iter(self._f) + + +class KeepOpenFile(object): + + def __init__(self, file): + self._file = file + + def __getattr__(self, name): + return getattr(self._file, name) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + pass + + def __repr__(self): + return repr(self._file) + + def __iter__(self): + return iter(self._file) + + +def echo(message=None, file=None, nl=True, err=False, color=None): + """Prints a message plus a newline to the given file or stdout. On + first sight, this looks like the print function, but it has improved + support for handling Unicode and binary data that does not fail no + matter how badly configured the system is. + + Primarily it means that you can print binary data as well as Unicode + data on both 2.x and 3.x to the given file in the most appropriate way + possible. This is a very carefree function as in that it will try its + best to not fail. As of Click 6.0 this includes support for unicode + output on the Windows console. + + In addition to that, if `colorama`_ is installed, the echo function will + also support clever handling of ANSI codes. Essentially it will then + do the following: + + - add transparent handling of ANSI color codes on Windows. + - hide ANSI codes automatically if the destination file is not a + terminal. + + .. _colorama: http://pypi.python.org/pypi/colorama + + .. versionchanged:: 6.0 + As of Click 6.0 the echo function will properly support unicode + output on the windows console. Not that click does not modify + the interpreter in any way which means that `sys.stdout` or the + print statement or function will still not provide unicode support. + + .. versionchanged:: 2.0 + Starting with version 2.0 of Click, the echo function will work + with colorama if it's installed. + + .. versionadded:: 3.0 + The `err` parameter was added. + + .. versionchanged:: 4.0 + Added the `color` flag. + + :param message: the message to print + :param file: the file to write to (defaults to ``stdout``) + :param err: if set to true the file defaults to ``stderr`` instead of + ``stdout``. This is faster and easier than calling + :func:`get_text_stderr` yourself. + :param nl: if set to `True` (the default) a newline is printed afterwards. + :param color: controls if the terminal supports ANSI colors or not. The + default is autodetection. + """ + if file is None: + if err: + file = _default_text_stderr() + else: + file = _default_text_stdout() + + # Convert non bytes/text into the native string type. + if message is not None and not isinstance(message, echo_native_types): + message = text_type(message) + + if nl: + message = message or u'' + if isinstance(message, text_type): + message += u'\n' + else: + message += b'\n' + + # If there is a message, and we're in Python 3, and the value looks + # like bytes, we manually need to find the binary stream and write the + # message in there. This is done separately so that most stream + # types will work as you would expect. Eg: you can write to StringIO + # for other cases. + if message and not PY2 and is_bytes(message): + binary_file = _find_binary_writer(file) + if binary_file is not None: + file.flush() + binary_file.write(message) + binary_file.flush() + return + + # ANSI-style support. 
If there is no message or we are dealing with
+    # bytes nothing is happening. If we are connected to a file we want
+    # to strip colors. If we are on Windows we either wrap the stream
+    # to strip the color or we use the colorama support to translate the
+    # ansi codes to API calls.
+    if message and not is_bytes(message):
+        color = resolve_color_default(color)
+        if should_strip_ansi(file, color):
+            message = strip_ansi(message)
+        elif WIN:
+            if auto_wrap_for_ansi is not None:
+                file = auto_wrap_for_ansi(file)
+            elif not color:
+                message = strip_ansi(message)
+
+    if message:
+        file.write(message)
+        file.flush()
+
+
+def get_binary_stream(name):
+    """Returns a system stream for byte processing. This essentially
+    returns the stream from the sys module with the given name but it
+    solves some compatibility issues between different Python versions.
+    Primarily this function is necessary for getting binary streams on
+    Python 3.
+
+    :param name: the name of the stream to open. Valid names are ``'stdin'``,
+                 ``'stdout'`` and ``'stderr'``
+    """
+    opener = binary_streams.get(name)
+    if opener is None:
+        raise TypeError('Unknown standard stream %r' % name)
+    return opener()
+
+
+def get_text_stream(name, encoding=None, errors='strict'):
+    """Returns a system stream for text processing. This usually returns
+    a wrapped stream around a binary stream returned from
+    :func:`get_binary_stream` but it also can take shortcuts on Python 3
+    for already correctly configured streams.
+
+    :param name: the name of the stream to open. Valid names are ``'stdin'``,
+                 ``'stdout'`` and ``'stderr'``
+    :param encoding: overrides the detected default encoding.
+    :param errors: overrides the default error mode.
+    """
+    opener = text_streams.get(name)
+    if opener is None:
+        raise TypeError('Unknown standard stream %r' % name)
+    return opener(encoding, errors)
+
+
+def open_file(filename, mode='r', encoding=None, errors='strict',
+              lazy=False, atomic=False):
+    """This is similar to how the :class:`File` works but for manual
+    usage. Files are opened non-lazily by default. This can open regular
+    files as well as stdin/stdout if ``'-'`` is passed.
+
+    If stdin/stdout is returned the stream is wrapped so that the context
+    manager will not close the stream accidentally. This makes it possible
+    to always use the function like this without having to worry about
+    accidentally closing a standard stream::
+
+        with open_file(filename) as f:
+            ...
+
+    .. versionadded:: 3.0
+
+    :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+    :param mode: the mode in which to open the file.
+    :param encoding: the encoding to use.
+    :param errors: the error handling for this file.
+    :param lazy: can be flipped to true to open the file lazily.
+    :param atomic: in atomic mode writes go into a temporary file and it's
+                   moved on close.
+    """
+    if lazy:
+        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+    f, should_close = open_stream(filename, mode, encoding, errors,
+                                  atomic=atomic)
+    if not should_close:
+        f = KeepOpenFile(f)
+    return f
+
+
+def get_os_args():
+    """This returns the argument part of sys.argv in the most appropriate
+    form for processing. What this means is that this return value is in
+    a format that works for Click to process but does not necessarily
+    correspond well to what's actually standard for the interpreter.
+
+    On most environments the return value is ``sys.argv[1:]`` unchanged.
+    However if you are on Windows and running Python 2 the return value
+    will actually be a list of unicode strings instead because the
+    default behavior on that platform otherwise will not be able to
+    carry all possible values that sys.argv can have.
+
+    .. versionadded:: 6.0
+    """
+    # We can only extract the unicode argv if sys.argv has not been
+    # changed since the startup of the application.
+    if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
+        return _get_windows_argv()
+    return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+    """Formats a filename for user display. The main purpose of this
+    function is to ensure that the filename can be displayed at all. This
+    will decode the filename to unicode if necessary in a way that it will
+    not fail. Optionally, it can shorten the filename to not include the
+    full path to the filename.
+
+    :param filename: formats a filename for UI display. This will also convert
+                     the filename into unicode without failing.
+    :param shorten: this optionally shortens the filename to strip off the
+                    path that leads up to it.
+    """
+    if shorten:
+        filename = os.path.basename(filename)
+    return filename_to_ui(filename)
+
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+    r"""Returns the config folder for the application. The default behavior
+    is to return whatever is most appropriate for the operating system.
+
+    To give you an idea, for an app called ``"Foo Bar"``, something like
+    the following folders could be returned:
+
+    Mac OS X:
+      ``~/Library/Application Support/Foo Bar``
+    Mac OS X (POSIX):
+      ``~/.foo-bar``
+    Unix:
+      ``~/.config/foo-bar``
+    Unix (POSIX):
+      ``~/.foo-bar``
+    Win XP (roaming):
+      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+    Win XP (not roaming):
+      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+    Win 7 (roaming):
+      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+    Win 7 (not roaming):
+      ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+    .. versionadded:: 2.0
+
+    :param app_name: the application name. This should be properly capitalized
+                     and can contain whitespace.
+    :param roaming: controls if the folder should be roaming or not on Windows.
+                    Has no effect otherwise.
+    :param force_posix: if this is set to `True` then on any POSIX system the
+                        folder will be stored in the home folder with a leading
+                        dot instead of the XDG config home or darwin's
+                        application support folder.
+    """
+    if WIN:
+        key = roaming and 'APPDATA' or 'LOCALAPPDATA'
+        folder = os.environ.get(key)
+        if folder is None:
+            folder = os.path.expanduser('~')
+        return os.path.join(folder, app_name)
+    if force_posix:
+        return os.path.join(os.path.expanduser('~/.'
+ _posixify(app_name))) + if sys.platform == 'darwin': + return os.path.join(os.path.expanduser( + '~/Library/Application Support'), app_name) + return os.path.join( + os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), + _posixify(app_name)) diff --git a/venv/lib/python2.7/site-packages/dateutil/__init__.py b/venv/lib/python2.7/site-packages/dateutil/__init__.py new file mode 100644 index 0000000..b78ef9d --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +__version__ = "2.5.3" diff --git a/venv/lib/python2.7/site-packages/dateutil/easter.py b/venv/lib/python2.7/site-packages/dateutil/easter.py new file mode 100644 index 0000000..8d30c4e --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + EASTER_JULIAN = 1 + EASTER_ORTHODOX = 2 + EASTER_WESTERN = 3 + + The default method is method 3. 
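    An editorial doctest-style illustration (dates are from the published
    Western and Orthodox calendars)::

        >>> from dateutil.easter import easter, EASTER_ORTHODOX
        >>> easter(2016)
        datetime.date(2016, 3, 27)
        >>> easter(2016, EASTER_ORTHODOX)
        datetime.date(2016, 5, 1)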
+ + More about the algorithm may be found at: + + http://users.chariot.net.au/~gmarts/eastalg.htm + + and + + http://www.tondering.dk/claus/calendar.html + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/venv/lib/python2.7/site-packages/dateutil/parser.py b/venv/lib/python2.7/site-packages/dateutil/parser.py new file mode 100644 index 0000000..6738791 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/parser.py @@ -0,0 +1,1356 @@ +# -*- coding:iso-8859-1 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. + +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import string +import time +import collections +import re +from io import StringIO +from calendar import monthrange, isleap + +from six import text_type, binary_type, integer_types + +from . import relativedelta +from . 
import tz + +__all__ = ["parse", "parserinfo"] + + +class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([\.,])") + + def __init__(self, instream): + if isinstance(instream, binary_type): + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' 
+ else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. 
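    For illustration (an editorial addition), how these two flags
    disambiguate ``01/05/09``::

        >>> from dateutil.parser import parse
        >>> parse("01/05/09")
        datetime.datetime(2009, 1, 5, 0, 0)
        >>> parse("01/05/09", dayfirst=True)
        datetime.datetime(2009, 5, 1, 0, 0)
        >>> parse("01/05/09", yearfirst=True)
        datetime.datetime(2001, 5, 9, 0, 0)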
+ """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), + ("Wed", "Wednesday"), + ("Thu", "Thursday"), + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + if len(name) >= 3: + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + if len(name) >= 3: + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + if year < 100 and not century_specified: + year += self._century + if abs(year - self._year) >= 50: + if year < self._year: + year += 100 + else: + year -= 100 + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, tzstr, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.tzstr = tzstr + + @staticmethod + def token_could_be_year(token, year): + try: + return int(token) == year + except ValueError: + return False + + @staticmethod + def find_potential_year_tokens(year, tokens): + return [token for token in tokens if _ymd.token_could_be_year(token, year)] + + def find_probable_year_index(self, tokens): + """ + attempt to deduce if a pre 100 year was lost + due to padded zeros being taken off + """ + for index, token in enumerate(self): + potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens) + if 
len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2: + return index + + def append(self, val): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + elif val > 100: + self.century_specified = True + + super(self.__class__, self).append(int(val)) + + def resolve_ymd(self, mstridx, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): + # One member, or two members with a month string + if mstridx != -1: + month = self[mstridx] + del self[mstridx] + + if len_ymd > 1 or mstridx == -1: + if self[0] > 31: + year = self[0] + else: + day = self[0] + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + day, month, year = self + + elif mstridx == 2: + # WTF!? + if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if self[0] > 31 or \ + self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \ + (yearfirst and self[1] <= 12 and self[2] <= 31): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in minutes or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param **kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. + """ + + if default is None: + effective_dt = datetime.datetime.now() + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + else: + effective_dt = default + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format") + + if len(res) == 0: + raise ValueError("String does not contain a date.") + + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back to + # the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + ret = default.replace(**repl) + + if res.weekday is not None and not res.day: + ret = ret+relativedelta.relativedelta(weekday=res.weekday) + + if not ignoretz: + if (isinstance(tzinfos, collections.Callable) or + tzinfos and res.tzname in tzinfos): + + if isinstance(tzinfos, collections.Callable): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(res.tzname, tzdata) + else: + raise ValueError("Offset must be tzinfo subclass, " + "tz string, or int offset.") + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=tz.tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=tz.tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. 
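    An editorial illustration of the ``default`` replacement behavior
    described for :func:`parse` above::

        >>> import datetime
        >>> from dateutil.parser import parse
        >>> parse("Jan 15", default=datetime.datetime(2003, 9, 25))
        datetime.datetime(2003, 1, 15, 0, 0)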
+ + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + # keep up with the last token skipped so we can recombine + # consecutively skipped tokens (-2 for when i begins at 0). 
last_skipped_token_i = -2
+        skipped_tokens = list()
+
+        try:
+            # year/month/day list
+            ymd = _ymd(timestr)
+
+            # Index of the month string in ymd
+            mstridx = -1
+
+            len_l = len(l)
+            i = 0
+            while i < len_l:
+
+                # Check if it's a number
+                try:
+                    value_repr = l[i]
+                    value = float(value_repr)
+                except ValueError:
+                    value = None
+
+                if value is not None:
+                    # Token is a number
+                    len_li = len(l[i])
+                    i += 1
+
+                    if (len(ymd) == 3 and len_li in (2, 4)
+                        and res.hour is None and (i >= len_l or (l[i] != ':' and
+                                                  info.hms(l[i]) is None))):
+                        # 19990101T23[59]
+                        s = l[i-1]
+                        res.hour = int(s[:2])
+
+                        if len_li == 4:
+                            res.minute = int(s[2:])
+
+                    elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
+                        # YYMMDD or HHMMSS[.ss]
+                        s = l[i-1]
+
+                        if not ymd and l[i-1].find('.') == -1:
+                            #ymd.append(info.convertyear(int(s[:2])))
+
+                            ymd.append(s[:2])
+                            ymd.append(s[2:4])
+                            ymd.append(s[4:])
+                        else:
+                            # 19990101T235959[.59]
+                            res.hour = int(s[:2])
+                            res.minute = int(s[2:4])
+                            res.second, res.microsecond = _parsems(s[4:])
+
+                    elif len_li in (8, 12, 14):
+                        # YYYYMMDD
+                        s = l[i-1]
+                        ymd.append(s[:4])
+                        ymd.append(s[4:6])
+                        ymd.append(s[6:8])
+
+                        if len_li > 8:
+                            res.hour = int(s[8:10])
+                            res.minute = int(s[10:12])
+
+                            if len_li > 12:
+                                res.second = int(s[12:])
+
+                    elif ((i < len_l and info.hms(l[i]) is not None) or
+                          (i+1 < len_l and l[i] == ' ' and
+                           info.hms(l[i+1]) is not None)):
+
+                        # HH[ ]h or MM[ ]m or SS[.ss][ ]s
+                        if l[i] == ' ':
+                            i += 1
+
+                        idx = info.hms(l[i])
+
+                        while True:
+                            if idx == 0:
+                                res.hour = int(value)
+
+                                if value % 1:
+                                    res.minute = int(60*(value % 1))
+
+                            elif idx == 1:
+                                res.minute = int(value)
+
+                                if value % 1:
+                                    res.second = int(60*(value % 1))
+
+                            elif idx == 2:
+                                res.second, res.microsecond = \
+                                    _parsems(value_repr)
+
+                            i += 1
+
+                            if i >= len_l or idx == 2:
+                                break
+
+                            # 12h00
+                            try:
+                                value_repr = l[i]
+                                value = float(value_repr)
+                            except ValueError:
+                                break
+                            else:
+                                i += 1
+                                idx += 1
+
+                                if i < len_l:
+                                    newidx = info.hms(l[i])
+
+                                    if newidx is not None:
+                                        idx = newidx
+
+                    elif (i == len_l and l[i-2] == ' ' and
+                          info.hms(l[i-3]) is not None):
+                        # X h MM or X m SS
+                        idx = info.hms(l[i-3]) + 1
+
+                        if idx == 1:
+                            res.minute = int(value)
+
+                            if value % 1:
+                                res.second = int(60*(value % 1))
+                        elif idx == 2:
+                            res.second, res.microsecond = \
+                                _parsems(value_repr)
+                            i += 1
+
+                    elif i+1 < len_l and l[i] == ':':
+                        # HH:MM[:SS[.ss]]
+                        res.hour = int(value)
+                        i += 1
+                        value = float(l[i])
+                        res.minute = int(value)
+
+                        if value % 1:
+                            res.second = int(60*(value % 1))
+
+                        i += 1
+
+                        if i < len_l and l[i] == ':':
+                            res.second, res.microsecond = _parsems(l[i+1])
+                            i += 2
+
+                    elif i < len_l and l[i] in ('-', '/', '.'):
+                        sep = l[i]
+                        ymd.append(value_repr)
+                        i += 1
+
+                        if i < len_l and not info.jump(l[i]):
+                            try:
+                                # 01-01[-01]
+                                ymd.append(l[i])
+                            except ValueError:
+                                # 01-Jan[-01]
+                                value = info.month(l[i])
+
+                                if value is not None:
+                                    ymd.append(value)
+                                    assert mstridx == -1
+                                    mstridx = len(ymd)-1
+                                else:
+                                    return None, None
+
+                            i += 1
+
+                            if i < len_l and l[i] == sep:
+                                # We have three members
+                                i += 1
+                                value = info.month(l[i])
+
+                                if value is not None:
+                                    ymd.append(value)
+                                    # The month index must be asserted unset
+                                    # *before* it is recorded here.
+                                    assert mstridx == -1
+                                    mstridx = len(ymd)-1
+                                else:
+                                    ymd.append(l[i])
+
+                                i += 1
+                    elif i >= len_l or info.jump(l[i]):
+                        if i+1 < len_l and info.ampm(l[i+1]) is not None:
+                            # 12 am
+                            res.hour = int(value)
+
+                            if res.hour < 12 and info.ampm(l[i+1]) == 1:
+                                res.hour += 12
+                            elif res.hour == 12 and info.ampm(l[i+1]) == 0:
+                                res.hour = 0
+
+                            i += 1
+                        else:
+                            # Year, month or day
+                            ymd.append(value)
+                            i += 1
+
elif info.ampm(l[i]) is not None: + + # 12am + res.hour = int(value) + + if res.hour < 12 and info.ampm(l[i]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i]) == 0: + res.hour = 0 + i += 1 + + elif not fuzzy: + return None, None + else: + i += 1 + continue + + # Check weekday + value = info.weekday(l[i]) + if value is not None: + res.weekday = value + i += 1 + continue + + # Check month name + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + + i += 1 + if i < len_l: + if l[i] in ('-', '/'): + # Jan-01[-99] + sep = l[i] + i += 1 + ymd.append(l[i]) + i += 1 + + if i < len_l and l[i] == sep: + # Jan-01-99 + i += 1 + ymd.append(l[i]) + i += 1 + + elif (i+3 < len_l and l[i] == l[i+2] == ' ' + and info.pertain(l[i+1])): + # Jan of 01 + # In this case, 01 is clearly year + try: + value = int(l[i+3]) + except ValueError: + # Wrong guess + pass + else: + # Convert it here to become unambiguous + ymd.append(str(info.convertyear(value))) + i += 4 + continue + + # Check am/pm + value = info.ampm(l[i]) + if value is not None: + # For fuzzy parsing, 'a' or 'am' (both valid English words) + # may erroneously trigger the AM/PM flag. Deal with that + # here. + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. + if fuzzy and res.ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if res.hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with ' + + 'AM or PM flag.') + elif not 0 <= res.hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for ' + + '12-hour clock.') + + if val_is_ampm: + if value == 1 and res.hour < 12: + res.hour += 12 + elif value == 0 and res.hour == 12: + res.hour = 0 + + res.ampm = value + + i += 1 + continue + + # Check for a timezone name + if (res.hour is not None and len(l[i]) <= 5 and + res.tzname is None and res.tzoffset is None and + not [x for x in l[i] if x not in + string.ascii_uppercase]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + i += 1 + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i < len_l and l[i] in ('+', '-'): + l[i] = ('+', '-')[l[i] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. 
+                            res.tzname = None
+
+                    continue
+
+                # Check for a numbered timezone
+                if res.hour is not None and l[i] in ('+', '-'):
+                    signal = (-1, 1)[l[i] == '+']
+                    i += 1
+                    len_li = len(l[i])
+
+                    if len_li == 4:
+                        # -0300
+                        res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
+                    elif i+1 < len_l and l[i+1] == ':':
+                        # -03:00
+                        res.tzoffset = int(l[i])*3600+int(l[i+2])*60
+                        i += 2
+                    elif len_li <= 2:
+                        # -[0]3
+                        res.tzoffset = int(l[i][:2])*3600
+                    else:
+                        return None, None
+                    i += 1
+
+                    res.tzoffset *= signal
+
+                    # Look for a timezone name between parentheses
+                    if (i+3 < len_l and
+                        info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
+                            3 <= len(l[i+2]) <= 5 and
+                            not [x for x in l[i+2]
+                                 if x not in string.ascii_uppercase]):
+                        # -0300 (BRST)
+                        res.tzname = l[i+2]
+                        i += 4
+                    continue
+
+                # Check jumps
+                if not (info.jump(l[i]) or fuzzy):
+                    return None, None
+
+                if last_skipped_token_i == i - 1:
+                    # recombine the tokens
+                    skipped_tokens[-1] += l[i]
+                else:
+                    # just append
+                    skipped_tokens.append(l[i])
+                last_skipped_token_i = i
+                i += 1
+
+            # Process year/month/day
+            year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
+            if year is not None:
+                res.year = year
+                res.century_specified = ymd.century_specified
+
+            if month is not None:
+                res.month = month
+
+            if day is not None:
+                res.day = day
+
+        except (IndexError, ValueError, AssertionError):
+            return None, None
+
+        if not info.validate(res):
+            return None, None
+
+        if fuzzy_with_tokens:
+            return res, tuple(skipped_tokens)
+        else:
+            return res, None
+
+DEFAULTPARSER = parser()
+
+
+def parse(timestr, parserinfo=None, **kwargs):
+    """
+
+    Parse a string in one of the supported formats, using the
+    ``parserinfo`` parameters.
+
+    :param timestr:
+        A string containing a date/time stamp.
+
+    :param parserinfo:
+        A :class:`parserinfo` object containing parameters for the parser.
+        If ``None``, the default arguments to the :class:`parserinfo`
+        constructor are used.
+
+    The ``**kwargs`` parameter takes the following keyword arguments:
+
+    :param default:
+        The default datetime object; if this is a datetime object and not
+        ``None``, elements specified in ``timestr`` replace elements in the
+        default object.
+
+    :param ignoretz:
+        If set ``True``, time zones in parsed strings are ignored and a naive
+        :class:`datetime` object is returned.
+
+    :param tzinfos:
+        Additional time zone names / aliases which may be present in the
+        string. This argument maps time zone names (and optionally offsets
+        from those time zones) to time zones. This parameter can be a
+        dictionary with timezone aliases mapping time zone names to time
+        zones or a function taking two parameters (``tzname`` and
+        ``tzoffset``) and returning a time zone.
+
+        The timezones to which the names are mapped can be an integer
+        offset from UTC in seconds or a :class:`tzinfo` object.
+
+        .. doctest::
+           :options: +NORMALIZE_WHITESPACE
+
+            >>> from dateutil.parser import parse
+            >>> from dateutil.tz import gettz
+            >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+            datetime.datetime(2012, 1, 19, 17, 21,
+                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+        This parameter is ignored if ``ignoretz`` is set.
+
+    :param dayfirst:
+        Whether to interpret the first value in an ambiguous 3-integer date
+        (e.g. 01/05/09) as the day (``True``) or month (``False``).
If
+        ``yearfirst`` is set to ``True``, this distinguishes between YDM and
+        YMD. If set to ``None``, this value is retrieved from the current
+        :class:`parserinfo` object (which itself defaults to ``False``).
+
+    :param yearfirst:
+        Whether to interpret the first value in an ambiguous 3-integer date
+        (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
+        be the year, otherwise the last number is taken to be the year. If
+        this is set to ``None``, the value is retrieved from the current
+        :class:`parserinfo` object (which itself defaults to ``False``).
+
+    :param fuzzy:
+        Whether to allow fuzzy parsing, allowing for strings like "Today is
+        January 1, 2047 at 8:21:00AM".
+
+    :param fuzzy_with_tokens:
+        If ``True``, ``fuzzy`` is automatically set to True, and the parser
+        will return a tuple where the first element is the parsed
+        :class:`datetime.datetime` datetimestamp and the second element is
+        a tuple containing the portions of the string which were ignored:
+
+        .. doctest::
+
+            >>> from dateutil.parser import parse
+            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+    :return:
+        Returns a :class:`datetime.datetime` object or, if the
+        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+        first element being a :class:`datetime.datetime` object, the second
+        a tuple containing the fuzzy tokens.
+
+    :raises ValueError:
+        Raised for invalid or unknown string format, if the provided
+        :class:`tzinfo` is not in a valid format, or if an invalid date
+        would be created.
+
+    :raises OverflowError:
+        Raised if the parsed date exceeds the largest valid C integer on
+        your system.
+    """
+    if parserinfo:
+        return parser(parserinfo).parse(timestr, **kwargs)
+    else:
+        return DEFAULTPARSER.parse(timestr, **kwargs)
+
+
+class _tzparser(object):
+
+    class _result(_resultbase):
+
+        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
+                     "start", "end"]
+
+        class _attr(_resultbase):
+            __slots__ = ["month", "week", "weekday",
+                         "yday", "jyday", "day", "time"]
+
+        def __repr__(self):
+            return self._repr("")
+
+        def __init__(self):
+            _resultbase.__init__(self)
+            self.start = self._attr()
+            self.end = self._attr()
+
+    def parse(self, tzstr):
+        res = self._result()
+        l = _timelex.split(tzstr)
+        try:
+
+            len_l = len(l)
+
+            i = 0
+            while i < len_l:
+                # BRST+3[BRDT[+2]]
+                j = i
+                while j < len_l and not [x for x in l[j]
+                                         if x in "0123456789:,-+"]:
+                    j += 1
+                if j != i:
+                    if not res.stdabbr:
+                        offattr = "stdoffset"
+                        res.stdabbr = "".join(l[i:j])
+                    else:
+                        offattr = "dstoffset"
+                        res.dstabbr = "".join(l[i:j])
+                    i = j
+                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+                                       "0123456789")):
+                        if l[i] in ('+', '-'):
+                            # Yes, that's right.  See the TZ variable
+                            # documentation.
+ signal = (1, -1)[l[i] == '+'] + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2])*3600 + + int(l[i][2:])*60)*signal) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i])*3600+int(l[i+2])*60)*signal) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2])*3600*signal) + else: + return None + i += 1 + if res.dstabbr: + break + else: + break + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + i += 2 + if l[i] == '-': + value = int(l[i+1])*-1 + i += 1 + else: + value = int(l[i]) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i])-1) % 7 + else: + x.day = int(l[i]) + i += 2 + x.time = int(l[i]) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + i += 1 + else: + signal = 1 + res.dstoffset = (res.stdoffset+int(l[i]))*signal + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + i += 1 + x.month = int(l[i]) + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.weekday = (int(l[i])-1) % 7 + else: + # year day (zero based) + x.yday = int(l[i])+1 + + i += 1 + + if i < len_l and l[i] == '/': + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + x.time = int(l[i])*3600+int(l[i+2])*60 + i += 2 + if i+1 < len_l and l[i+1] == ':': + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2])*3600) + else: + return None + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +def _parsems(value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." 
not in value:
+        return int(value), 0
+    else:
+        i, f = value.split(".")
+        return int(i), int(f.ljust(6, "0")[:6])
+
+
+# vim:ts=4:sw=4:et
diff --git a/venv/lib/python2.7/site-packages/dateutil/relativedelta.py b/venv/lib/python2.7/site-packages/dateutil/relativedelta.py
new file mode 100644
index 0000000..0217d57
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/dateutil/relativedelta.py
@@ -0,0 +1,530 @@
+# -*- coding: utf-8 -*-
+import datetime
+import calendar
+
+import operator
+from math import copysign
+
+from six import integer_types
+from warnings import warn
+
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+
+class weekday(object):
+    __slots__ = ["weekday", "n"]
+
+    def __init__(self, weekday, n=None):
+        self.weekday = weekday
+        self.n = n
+
+    def __call__(self, n):
+        if n == self.n:
+            return self
+        else:
+            return self.__class__(self.weekday, n)
+
+    def __eq__(self, other):
+        try:
+            if self.weekday != other.weekday or self.n != other.n:
+                return False
+        except AttributeError:
+            return False
+        return True
+
+    def __repr__(self):
+        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+        if not self.n:
+            return s
+        else:
+            return "%s(%+d)" % (s, self.n)
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
+
+
+class relativedelta(object):
+    """
+    The relativedelta type is based on the specification of the excellent
+    work done by M.-A. Lemburg in his
+    `mx.DateTime `_ extension.
+    However, notice that this type does *NOT* implement the same algorithm as
+    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+    There are two different ways to build a relativedelta instance. The
+    first one is passing it two date/datetime classes::
+
+        relativedelta(datetime1, datetime2)
+
+    The second one is passing it any number of the following keyword arguments::
+
+        relativedelta(arg1=x,arg2=y,arg3=z...)
+
+    year, month, day, hour, minute, second, microsecond:
+        Absolute information (argument is singular); adding or subtracting a
+        relativedelta with absolute information does not perform an arithmetic
+        operation, but rather REPLACES the corresponding value in the
+        original datetime with the value(s) in relativedelta.
+
+    years, months, weeks, days, hours, minutes, seconds, microseconds:
+        Relative information, may be negative (argument is plural); adding
+        or subtracting a relativedelta with relative information performs
+        the corresponding arithmetic operation on the original datetime value
+        with the information in the relativedelta.
+
+    weekday:
+        One of the weekday instances (MO, TU, etc). These instances may
+        receive a parameter N, specifying the Nth weekday, which could
+        be positive or negative (like MO(+1) or MO(-2)). Not specifying
+        it is the same as specifying +1. You can also use an integer,
+        where 0=MO.
+
+    leapdays:
+        Will add given days to the date found, if year is a leap
+        year, and the date found is after February 28.
+
+    yearday, nlyearday:
+        Set the yearday or the non-leap year day (jump leap days).
+        These are converted to day/month/leapdays information.
+
+    Here is the behavior of operations with relativedelta:
+
+    1. Calculate the absolute year, using the 'year' argument, or the
+       original datetime year, if the argument is not present.
+
+    2. Add the relative 'years' argument to the absolute year.
+
+    3. Do steps 1 and 2 for month/months.
+
+    4. Calculate the absolute day, using the 'day' argument, or the
+       original datetime day, if the argument is not present.
Then, + subtract from the day until it fits in the year and month + found after their operations. + + 5. Add the relative 'days' argument to the absolute day. Notice + that the 'weeks' argument is multiplied by 7 and added to + 'days'. + + 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, + microsecond/microseconds. + + 7. If the 'weekday' argument is present, calculate the weekday, + with the given (wday, nth) tuple. wday is the index of the + weekday (0-6, 0=Mon), and nth is the number of weeks to add + forward or backward, depending on its signal. Notice that if + the calculated date is already Monday, for example, using + (0, 1) or (0, -1) won't change the day. + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + if dt1 and dt2: + # datetime is a subclass of date. So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Relative information + self.years = years + self.months = months + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return self.days // 7 + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=1, hours=14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=other.year or self.year, + month=other.month or self.month, + day=other.day or self.day, + weekday=other.weekday or self.weekday, + hour=other.hour or self.hour, + minute=other.minute or self.minute, + second=other.second or self.second, + microsecond=(other.microsecond or + self.microsecond)) + if not isinstance(other, datetime.date): + raise TypeError("unsupported type for add operation") + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + raise TypeError("unsupported type for sub operation") + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=self.year or other.year, + month=self.month or other.month, + day=self.day or 
other.day, + weekday=self.weekday or other.weekday, + hour=self.hour or other.hour, + minute=self.minute or other.minute, + second=self.second or other.second, + microsecond=self.microsecond or other.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + f = float(other) + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return False + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + return self.__mul__(1/float(other)) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/venv/lib/python2.7/site-packages/dateutil/rrule.py b/venv/lib/python2.7/site-packages/dateutil/rrule.py new file mode 100644 index 0000000..22d6dfc --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/rrule.py @@ -0,0 +1,1612 @@ +# -*- 
coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread +import heapq + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n == 0") + + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)]) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. 
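+
+    For example (a sketch mirroring the mutating ``rruleset`` methods
+    defined later in this module; the method body here is illustrative)::
+
+        @_invalidates_cache
+        def rrule(self, rrule):
+            # Adding a rule makes any partially built cache stale.
+            self._rrule.append(rrule)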
+    """
+    def inner_func(self, *args, **kwargs):
+        rv = f(self, *args, **kwargs)
+        self._invalidate_cache()
+        return rv
+
+    return inner_func
+
+
+class rrulebase(object):
+    def __init__(self, cache=False):
+        if cache:
+            self._cache = []
+            self._cache_lock = _thread.allocate_lock()
+            self._invalidate_cache()
+        else:
+            self._cache = None
+            self._cache_complete = False
+            self._len = None
+
+    def __iter__(self):
+        if self._cache_complete:
+            return iter(self._cache)
+        elif self._cache is None:
+            return self._iter()
+        else:
+            return self._iter_cached()
+
+    def _invalidate_cache(self):
+        if self._cache is not None:
+            self._cache = []
+            self._cache_complete = False
+            self._cache_gen = self._iter()
+
+            if self._cache_lock.locked():
+                self._cache_lock.release()
+
+        self._len = None
+
+    def _iter_cached(self):
+        i = 0
+        gen = self._cache_gen
+        cache = self._cache
+        acquire = self._cache_lock.acquire
+        release = self._cache_lock.release
+        while gen:
+            if i == len(cache):
+                acquire()
+                if self._cache_complete:
+                    break
+                try:
+                    for j in range(10):
+                        cache.append(advance_iterator(gen))
+                except StopIteration:
+                    self._cache_gen = gen = None
+                    self._cache_complete = True
+                    break
+                release()
+            yield cache[i]
+            i += 1
+        while i < self._len:
+            yield cache[i]
+            i += 1
+
+    def __getitem__(self, item):
+        if self._cache_complete:
+            return self._cache[item]
+        elif isinstance(item, slice):
+            if item.step and item.step < 0:
+                return list(iter(self))[item]
+            else:
+                return list(itertools.islice(self,
+                                             item.start or 0,
+                                             item.stop or sys.maxsize,
+                                             item.step or 1))
+        elif item >= 0:
+            gen = iter(self)
+            try:
+                for i in range(item+1):
+                    res = advance_iterator(gen)
+            except StopIteration:
+                raise IndexError
+            return res
+        else:
+            return list(iter(self))[item]
+
+    def __contains__(self, item):
+        if self._cache_complete:
+            return item in self._cache
+        else:
+            for i in self:
+                if i == item:
+                    return True
+                elif i > item:
+                    return False
+        return False
+
+    # __len__() introduces a large performance penalty.
+    def count(self):
+        """ Returns the number of recurrences in this set. It will have to go
+        through the whole recurrence, if this hasn't been done before. """
+        if self._len is None:
+            for x in self:
+                pass
+        return self._len
+
+    def before(self, dt, inc=False):
+        """ Returns the last recurrence before the given datetime instance. The
+        inc keyword defines what happens if dt is an occurrence. With
+        inc=True, if dt itself is an occurrence, it will be returned. """
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+        last = None
+        if inc:
+            for i in gen:
+                if i > dt:
+                    break
+                last = i
+        else:
+            for i in gen:
+                if i >= dt:
+                    break
+                last = i
+        return last
+
+    def after(self, dt, inc=False):
+        """ Returns the first recurrence after the given datetime instance. The
+        inc keyword defines what happens if dt is an occurrence. With
+        inc=True, if dt itself is an occurrence, it will be returned. """
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+        if inc:
+            for i in gen:
+                if i >= dt:
+                    return i
+        else:
+            for i in gen:
+                if i > dt:
+                    return i
+        return None
+
+    def xafter(self, dt, count=None, inc=False):
+        """
+        Generator which yields up to `count` recurrences after the given
+        datetime instance, equivalent to `after`.
+
+        :param dt:
+            The datetime at which to start generating recurrences.
+
+        :param count:
+            The maximum number of recurrences to generate. If `None` (default),
+            dates are generated until the recurrence rule is exhausted.
+ + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + yield d + + if count is not None: + n += 1 + if n >= count: + break + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. + + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. 
The default week start is retrieved
+        from calendar.firstweekday(), and may be modified by
+        calendar.setfirstweekday().
+    :param count:
+        How many occurrences will be generated.
+
+        .. note::
+            As of version 2.5.0, the use of the ``until`` keyword together
+            with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+    :param until:
+        If given, this must be a datetime instance that will specify the
+        limit of the recurrence. The last recurrence in the rule is the greatest
+        datetime that is less than or equal to the value specified in the
+        ``until`` parameter.
+
+        .. note::
+            As of version 2.5.0, the use of the ``until`` keyword together
+            with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+    :param bysetpos:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each given integer will specify an occurrence
+        number, corresponding to the nth occurrence of the rule inside the
+        frequency period. For example, a bysetpos of -1 if combined with a
+        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
+        result in the last work day of every month.
+    :param bymonth:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the months to apply the recurrence to.
+    :param bymonthday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the month days to apply the recurrence to.
+    :param byyearday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the year days to apply the recurrence to.
+    :param byweekno:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the week numbers to apply the recurrence to. Week numbers
+        have the meaning described in ISO8601, that is, the first week of
+        the year is that containing at least four days of the new year.
+    :param byweekday:
+        If given, it must be either an integer (0 == MO), a sequence of
+        integers, one of the weekday constants (MO, TU, etc), or a sequence
+        of these constants. When given, these variables will define the
+        weekdays where the recurrence will be applied. It's also possible to
+        use an argument n for the weekday instances, which will mean the nth
+        occurrence of this weekday in the period. For example, with MONTHLY,
+        or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
+        first Friday of the month where the recurrence happens. Notice that in
+        the RFC documentation, this is specified as BYDAY, but was renamed to
+        avoid the ambiguity of that keyword.
+    :param byhour:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the hours to apply the recurrence to.
+    :param byminute:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the minutes to apply the recurrence to.
+    :param bysecond:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the seconds to apply the recurrence to.
+    :param byeaster:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each integer will define an offset from the
+        Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+        Sunday itself. This is an extension to the RFC specification.
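+
+    An illustrative combination of these keywords (a sketch; the dates shown
+    follow from the chosen ``dtstart``):
+
+        >>> from dateutil.rrule import rrule, WEEKLY, MO
+        >>> from datetime import datetime
+        >>> list(rrule(WEEKLY, count=3, byweekday=MO,
+        ...            dtstart=datetime(2012, 1, 1)))
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [datetime.datetime(2012, 1, 2, 0, 0),
+         datetime.datetime(2012, 1, 9, 0, 0),
+         datetime.datetime(2012, 1, 16, 0, 0)]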
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if count and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 2445" + " and has been deprecated in dateutil. Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's 
unique + + self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0])) + self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0])) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. + if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = tuple() + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = tuple() + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = set((dtstart.hour,)) + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = set((dtstart.minute,)) + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = 
tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC2445, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + str(self._wkst)) + + if self._count: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC2445-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append(';'.join(parts)) + return '\n'.join(output) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + 
(byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + total += 1 + yield res + if count: + count -= 1 + if not count: + self._len = total + return + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + 
div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. 
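+
+        As a worked example: with ``value=17``, ``interval=4``, ``base=24``
+        and ``byxxx={21}``, the first repetition gives
+        ``divmod(17 + 4, 24) == (0, 21)``, and 21 is in ``byxxx``, so the
+        method returns ``(0, 21)``: no days carried over, new value 21.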
+ """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
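+
+    A minimal usage sketch (dates are illustrative)::
+
+        rset = rruleset()
+        rset.rrule(rrule(DAILY, count=5,
+                         dtstart=datetime.datetime(2016, 1, 1)))
+        rset.exdate(datetime.datetime(2016, 1, 3))
+        list(rset)  # the five daily occurrences, minus January 3rd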
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + +class _rrulestr(object): + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + 
rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + exdatevals.append(value) + elif name == "DTSTART": + for parm in parms: + raise ValueError("unsupported DTSTART parm: "+parm) + if not 
parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + rset.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/venv/lib/python2.7/site-packages/dateutil/tz/__init__.py b/venv/lib/python2.7/site-packages/dateutil/tz/__init__.py new file mode 100644 index 0000000..1cba7b9 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/tz/__init__.py @@ -0,0 +1,4 @@ +from .tz import * + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"] diff --git a/venv/lib/python2.7/site-packages/dateutil/tz/_common.py b/venv/lib/python2.7/site-packages/dateutil/tz/_common.py new file mode 100644 index 0000000..bbce0fb --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/tz/_common.py @@ -0,0 +1,18 @@ +from six import PY3 + +__all__ = ['tzname_in_python2'] + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None and not PY3: + name = name.encode() + + return name + + return adjust_encoding \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/dateutil/tz/tz.py b/venv/lib/python2.7/site-packages/dateutil/tz/tz.py new file mode 100644 index 0000000..ecf1ee1 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/tz/tz.py @@ -0,0 +1,979 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:`datetime.tzinfo` type. There are classes to handle tzfile format files +(usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc), TZ +environment string (in all known formats), given ranges (with help from +relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. 
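+
+A quick sketch of typical use (zone availability varies by platform):
+
+    >>> from dateutil import tz
+    >>> utc = tz.tzutc()
+    >>> local = tz.tzlocal()
+    >>> ny = tz.gettz("America/New_York")  # may be None if unknown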
+""" +import datetime +import struct +import time +import sys +import os + +from six import string_types, PY3 +from ._common import tzname_in_python2 + +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +relativedelta = None +parser = None +rrule = None + +ZERO = datetime.timedelta(0) +EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal() + +class tzutc(datetime.tzinfo): + + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def __eq__(self, other): + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class tzoffset(datetime.tzinfo): + + def __init__(self, name, offset): + self._name = name + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + def __eq__(self, other): + return (isinstance(other, tzoffset) and + self._offset == other._offset) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + self._offset.days*86400+self._offset.seconds) + + __reduce__ = object.__reduce__ + + +class tzlocal(datetime.tzinfo): + def __init__(self): + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + def utcoffset(self, dt): + if dt is None: + return dt + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return self._dst_offset-self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def _isdst(self, dt): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. 
+ # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + return time.localtime(timestamp+time.timezone).tm_isdst + + def __eq__(self, other): + return (isinstance(other, tzlocal) and + (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return False + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt) + + def __ne__(self, other): + return not self.__eq__(other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class tzfile(datetime.tzinfo): + + # http://www.twinsun.com/tz/tz-link.htm + # ftp://ftp.iana.org/tz/tz*.tar.gz + + def __init__(self, fileobj, filename=None): + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + try: + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). 
+ typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + self._trans_list = struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4)) + else: + self._trans_list = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + self._trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + self._trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but read anyway for correct file position) + if leapcnt: + leap = struct.unpack(">%dl" % (leapcnt*2), + fileobj.read(leapcnt*8)) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # ** Everything has been read ** + finally: + if file_opened_here: + fileobj.close() + + # Build ttinfo list + self._ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. 
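+                # e.g. an offset of 3599 seconds rounds to
+                # (3599 + 30) // 60 * 60 == 3600.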
+ gmtoff = (gmtoff+30)//60*60 + tti = _ttinfo() + tti.offset = gmtoff + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + self._ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + trans_idx = [] + for idx in self._trans_idx: + trans_idx.append(self._ttinfo_list[idx]) + self._trans_idx = tuple(trans_idx) + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + self._ttinfo_std = None + self._ttinfo_dst = None + self._ttinfo_before = None + if self._ttinfo_list: + if not self._trans_list: + self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = self._trans_idx[i] + if not self._ttinfo_std and not tti.isdst: + self._ttinfo_std = tti + elif not self._ttinfo_dst and tti.isdst: + self._ttinfo_dst = tti + if self._ttinfo_std and self._ttinfo_dst: + break + else: + if self._ttinfo_dst and not self._ttinfo_std: + self._ttinfo_std = self._ttinfo_dst + + for tti in self._ttinfo_list: + if not tti.isdst: + self._ttinfo_before = tti + break + else: + self._ttinfo_before = self._ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = 0 + self._trans_list = list(self._trans_list) + for i in range(len(self._trans_list)): + tti = self._trans_idx[i] + if not tti.isdst: + # This is std time. + self._trans_list[i] += tti.offset + laststdoffset = tti.offset + else: + # This is dst time. Convert to std. + self._trans_list[i] += laststdoffset + self._trans_list = tuple(self._trans_list) + + def _find_ttinfo(self, dt, laststd=0): + timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400 + + dt.hour * 3600 + + dt.minute * 60 + + dt.second) + idx = 0 + for trans in self._trans_list: + if timestamp < trans: + break + idx += 1 + else: + return self._ttinfo_std + if idx == 0: + return self._ttinfo_before + if laststd: + while idx > 0: + tti = self._trans_idx[idx-1] + if not tti.isdst: + return tti + idx -= 1 + else: + return self._ttinfo_std + else: + return self._trans_idx[idx-1] + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if not self._ttinfo_dst: + return ZERO + tti = self._find_ttinfo(dt) + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.delta-self._find_ttinfo(dt, laststd=1).delta + + # An alternative for that would be: + # + # return self._ttinfo_dst.offset-self._ttinfo_std.offset + # + # However, this class stores historical changes in the + # dst offset, so I belive that this wouldn't be the right + # way to implement this. 
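+
+    # A usage sketch, assuming the zoneinfo file exists at this path:
+    #
+    #   nyc = tzfile("/usr/share/zoneinfo/America/New_York")
+    #   datetime.datetime(2003, 7, 1, tzinfo=nyc).dst()  # timedelta(hours=1)
+    #   datetime.datetime(2003, 1, 1, tzinfo=nyc).dst()  # timedelta(0)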
+ + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return False + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + if not os.path.isfile(self._filename): + raise ValueError("Unpickable %s class" % self.__class__.__name__) + return (self.__class__, (self._filename,)) + + +class tzrange(datetime.tzinfo): + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + global relativedelta + if not relativedelta: + from dateutil import relativedelta + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset+datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + def utcoffset(self, dt): + if dt is None: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if self._isdst(dt): + return self._dst_offset-self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def _isdst(self, dt): + if not self._start_delta: + return False + year = datetime.datetime(dt.year, 1, 1) + start = year+self._start_delta + end = year+self._end_delta + dt = dt.replace(tzinfo=None) + if start < end: + return dt >= start and dt < end + else: + return dt >= start or dt < end + + def __eq__(self, other): + if not isinstance(other, tzrange): + return False + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class tzstr(tzrange): + + def __init__(self, s): + global parser + if not parser: + from dateutil import parser + self._s = s + + res = parser._parsetz(s) + if res is None: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC"): + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. 
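+        # (False rather than None: passing None would make tzrange build its
+        # default April/October transition rules, only to be replaced below.)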
+ tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + def _delta(self, x, isend=0): + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset-self._std_offset + kwargs["seconds"] -= delta.seconds+delta.days*86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(datetime.tzinfo): + def __init__(self, tzid, comps=[]): + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + dt = dt.replace(tzinfo=None) + try: + return self._cachecomp[self._cachedate.index(dt)] + except ValueError: + pass + lastcomp = None + lastcompdt = None + for comp in self._comps: + if not comp.isdst: + # Handle the extra hour in DST -> STD + compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True) + else: + compdt = comp.rrule.before(dt, inc=True) + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + self._cachedate.insert(0, dt) + self._cachecomp.insert(0, lastcomp) + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + return lastcomp + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + def __init__(self, fileobj): + global rrule + if not rrule: + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + elif hasattr(fileobj, "name"): + self._s = fileobj.name + else: + self._s = repr(fileobj) + + self._vtz = {} + + self._parse_rfc(fileobj.read()) + + def keys(self): + return list(self._vtz.keys()) + + def get(self, tzid=None): + if tzid is None: + keys = list(self._vtz.keys()) + if len(keys) == 0: + raise ValueError("no timezones defined") + elif len(keys) > 1: + raise ValueError("more than one timezone available") + tzid = keys[0] + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2])*3600+int(s[2:])*60)*signal + elif len(s) == 6: + return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal + else: + raise ValueError("invalid offset: "+s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: 
"+value) + elif comptype: + if name == "DTSTART": + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def gettz(name=None): + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[:-1] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except WindowsError: + tz = None + if not tz: + from dateutil.zoneinfo import gettz + tz = gettz(name) + if not tz: + for c in name: + # name must have at least one offset to be a tzstr + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + +# vim:ts=4:sw=4:et diff --git a/venv/lib/python2.7/site-packages/dateutil/tz/win.py b/venv/lib/python2.7/site-packages/dateutil/tz/win.py new file mode 100644 index 0000000..7642e5c --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/tz/win.py @@ -0,0 +1,330 @@ +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. 
+ raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzname_in_python2 + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing `tzres.dll`, which contains timezone name related + resources. + + ..versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + ..note: + Offsets found in the registry are generally of the form + `@tzres.dll,-114`. The offset in this case if 114, not -114. + + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(datetime.tzinfo): + """tzinfo class based on win32's timezones available in the registry.""" + def __eq__(self, other): + # Compare on all relevant dimensions, including name. 
+ return (isinstance(other, tzwinbase) and + (self._stdoffset == other._stdoffset and + self._dstoffset == other._dstoffset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._stdname == other._stdname and + self._dstname == other._dstname)) + + def __ne__(self, other): + return not self.__eq__(other) + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return datetime.timedelta(minutes=self._dstoffset) + else: + return datetime.timedelta(minutes=self._stdoffset) + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + minutes = self._dstoffset - self._stdoffset + return datetime.timedelta(minutes=minutes) + else: + return datetime.timedelta(0) + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dstname + else: + return self._stdname + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + tzkey = winreg.OpenKey(handle, TZKEYNAME) + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + tzkey.Close() + handle.Close() + return result + + def display(self): + return self._display + + def _isdst(self, dt): + if not self._dstmonth: + # dstmonth == 0 signals the zone has no daylight saving time + return False + elif dt is None: + return None + + dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + if dston < dstoff: + return dston <= dt.replace(tzinfo=None) < dstoff + else: + return not dstoff <= dt.replace(tzinfo=None) < dston + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + # multiple contexts only possible in 2.7 and 3.1, we still support 2.6 + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._stdname = keydict["Std"] + self._dstname = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1 + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + + 
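+            # TZLOCALKEYNAME holds the machine's current timezone settings.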
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._stdname = keydict["StandardName"] + self._dstname = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\{sn}').format(kn=TZKEYNAME, + sn=self._stdname) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + self._stdoffset = -keydict["Bias"]-keydict["StandardBias"] + self._dstoffset = self._stdoffset-keydict["DaylightBias"] + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. + return "tzwinlocal(%s)" % repr(self._stdname) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/venv/lib/python2.7/site-packages/dateutil/tzwin.py b/venv/lib/python2.7/site-packages/dateutil/tzwin.py new file mode 100644 index 0000000..55cd910 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000..b1a39b0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/__init__.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +import logging +import os +import warnings +import tempfile +import shutil +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO +from contextlib import closing + +from dateutil.tz import tzfile + +__all__ = ["gettz", "gettz_db_metadata", "rebuild"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 
'METADATA' + +# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but +# it's close enough for python2.6 +tar_open = TarFile.open +if not hasattr(TarFile, '__exit__'): + def tar_open(*args, **kwargs): + return closing(TarFile.open(*args, **kwargs)) + + +class tzfile(tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with tar_open(fileobj=zonefile_stream, mode='r') as tf: + # dict comprehension does not work on python2.6 + # TODO: get back to the nicer syntax when we ditch python2.6 + # self.zones = {zf.name: tzfile(tf.extractfile(zf), + # filename = zf.name) + # for zf in tf.getmembers() if zf.isfile()} + self.zones = dict((zf.name, tzfile(tf.extractfile(zf), + filename=zf.name)) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN) + # deal with links: They'll point to their parent object. Less + # waste of memory + # links = {zl.name: self.zones[zl.linkname] + # for zl in tf.getmembers() if zl.islnk() or zl.issym()} + links = dict((zl.name, self.zones[zl.linkname]) + for zl in tf.getmembers() if + zl.islnk() or zl.issym()) + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = dict() + self.metadata = None + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: deprecate this. 
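+#
+# A usage sketch (the set of valid names depends on the bundled tarball):
+#
+#   from dateutil.zoneinfo import gettz
+#   tz = gettz("America/New_York")  # a tzfile instance, or None if unknown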
+_CLASS_ZONE_INSTANCE = list() + + +def gettz(name): + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: A dictionary with the database metadata + """ + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata + + diff --git a/venv/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 0000000..4b89c0f Binary files /dev/null and b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ diff --git a/venv/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000..a66c1d9 --- /dev/null +++ b/venv/lib/python2.7/site-packages/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,51 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call + +from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ftp.iana.org/tz. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with tar_open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with tar_open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/venv/lib/python2.7/site-packages/easy_install.py b/venv/lib/python2.7/site-packages/easy_install.py new file mode 100644 index 0000000..d87e984 --- /dev/null +++ b/venv/lib/python2.7/site-packages/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..07adc79 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,130 @@ +Python Elasticsearch Client +=========================== + +Official low-level client for Elasticsearch. Its goal is to provide common +ground for all Elasticsearch-related code in Python; because of this it tries +to be opinion-free and very extendable. 
+
+For a higher-level client library with more limited scope, have a look at
+`elasticsearch-dsl`_ - a more pythonic library sitting on top of
+``elasticsearch-py``.
+
+It provides a more convenient and idiomatic way to write and manipulate
+`queries`_. It stays close to the Elasticsearch JSON DSL, mirroring its
+terminology and structure while exposing the whole range of the DSL from Python
+either directly using defined classes or queryset-like expressions.
+
+It also provides an optional `persistence layer`_ for working with documents as
+Python objects in an ORM-like fashion: defining mappings, retrieving and saving
+documents, wrapping the document data in user-defined classes.
+
+.. _elasticsearch-dsl: http://elasticsearch-dsl.rtfd.org/
+.. _queries: http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html
+.. _persistence layer: http://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype
+
+Compatibility
+-------------
+
+The library is compatible with all Elasticsearch versions since ``0.90.x`` but you
+**have to use a matching major version**:
+
+For **Elasticsearch 2.0** and later, use the major version 2 (``2.x.y``) of the
+library.
+
+For **Elasticsearch 1.0** and later, use the major version 1 (``1.x.y``) of the
+library.
+
+For **Elasticsearch 0.90.x**, use a version from ``0.4.x`` releases of the
+library.
+
+The recommended way to set your requirements in your `setup.py` or
+`requirements.txt` is::
+
+    # Elasticsearch 2.x
+    elasticsearch>=2.0.0,<3.0.0
+
+    # Elasticsearch 1.x
+    elasticsearch>=1.0.0,<2.0.0
+
+    # Elasticsearch 0.90.x
+    elasticsearch<1.0.0
+
+The development is happening on ``master`` and ``1.x`` branches, respectively.
+
+Installation
+------------
+
+Install the ``elasticsearch`` package with `pip
+`_::
+
+    pip install elasticsearch
+
+
+Example use
+-----------
+
+Simple use-case::
+
+    >>> from datetime import datetime
+    >>> from elasticsearch import Elasticsearch
+
+    # by default we connect to localhost:9200
+    >>> es = Elasticsearch()
+
+    # create an index in elasticsearch, ignore status code 400 (index already exists)
+    >>> es.indices.create(index='my-index', ignore=400)
+    {u'acknowledged': True}
+
+    # datetimes will be serialized
+    >>> es.index(index="my-index", doc_type="test-type", id=42, body={"any": "data", "timestamp": datetime.now()})
+    {u'_id': u'42', u'_index': u'my-index', u'_type': u'test-type', u'_version': 1, u'ok': True}
+
+    # but not deserialized
+    >>> es.get(index="my-index", doc_type="test-type", id=42)['_source']
+    {u'any': u'data', u'timestamp': u'2013-05-12T19:45:31.804229'}
+
+`Full documentation`_.
+
+.. _Full documentation: http://elasticsearch-py.rtfd.org/
+
+
+Features
+--------
+
+The client's features include:
+
+ * translating basic Python data types to and from json (datetimes are not
+   decoded for performance reasons)
+ * configurable automatic discovery of cluster nodes
+ * persistent connections
+ * load balancing (with pluggable selection strategy) across all available nodes
+ * failed connection penalization (time based - failed connections won't be
+   retried until a timeout is reached)
+ * support for ssl and http authentication
+ * thread safety
+ * pluggable architecture
+
+
+License
+-------
+
+Copyright 2015 Elasticsearch
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Build status
+------------
+
+.. image:: https://secure.travis-ci.org/elastic/elasticsearch-py.png
+   :target: http://travis-ci.org/#!/elastic/elasticsearch-py
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/METADATA b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/METADATA
new file mode 100644
index 0000000..235c0c1
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/METADATA
@@ -0,0 +1,155 @@
+Metadata-Version: 2.0
+Name: elasticsearch
+Version: 2.3.0
+Summary: Python client for Elasticsearch
+Home-page: https://github.com/elastic/elasticsearch-py
+Author: Honza Král
+Author-email: honza.kral@gmail.com
+License: Apache License, Version 2.0
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Dist: urllib3 (>=1.8,<2.0)
+
+Python Elasticsearch Client
+===========================
+
+Official low-level client for Elasticsearch. Its goal is to provide common
+ground for all Elasticsearch-related code in Python; because of this it tries
+to be opinion-free and very extendable.
+
+For a higher-level client library with more limited scope, have a look at
+`elasticsearch-dsl`_ - a more pythonic library sitting on top of
+``elasticsearch-py``.
+
+It provides a more convenient and idiomatic way to write and manipulate
+`queries`_. It stays close to the Elasticsearch JSON DSL, mirroring its
+terminology and structure while exposing the whole range of the DSL from Python
+either directly using defined classes or queryset-like expressions.
+
+It also provides an optional `persistence layer`_ for working with documents as
+Python objects in an ORM-like fashion: defining mappings, retrieving and saving
+documents, wrapping the document data in user-defined classes.
+
+.. _elasticsearch-dsl: http://elasticsearch-dsl.rtfd.org/
+.. _queries: http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html
+..
_persistence layer: http://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype + +Compatibility +------------- + +The library is compatible with all Elasticsearch versions since ``0.90.x`` but you +**have to use a matching major version**: + +For **Elasticsearch 2.0** and later, use the major version 2 (``2.x.y``) of the +library. + +For **Elasticsearch 1.0** and later, use the major version 1 (``1.x.y``) of the +library. + +For **Elasticsearch 0.90.x**, use a version from ``0.4.x`` releases of the +library. + +The recommended way to set your requirements in your `setup.py` or +`requirements.txt` is:: + + # Elasticsearch 2.x + elasticsearch>=2.0.0,<3.0.0 + + # Elasticsearch 1.x + elasticsearch>=1.0.0,<2.0.0 + + # Elasticsearch 0.90.x + elasticsearch<1.0.0 + +The development is happening on ``master`` and ``1.x`` branches, respectively. + +Installation +------------ + +Install the ``elasticsearch`` package with `pip +`_:: + + pip install elasticsearch + + +Example use +----------- + +Simple use-case:: + + >>> from datetime import datetime + >>> from elasticsearch import Elasticsearch + + # by default we connect to localhost:9200 + >>> es = Elasticsearch() + + # create an index in elasticsearch, ignore status code 400 (index already exists) + >>> es.indices.create(index='my-index', ignore=400) + {u'acknowledged': True} + + # datetimes will be serialized + >>> es.index(index="my-index", doc_type="test-type", id=42, body={"any": "data", "timestamp": datetime.now()}) + {u'_id': u'42', u'_index': u'my-index', u'_type': u'test-type', u'_version': 1, u'ok': True} + + # but not deserialized + >>> es.get(index="my-index", doc_type="test-type", id=42)['_source'] + {u'any': u'data', u'timestamp': u'2013-05-12T19:45:31.804229'} + +`Full documentation`_. + +.. _Full documentation: http://elasticsearch-py.rtfd.org/ + + +Features +-------- + +The client's features include: + + * translating basic Python data types to and from json (datetimes are not + decoded for performance reasons) + * configurable automatic discovery of cluster nodes + * persistent connections + * load balancing (with pluggable selection strategy) across all available nodes + * failed connection penalization (time based - failed connections won't be + retried until a timeout is reached) + * support for ssl and http authentication + * thread safety + * pluggable architecture + + +License +------- + +Copyright 2015 Elasticsearch + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Build status +------------ + +.. 
image:: https://secure.travis-ci.org/elastic/elasticsearch-py.png + :target: http://travis-ci.org/#!/elastic/elasticsearch-py + diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/RECORD b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/RECORD new file mode 100644 index 0000000..8f4c11a --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/RECORD @@ -0,0 +1,48 @@ +elasticsearch/compat.py,sha256=MLDabdJN3w5KkeVykvh0rpUO1SFFds0mBnDWZJ30SdI,312 +elasticsearch/transport.py,sha256=El6Li6eHmkFZxeLMfWiZb-nf-zBnsXBwQR8KRamiAiU,15277 +elasticsearch/exceptions.py,sha256=RGOIdnNlpKYB9-Hy-gyMIALNq3CxOdzPu4bySZN97q8,3127 +elasticsearch/connection_pool.py,sha256=re2lYiUtyO3CSzkJLZc8L2ooVZ76bH7rKm1oQ_5SG9Q,9607 +elasticsearch/serializer.py,sha256=qqyY9_JaaPuTw1UAbL99CSKllbvXOHF0VfCXcupM4sk,2287 +elasticsearch/__init__.py,sha256=xPgCWpzwwo4-PB0zjt3u9Yx9MnO1VcULq8e1W3589-U,837 +elasticsearch/client/cluster.py,sha256=ez_hB0Ywkn8s1QSh-Sdolr9EEaKXPiVQ1T8CU1v0udQ,7619 +elasticsearch/client/cat.py,sha256=2egI0ak1z9u8ruR_rp9BqQxLOpDltt4dgL1o99QnN_Q,15962 +elasticsearch/client/snapshot.py,sha256=w-cteBMUMYV0DOU1oJXFqRnhfvGjb0dJreSbj2Uy6Vs,7609 +elasticsearch/client/indices.py,sha256=juLLjjOtNo0BL6DzMcyXS2Ch_PrwL24M89Ut7XewezM,52688 +elasticsearch/client/utils.py,sha256=O9ELpuzQIShFON8UBhTo87T4vzQTQwHga8oVcskiVOk,2645 +elasticsearch/client/__init__.py,sha256=9Gllv5q-PmhLVNqAxa1ESWVf5vGkQmJWTWV_lyx8Bc8,64638 +elasticsearch/client/nodes.py,sha256=P8YUL-BppKfn4tdw3gL5hEbne5nw5hzu_oivmQVXd0I,4805 +elasticsearch/connection/pooling.py,sha256=19x-kzyqndWJnnMGZ1ucoZvpMIliH3q3oJLe34OEROA,798 +elasticsearch/connection/http_urllib3.py,sha256=8uPSBkXe-0nR9lGxRjpBBZO6ccmVmDWf1tos2s0VqE8,5285 +elasticsearch/connection/base.py,sha256=s-5JTcSCR5aHGkywwsreG9XUFcK_EavjG9mBFJ6jTR0,4357 +elasticsearch/connection/http_requests.py,sha256=m2mWMyJw-I-CAwjNEt336Md5kMhbLrdrxVX1OD2_9kg,4039 +elasticsearch/connection/__init__.py,sha256=V50xojadYNqjTkV1KdY4GQnxfG-hrxr-FkmQknvYjJo,127 +elasticsearch/helpers/test.py,sha256=iyaFosPjWYupZOcpk6kYdIzpw6-qNGkfWuK-G-iFtYQ,1839 +elasticsearch/helpers/__init__.py,sha256=oL6ffwCljzXhKBh1rdVtiouw6yVTCo7pA0xEaOuFANk,14046 +elasticsearch-2.3.0.dist-info/pbr.json,sha256=7LDsmS2o3dnopaClq6T03zl5eegwepeyDWzJEMMl-Jg,47 +elasticsearch-2.3.0.dist-info/top_level.txt,sha256=Jp2bLWq49skvCN4YCZsg1Hfn_NDLgleC-x-Bn01_HgM,14 +elasticsearch-2.3.0.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +elasticsearch-2.3.0.dist-info/METADATA,sha256=mYScigdcXtHELTg0kiPdRuVQgMv-puR5mNPqIwPxLUw,5261 +elasticsearch-2.3.0.dist-info/DESCRIPTION.rst,sha256=oVlNdYwfM0G2v0gLd_xVaUcGpbX5n4cmTM96JZb9DWo,4221 +elasticsearch-2.3.0.dist-info/metadata.json,sha256=qPjP4V88tKhTOWEiRc9mVnbjangh3NcqHao1awv08RE,1272 +elasticsearch-2.3.0.dist-info/RECORD,, +elasticsearch-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +elasticsearch/transport.pyc,, +elasticsearch/connection/pooling.pyc,, +elasticsearch/compat.pyc,, +elasticsearch/client/snapshot.pyc,, +elasticsearch/client/indices.pyc,, +elasticsearch/connection/__init__.pyc,, +elasticsearch/client/utils.pyc,, +elasticsearch/__init__.pyc,, +elasticsearch/client/nodes.pyc,, +elasticsearch/client/__init__.pyc,, +elasticsearch/client/cat.pyc,, +elasticsearch/connection/http_urllib3.pyc,, +elasticsearch/connection/http_requests.pyc,, +elasticsearch/serializer.pyc,, +elasticsearch/connection_pool.pyc,, +elasticsearch/helpers/__init__.pyc,, +elasticsearch/client/cluster.pyc,, 
+elasticsearch/exceptions.pyc,, +elasticsearch/connection/base.pyc,, +elasticsearch/helpers/test.pyc,, diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/WHEEL b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/metadata.json b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/metadata.json new file mode 100644 index 0000000..4a9da4d --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"summary": "Python client for Elasticsearch", "extras": [], "metadata_version": "2.0", "test_requires": [{"requires": ["requests (>=1.0.0,<3.0.0)", "nose", "coverage", "mock", "pyaml", "nosexcover"]}], "version": "2.3.0", "name": "elasticsearch", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "name": "Honza Kr\u00e1l", "email": "honza.kral@gmail.com"}], "project_urls": {"Home": "https://github.com/elastic/elasticsearch-py"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "license": "Apache License, Version 2.0", "generator": "bdist_wheel (0.24.0)", "run_requires": [{"requires": ["urllib3 (>=1.8,<2.0)"]}]} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/pbr.json b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/pbr.json new file mode 100644 index 0000000..17f6d5d --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/pbr.json @@ -0,0 +1 @@ +{"git_version": "14f5b35", "is_release": false} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/top_level.txt new file mode 100644 index 0000000..174c3f8 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch-2.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +elasticsearch diff --git a/venv/lib/python2.7/site-packages/elasticsearch/__init__.py b/venv/lib/python2.7/site-packages/elasticsearch/__init__.py new file mode 100644 index 0000000..405bc21 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/__init__.py @@ -0,0 +1,25 @@ +from __future__ import absolute_import + +VERSION = (2, 3, 0) +__version__ = VERSION +__versionstr__ = '.'.join(map(str, VERSION)) + +import sys + +if (2, 7) <= sys.version_info < (3, 2): + # On Python 2.7 and Python3 < 3.2, install no-op handler to silence + # `No handlers could be found for logger "elasticsearch"` message per + # + import logging + logger = logging.getLogger('elasticsearch') + 
logger.addHandler(logging.NullHandler()) + +from .client import Elasticsearch +from .transport import Transport +from .connection_pool import ConnectionPool, ConnectionSelector, \ + RoundRobinSelector +from .serializer import JSONSerializer +from .connection import Connection, RequestsHttpConnection, \ + Urllib3HttpConnection +from .exceptions import * + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/__init__.py b/venv/lib/python2.7/site-packages/elasticsearch/client/__init__.py new file mode 100644 index 0000000..b7275c9 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/__init__.py @@ -0,0 +1,1279 @@ +from __future__ import unicode_literals +import weakref +import logging + +from ..transport import Transport +from ..exceptions import NotFoundError, TransportError +from ..compat import string_types, urlparse +from .indices import IndicesClient +from .cluster import ClusterClient +from .cat import CatClient +from .nodes import NodesClient +from .snapshot import SnapshotClient +from .utils import query_params, _make_path, SKIP_IN_PATH + +logger = logging.getLogger('elasticsearch') + +def _normalize_hosts(hosts): + """ + Helper function to transform hosts argument to + :class:`~elasticsearch.Elasticsearch` to a list of dicts. + """ + # if hosts are empty, just defer to defaults down the line + if hosts is None: + return [{}] + + # passed in just one string + if isinstance(hosts, string_types): + hosts = [hosts] + + out = [] + # normalize hosts to dicts + for host in hosts: + if isinstance(host, string_types): + if '://' not in host: + host = "//%s" % host + + parsed_url = urlparse(host) + h = {"host": parsed_url.hostname} + + if parsed_url.port: + h["port"] = parsed_url.port + + if parsed_url.scheme == "https": + h['port'] = parsed_url.port or 443 + h['use_ssl'] = True + h['scheme'] = 'http' + elif parsed_url.scheme: + h['scheme'] = parsed_url.scheme + + if parsed_url.username or parsed_url.password: + h['http_auth'] = '%s:%s' % (parsed_url.username, parsed_url.password) + + if parsed_url.path and parsed_url.path != '/': + h['url_prefix'] = parsed_url.path + + out.append(h) + else: + out.append(host) + return out + + +class Elasticsearch(object): + """ + Elasticsearch low-level client. Provides a straightforward mapping from + Python to ES REST endpoints. + + The instance has attributes ``cat``, ``cluster``, ``indices``, ``nodes`` + and ``snapshot`` that provide access to instances of + :class:`~elasticsearch.client.CatClient`, + :class:`~elasticsearch.client.ClusterClient`, + :class:`~elasticsearch.client.IndicesClient`, + :class:`~elasticsearch.client.NodesClient` and + :class:`~elasticsearch.client.SnapshotClient` respectively. This is the + preferred (and only supported) way to get access to those classes and their + methods. + + You can specify your own connection class which should be used by providing + the ``connection_class`` parameter:: + + # create connection to localhost using the ThriftConnection + es = Elasticsearch(connection_class=ThriftConnection) + + If you want to turn on :ref:`sniffing` you have several options (described + in :class:`~elasticsearch.Transport`):: + + # create connection that will automatically inspect the cluster to get + # the list of active nodes. 
Start with nodes running on 'esnode1' and + # 'esnode2' + es = Elasticsearch( + ['esnode1', 'esnode2'], + # sniff before doing anything + sniff_on_start=True, + # refresh nodes after a node fails to respond + sniff_on_connection_fail=True, + # and also every 60 seconds + sniffer_timeout=60 + ) + + Different hosts can have different parameters, use a dictionary per node to + specify those:: + + # connect to localhost directly and another node using SSL on port 443 + # and an url_prefix. Note that ``port`` needs to be an int. + es = Elasticsearch([ + {'host': 'localhost'}, + {'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True}, + ]) + + If using SSL, there are several parameters that control how we deal with + certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for + detailed description of the options):: + + es = Elasticsearch( + ['localhost:443', 'other_host:443'], + # turn on SSL + use_ssl=True, + # make sure we verify SSL certificates (off by default) + verify_certs=True, + # provide a path to CA certs on disk + ca_certs='/path/to/CA_certs' + ) + + SSL client authentication is supported + (see :class:`~elasticsearch.Urllib3HttpConnection` for + detailed description of the options):: + + es = Elasticsearch( + ['localhost:443', 'other_host:443'], + # turn on SSL + use_ssl=True, + # make sure we verify SSL certificates (off by default) + verify_certs=True, + # provide a path to CA certs on disk + ca_certs='/path/to/CA_certs', + # PEM formatted SSL client certificate + client_cert='/path/to/clientcert.pem', + # PEM formatted SSL client key + client_key='/path/to/clientkey.pem' + ) + + Alternatively you can use RFC-1738 formatted URLs, as long as they are not + in conflict with other options:: + + es = Elasticsearch( + [ + 'http://user:secret@localhost:9200/', + 'https://user:secret@other_host:443/production' + ], + verify_certs=True + ) + + """ + def __init__(self, hosts=None, transport_class=Transport, **kwargs): + """ + :arg hosts: list of nodes we should connect to. Node should be a + dictionary ({"host": "localhost", "port": 9200}), the entire dictionary + will be passed to the :class:`~elasticsearch.Connection` class as + kwargs, or a string in the format of ``host[:port]`` which will be + translated to a dictionary automatically. If no value is given the + :class:`~elasticsearch.Urllib3HttpConnection` class defaults will be used. + + :arg transport_class: :class:`~elasticsearch.Transport` subclass to use. + + :arg kwargs: any additional arguments will be passed on to the + :class:`~elasticsearch.Transport` class and, subsequently, to the + :class:`~elasticsearch.Connection` instances. 
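+
+        As a minimal illustration (host values are made up), these two
+        calls are equivalent, since string hosts are parsed into
+        dictionaries internally::
+
+            es = Elasticsearch(['localhost:9200'])
+            es = Elasticsearch([{'host': 'localhost', 'port': 9200}])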
+        """
+        self.transport = transport_class(_normalize_hosts(hosts), **kwargs)
+
+        # namespaced clients for compatibility with API names
+        # use weakref to make GC's work a little easier
+        self.indices = IndicesClient(weakref.proxy(self))
+        self.cluster = ClusterClient(weakref.proxy(self))
+        self.cat = CatClient(weakref.proxy(self))
+        self.nodes = NodesClient(weakref.proxy(self))
+        self.snapshot = SnapshotClient(weakref.proxy(self))
+
+    def __repr__(self):
+        try:
+            # get a list of all connections
+            cons = self.transport.hosts
+            # truncate to 5 if there are too many
+            if len(cons) > 5:
+                cons = cons[:5] + ['...']
+            return '<Elasticsearch(%r)>' % cons
+        except:
+            # probably operating on custom transport and connection_pool, ignore
+            return super(Elasticsearch, self).__repr__()
+
+    def _bulk_body(self, body):
+        # if not passed in a string, serialize items and join by newline
+        if not isinstance(body, string_types):
+            body = '\n'.join(map(self.transport.serializer.dumps, body))
+
+        # bulk body must end with a newline
+        if not body.endswith('\n'):
+            body += '\n'
+
+        return body
+
+    @query_params()
+    def ping(self, params=None):
+        """
+        Returns True if the cluster is up, False otherwise.
+        ``_
+        """
+        try:
+            self.transport.perform_request('HEAD', '/', params=params)
+        except NotFoundError:
+            return False
+        return True
+
+    @query_params()
+    def info(self, params=None):
+        """
+        Get the basic info from the current cluster.
+        ``_
+        """
+        _, data = self.transport.perform_request('GET', '/', params=params)
+        return data
+
+    @query_params('consistency', 'parent', 'refresh', 'routing',
+        'timeout', 'timestamp', 'ttl', 'version', 'version_type')
+    def create(self, index, doc_type, body, id=None, params=None):
+        """
+        Adds a typed JSON document in a specific index, making it searchable.
+        Behind the scenes this method calls index(..., op_type='create')
+        ``_
+
+        :arg index: The name of the index
+        :arg doc_type: The type of the document
+        :arg body: The document
+        :arg id: Document ID
+        :arg consistency: Explicit write consistency setting for the operation,
+            valid choices are: 'one', 'quorum', 'all'
+        :arg op_type: Explicit operation type, default 'index', valid choices
+            are: 'index', 'create'
+        :arg parent: ID of the parent document
+        :arg refresh: Refresh the index after performing the operation
+        :arg routing: Specific routing value
+        :arg timeout: Explicit operation timeout
+        :arg timestamp: Explicit timestamp for the document
+        :arg ttl: Expiration time for the document
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type, valid choices are: 'internal',
+            'external', 'external_gte', 'force'
+        """
+        return self.index(index, doc_type, body, id=id, params=params, op_type='create')
+
+    @query_params('consistency', 'op_type', 'parent', 'refresh', 'routing',
+        'timeout', 'timestamp', 'ttl', 'version', 'version_type')
+    def index(self, index, doc_type, body, id=None, params=None):
+        """
+        Adds or updates a typed JSON document in a specific index, making it searchable.
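+
+        A minimal sketch (index, type and document values are invented)::
+
+            es.index(index='my-index', doc_type='test-type', id=42,
+                     body={'any': 'data'})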
+ ``_ + + :arg index: The name of the index + :arg doc_type: The type of the document + :arg body: The document + :arg id: Document ID + :arg consistency: Explicit write consistency setting for the operation, + valid choices are: 'one', 'quorum', 'all' + :arg op_type: Explicit operation type, default 'index', valid choices + are: 'index', 'create' + :arg parent: ID of the parent document + :arg refresh: Refresh the index after performing the operation + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg timestamp: Explicit timestamp for the document + :arg ttl: Expiration time for the document + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (index, doc_type, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('POST' if id in SKIP_IN_PATH else 'PUT', + _make_path(index, doc_type, id), params=params, body=body) + return data + + @query_params('parent', 'preference', 'realtime', 'refresh', 'routing') + def exists(self, index, doc_type, id, params=None): + """ + Returns a boolean indicating whether or not given document exists in Elasticsearch. + ``_ + + :arg index: The name of the index + :arg doc_type: The type of the document (use `_all` to fetch the first + document matching the ID across all types) + :arg id: The document ID + :arg parent: The ID of the parent document + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg realtime: Specify whether to perform the operation in realtime or + search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + """ + for param in (index, doc_type, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + try: + self.transport.perform_request('HEAD', _make_path(index, doc_type, + id), params=params) + except NotFoundError: + return False + return True + + @query_params('_source', '_source_exclude', '_source_include', 'fields', + 'parent', 'preference', 'realtime', 'refresh', 'routing', 'version', + 'version_type') + def get(self, index, id, doc_type='_all', params=None): + """ + Get a typed JSON document from the index based on its id. 
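+
+        A minimal sketch (identifiers are invented)::
+
+            doc = es.get(index='my-index', doc_type='test-type', id=42)
+            source = doc['_source']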
+        ``_
+
+        :arg index: The name of the index
+        :arg doc_type: The type of the document (use `_all` to fetch the first
+            document matching the ID across all types)
+        :arg id: The document ID
+        :arg _source: True or false to return the _source field or not, or a
+            list of fields to return
+        :arg _source_exclude: A list of fields to exclude from the returned
+            _source field
+        :arg _source_include: A list of fields to extract and return from the
+            _source field
+        :arg fields: A comma-separated list of fields to return in the response
+        :arg parent: The ID of the parent document
+        :arg preference: Specify the node or shard the operation should be
+            performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in realtime or
+            search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type, valid choices are: 'internal',
+            'external', 'external_gte', 'force'
+        """
+        for param in (index, doc_type, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            doc_type, id), params=params)
+        return data
+
+    @query_params('_source', '_source_exclude', '_source_include', 'parent',
+        'preference', 'realtime', 'refresh', 'routing', 'version',
+        'version_type')
+    def get_source(self, index, doc_type, id, params=None):
+        """
+        Get the source of a document by its index, type and id.
+        ``_
+
+        :arg index: The name of the index
+        :arg doc_type: The type of the document; use `_all` to fetch the first
+            document matching the ID across all types
+        :arg id: The document ID
+        :arg _source: True or false to return the _source field or not, or a
+            list of fields to return
+        :arg _source_exclude: A list of fields to exclude from the returned
+            _source field
+        :arg _source_include: A list of fields to extract and return from the
+            _source field
+        :arg parent: The ID of the parent document
+        :arg preference: Specify the node or shard the operation should be
+            performed on (default: random)
+        :arg realtime: Specify whether to perform the operation in realtime or
+            search mode
+        :arg refresh: Refresh the shard containing the document before
+            performing the operation
+        :arg routing: Specific routing value
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type, valid choices are: 'internal',
+            'external', 'external_gte', 'force'
+        """
+        for param in (index, doc_type, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            doc_type, id, '_source'), params=params)
+        return data
+
+    @query_params('_source', '_source_exclude', '_source_include', 'fields',
+        'preference', 'realtime', 'refresh')
+    def mget(self, body, index=None, doc_type=None, params=None):
+        """
+        Get multiple documents based on an index, type (optional) and ids.
+        ``_
+
+        :arg body: Document identifiers; can be either `docs` (containing full
+            document information) or `ids` (when index and type are provided in
+            the URL).
+ :arg index: The name of the index + :arg doc_type: The type of the document + :arg _source: True or false to return the _source field or not, or a + list of fields to return + :arg _source_exclude: A list of fields to exclude from the returned + _source field + :arg _source_include: A list of fields to extract and return from the + _source field + :arg fields: A comma-separated list of fields to return in the response + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg realtime: Specify whether to perform the operation in realtime or + search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_mget'), params=params, body=body) + return data + + @query_params('consistency', 'detect_noop', 'fields', 'lang', 'parent', + 'refresh', 'retry_on_conflict', 'routing', 'script', 'script_id', + 'scripted_upsert', 'timeout', 'timestamp', 'ttl', 'version', + 'version_type') + def update(self, index, doc_type, id, body=None, params=None): + """ + Update a document based on a script or partial data provided. + ``_ + + :arg index: The name of the index + :arg doc_type: The type of the document + :arg id: Document ID + :arg body: The request definition using either `script` or partial `doc` + :arg consistency: Explicit write consistency setting for the operation, + valid choices are: 'one', 'quorum', 'all' + :arg detect_noop: Specifying as true will cause Elasticsearch to check + if there are changes and, if there aren't, turn the update request + into a noop. + :arg fields: A comma-separated list of fields to return in the response + :arg lang: The script language (default: groovy) + :arg parent: ID of the parent document. 
It is only used for routing and
+            when using the upsert request
+        :arg refresh: Refresh the index after performing the operation
+        :arg retry_on_conflict: Specify how many times the operation should be
+            retried when a conflict occurs (default: 0)
+        :arg routing: Specific routing value
+        :arg script: The URL-encoded script definition (instead of using request
+            body)
+        :arg script_id: The id of a stored script
+        :arg scripted_upsert: True if the script referenced in script or
+            script_id should be called to perform inserts - defaults to false
+        :arg timeout: Explicit operation timeout
+        :arg timestamp: Explicit timestamp for the document
+        :arg ttl: Expiration time for the document
+        :arg version: Explicit version number for concurrency control
+        :arg version_type: Specific version type, valid choices are: 'internal',
+            'force'
+        """
+        for param in (index, doc_type, id):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            doc_type, id, '_update'), params=params, body=body)
+        return data
+
+    @query_params('_source', '_source_exclude', '_source_include',
+        'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
+        'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
+        'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
+        'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
+        'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
+        'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
+        'track_scores', 'version')
+    def search(self, index=None, doc_type=None, body=None, params=None):
+        """
+        Execute a search query and get back search hits that match the query.
+        ``_
+
+        :arg index: A comma-separated list of index names to search; use `_all`
+            or empty string to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to search; leave
+            empty to perform the operation on all types
+        :arg body: The search definition using the Query DSL
+        :arg _source: True or false to return the _source field or not, or a
+            list of fields to return
+        :arg _source_exclude: A list of fields to exclude from the returned
+            _source field
+        :arg _source_include: A list of fields to extract and return from the
+            _source field
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices.
(This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix queries + should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string query (AND + or OR), default 'OR', valid choices are: 'AND', 'OR' + :arg df: The field to use as default where no field prefix is given in + the query string + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg explain: Specify whether to return detailed information about score + computation as part of a hit + :arg fielddata_fields: A comma-separated list of fields to return as the + field data representation of a field for each hit + :arg fields: A comma-separated list of fields to return as part of a hit + :arg from\_: Starting offset (default: 0) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such as + providing text to a numeric field) should be ignored + :arg lowercase_expanded_terms: Specify whether query terms should be + lowercased + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg request_cache: Specify if request cache should be used for this + request or not, defaults to index level setting + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index should be + maintained for scrolled search + :arg search_type: Search operation type, valid choices are: + 'query_then_fetch', 'dfs_query_then_fetch', 'count', 'scan' + :arg size: Number of hits to return (default: 10) + :arg sort: A comma-separated list of : pairs + :arg stats: Specific 'tag' of the request for logging and statistical + purposes + :arg suggest_field: Specify which field to use for suggestions + :arg suggest_mode: Specify suggest mode, default 'missing', valid + choices are: 'missing', 'popular', 'always' + :arg suggest_size: How many suggestions to return in response + :arg suggest_text: The source text for which the suggestions should be + returned + :arg terminate_after: The maximum number of documents to collect for + each shard, upon reaching which the query execution will terminate + early. + :arg timeout: Explicit operation timeout + :arg track_scores: Whether to calculate and return scores even if they + are not used for sorting + :arg version: Specify whether to return document version as part of a + hit + """ + # from is a reserved word so it cannot be used, use from_ instead + if 'from_' in params: + params['from'] = params.pop('from_') + + if doc_type and not index: + index = '_all' + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_search'), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'local', 'preference', 'routing') + def search_shards(self, index=None, doc_type=None, params=None): + """ + The search shards api returns the indices and shards that a search + request would be executed against. This can give useful feedback for working + out issues or planning optimizations with routing and shard preferences. 
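+
+        A minimal sketch (index and routing values are invented)::
+
+            es.search_shards(index='my-index', routing='user-1')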
+ ``_ + + :arg index: A comma-separated list of index names to search; use `_all` + or empty string to perform the operation on all indices + :arg doc_type: A comma-separated list of document types to search; leave + empty to perform the operation on all types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg routing: Specific routing value + """ + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_search_shards'), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'preference', 'routing', 'scroll', 'search_type') + def search_template(self, index=None, doc_type=None, body=None, params=None): + """ + A query that accepts a query template and a map of key/value pairs to + fill in template parameters. + ``_ + + :arg index: A comma-separated list of index names to search; use `_all` + or empty string to perform the operation on all indices + :arg doc_type: A comma-separated list of document types to search; leave + empty to perform the operation on all types + :arg body: The search definition template and its params + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index should be + maintained for scrolled search + :arg search_type: Search operation type, valid choices are: + 'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch', + 'dfs_query_and_fetch', 'count', 'scan' + """ + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_search', 'template'), params=params, body=body) + return data + + @query_params('_source', '_source_exclude', '_source_include', + 'analyze_wildcard', 'analyzer', 'default_operator', 'df', 'fields', + 'lenient', 'lowercase_expanded_terms', 'parent', 'preference', 'q', + 'routing') + def explain(self, index, doc_type, id, body=None, params=None): + """ + The explain api computes a score explanation for a query and a specific + document. This can give useful feedback whether a document matches or + didn't match a specific query. 
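+
+        A minimal sketch (names and query are invented)::
+
+            es.explain(index='my-index', doc_type='test-type', id=42,
+                       body={'query': {'match': {'title': 'python'}}})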
+ ``_ + + :arg index: The name of the index + :arg doc_type: The type of the document + :arg id: The document ID + :arg body: The query definition using the Query DSL + :arg _source: True or false to return the _source field or not, or a + list of fields to return + :arg _source_exclude: A list of fields to exclude from the returned + _source field + :arg _source_include: A list of fields to extract and return from the + _source field + :arg analyze_wildcard: Specify whether wildcards and prefix queries in + the query string query should be analyzed (default: false) + :arg analyzer: The analyzer for the query string query + :arg default_operator: The default operator for query string query (AND + or OR), default 'OR', valid choices are: 'AND', 'OR' + :arg df: The default field for query string query (default: _all) + :arg fields: A comma-separated list of fields to return in the response + :arg lenient: Specify whether format-based query failures (such as + providing text to a numeric field) should be ignored + :arg lowercase_expanded_terms: Specify whether query terms should be + lowercased + :arg parent: The ID of the parent document + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: Specific routing value + """ + for param in (index, doc_type, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, id, '_explain'), params=params, body=body) + return data + + @query_params('scroll') + def scroll(self, scroll_id=None, body=None, params=None): + """ + Scroll a search request created by specifying the scroll parameter. + ``_ + + :arg scroll_id: The scroll ID + :arg body: The scroll ID if not passed by URL or query parameter. + :arg scroll: Specify how long a consistent view of the index should be + maintained for scrolled search + """ + if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: + raise ValueError("You need to supply scroll_id or body.") + elif scroll_id and not body: + body = scroll_id + elif scroll_id: + params['scroll_id'] = scroll_id + + _, data = self.transport.perform_request('GET', '/_search/scroll', + params=params, body=body) + return data + + @query_params() + def clear_scroll(self, scroll_id=None, body=None, params=None): + """ + Clear the scroll request created by specifying the scroll parameter to + search. + ``_ + + :arg scroll_id: A comma-separated list of scroll IDs to clear + :arg body: A comma-separated list of scroll IDs to clear if none was + specified via the scroll_id parameter + """ + _, data = self.transport.perform_request('DELETE', _make_path('_search', + 'scroll', scroll_id), params=params, body=body) + return data + + @query_params('consistency', 'parent', 'refresh', 'routing', 'timeout', + 'version', 'version_type') + def delete(self, index, doc_type, id, params=None): + """ + Delete a typed JSON document from a specific index based on its id. 
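+
+        A minimal sketch (identifiers are invented)::
+
+            es.delete(index='my-index', doc_type='test-type', id=42)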
+ ``_ + + :arg index: The name of the index + :arg doc_type: The type of the document + :arg id: The document ID + :arg consistency: Specific write consistency setting for the operation, + valid choices are: 'one', 'quorum', 'all' + :arg parent: ID of parent document + :arg refresh: Refresh the index after performing the operation + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (index, doc_type, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('DELETE', _make_path(index, + doc_type, id), params=params) + return data + + @query_params('allow_no_indices', 'analyze_wildcard', 'analyzer', + 'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable', + 'lenient', 'lowercase_expanded_terms', 'min_score', 'preference', 'q', + 'routing') + def count(self, index=None, doc_type=None, body=None, params=None): + """ + Execute a query and get the number of matches for that query. + ``_ + + :arg index: A comma-separated list of indices to restrict the results + :arg doc_type: A comma-separated list of types to restrict the results + :arg body: A query to restrict the results specified with the Query DSL + (optional) + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix queries + should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string query (AND + or OR), default 'OR', valid choices are: 'AND', 'OR' + :arg df: The field to use as default where no field prefix is given in + the query string + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such as + providing text to a numeric field) should be ignored + :arg lowercase_expanded_terms: Specify whether query terms should be + lowercased + :arg min_score: Include only documents with a specific `_score` value in + the result + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: Specific routing value + """ + if doc_type and not index: + index = '_all' + + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_count'), params=params, body=body) + return data + + @query_params('consistency', 'fields', 'refresh', 'routing', 'timeout') + def bulk(self, body, index=None, doc_type=None, params=None): + """ + Perform many index/delete operations in a single API call. + + See the :func:`~elasticsearch.helpers.bulk` helper function for a more + friendly API. 
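+
+        A minimal sketch (index, type and documents are invented); passing a
+        list works because the body is serialized item by item and joined by
+        newlines::
+
+            es.bulk(body=[
+                {'index': {'_index': 'my-index', '_type': 'test-type', '_id': 1}},
+                {'field': 'value'},
+            ])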
+ ``_ + + :arg body: The operation definition and data (action-data pairs), + separated by newlines + :arg index: Default index for items which don't provide one + :arg doc_type: Default document type for items which don't provide one + :arg consistency: Explicit write consistency setting for the operation, + valid choices are: 'one', 'quorum', 'all' + :arg fields: Default comma-separated list of fields to return in the + response for updates + :arg refresh: Refresh the index after performing the operation + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('POST', _make_path(index, + doc_type, '_bulk'), params=params, body=self._bulk_body(body)) + return data + + @query_params('search_type') + def msearch(self, body, index=None, doc_type=None, params=None): + """ + Execute several search requests within the same API. + ``_ + + :arg body: The request definitions (metadata-search request definition + pairs), separated by newlines + :arg index: A comma-separated list of index names to use as default + :arg doc_type: A comma-separated list of document types to use as + default + :arg search_type: Search operation type, valid choices are: + 'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch', + 'dfs_query_and_fetch', 'count', 'scan' + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_msearch'), params=params, body=self._bulk_body(body)) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'preference', 'routing') + def suggest(self, body, index=None, params=None): + """ + The suggest feature suggests similar looking terms based on a provided + text by using a suggester. + ``_ + + :arg body: The request definition + :arg index: A comma-separated list of index names to restrict the + operation; use `_all` or empty string to perform the operation on + all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg routing: Specific routing value + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('POST', _make_path(index, + '_suggest'), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'percolate_format', 'percolate_index', 'percolate_preference', + 'percolate_routing', 'percolate_type', 'preference', 'routing', + 'version', 'version_type') + def percolate(self, index, doc_type, id=None, body=None, params=None): + """ + The percolator allows to register queries against an index, and then + send percolate requests which include a doc, and getting back the + queries that match on that doc out of the set of registered queries. 
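+
+        A minimal sketch (names are invented; in Elasticsearch 2.x queries
+        are registered under the reserved `.percolator` type)::
+
+            es.index(index='my-index', doc_type='.percolator', id='alert-1',
+                     body={'query': {'match': {'message': 'error'}}})
+            es.percolate(index='my-index', doc_type='test-type',
+                         body={'doc': {'message': 'error in service'}})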
+ ``_ + + :arg index: The index of the document being percolated. + :arg doc_type: The type of the document being percolated. + :arg id: Substitute the document in the request body with a document + that is known by the specified id. On top of the id, the index and + type parameter will be used to retrieve the document from within the + cluster. + :arg body: The percolator request definition using the percolate DSL + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg percolate_format: Return an array of matching query IDs instead of + objects, valid choices are: 'ids' + :arg percolate_index: The index to percolate the document into. Defaults + to index. + :arg percolate_preference: Which shard to prefer when executing the + percolate request. + :arg percolate_routing: The routing value to use when percolating the + existing document. + :arg percolate_type: The type to percolate document into. Defaults to + type. + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg routing: A comma-separated list of specific routing values + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, id, '_percolate'), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable') + def mpercolate(self, body, index=None, doc_type=None, params=None): + """ + The percolator allows to register queries against an index, and then + send percolate requests which include a doc, and getting back the + queries that match on that doc out of the set of registered queries. + ``_ + + :arg body: The percolate request definitions (header & body pair), + separated by newlines + :arg index: The index of the document being count percolated to use as + default + :arg doc_type: The type of the document being percolated to use as + default. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_mpercolate'), params=params, body=self._bulk_body(body)) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'percolate_index', 'percolate_type', 'preference', 'routing', 'version', + 'version_type') + def count_percolate(self, index, doc_type, id=None, body=None, params=None): + """ + The percolator allows to register queries against an index, and then + send percolate requests which include a doc, and getting back the + queries that match on that doc out of the set of registered queries. + ``_ + + :arg index: The index of the document being count percolated. + :arg doc_type: The type of the document being count percolated. + :arg id: Substitute the document in the request body with a document + that is known by the specified id. On top of the id, the index and + type parameter will be used to retrieve the document from within the + cluster. + :arg body: The count percolator request definition using the percolate + DSL + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg percolate_index: The index to count percolate the document into. + Defaults to index. + :arg percolate_type: The type to count percolate document into. Defaults + to type. + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg routing: A comma-separated list of specific routing values + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, id, '_percolate', 'count'), params=params, body=body) + return data + + @query_params('dfs', 'field_statistics', 'fields', 'offsets', 'parent', + 'payloads', 'positions', 'preference', 'realtime', 'routing', + 'term_statistics', 'version', 'version_type') + def termvectors(self, index, doc_type, id=None, body=None, params=None): + """ + Returns information and statistics on terms in the fields of a + particular document. The document could be stored in the index or + artificially provided by the user (Added in 1.4). Note that for + documents stored in the index, this is a near realtime API as the term + vectors are not available until the next refresh. + ``_ + + :arg index: The index in which the document resides. + :arg doc_type: The type of the document. 
+ :arg id: The id of the document, when not specified a doc param should + be supplied. + :arg body: Define parameters and or supply a document to get termvectors + for. See documentation. + :arg dfs: Specifies if distributed frequencies should be returned + instead shard frequencies., default False + :arg field_statistics: Specifies if document count, sum of document + frequencies and sum of total term frequencies should be returned., + default True + :arg fields: A comma-separated list of fields to return. + :arg offsets: Specifies if term offsets should be returned., default + True + :arg parent: Parent id of documents. + :arg payloads: Specifies if term payloads should be returned., default + True + :arg positions: Specifies if term positions should be returned., default + True + :arg preference: Specify the node or shard the operation should be + performed on (default: random). + :arg realtime: Specifies if request is real-time as opposed to near- + real-time (default: true). + :arg routing: Specific routing value. + :arg term_statistics: Specifies if total term frequency and document + frequency should be returned., default False + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, id, '_termvectors'), params=params, body=body) + return data + + @query_params('field_statistics', 'fields', 'ids', 'offsets', 'parent', + 'payloads', 'positions', 'preference', 'realtime', 'routing', + 'term_statistics', 'version', 'version_type') + def mtermvectors(self, index=None, doc_type=None, body=None, params=None): + """ + Multi termvectors API allows to get multiple termvectors based on an + index, type and id. + ``_ + + :arg index: The index in which the document resides. + :arg doc_type: The type of the document. + :arg body: Define ids, documents, parameters or a list of parameters per + document here. You must at least provide a list of document ids. See + documentation. + :arg field_statistics: Specifies if document count, sum of document + frequencies and sum of total term frequencies should be returned. + Applies to all returned documents unless otherwise specified in body + "params" or "docs"., default True + :arg fields: A comma-separated list of fields to return. Applies to all + returned documents unless otherwise specified in body "params" or + "docs". + :arg ids: A comma-separated list of documents ids. You must define ids + as parameter or set "ids" or "docs" in the request body + :arg offsets: Specifies if term offsets should be returned. Applies to + all returned documents unless otherwise specified in body "params" + or "docs"., default True + :arg parent: Parent id of documents. Applies to all returned documents + unless otherwise specified in body "params" or "docs". + :arg payloads: Specifies if term payloads should be returned. Applies to + all returned documents unless otherwise specified in body "params" + or "docs"., default True + :arg positions: Specifies if term positions should be returned. 
Applies + to all returned documents unless otherwise specified in body + "params" or "docs"., default True + :arg preference: Specify the node or shard the operation should be + performed on (default: random) .Applies to all returned documents + unless otherwise specified in body "params" or "docs". + :arg realtime: Specifies if requests are real-time as opposed to near- + real-time (default: true). + :arg routing: Specific routing value. Applies to all returned documents + unless otherwise specified in body "params" or "docs". + :arg term_statistics: Specifies if total term frequency and document + frequency should be returned. Applies to all returned documents + unless otherwise specified in body "params" or "docs"., default + False + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + _, data = self.transport.perform_request('GET', _make_path(index, + doc_type, '_mtermvectors'), params=params, body=body) + return data + + @query_params('op_type', 'version', 'version_type') + def put_script(self, lang, id, body, params=None): + """ + Create a script in given language with specified ID. + ``_ + + :arg lang: Script language + :arg id: Script ID + :arg body: The document + :arg op_type: Explicit operation type, default 'index', valid choices + are: 'index', 'create' + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (lang, id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path('_scripts', + lang, id), params=params, body=body) + return data + + @query_params('version', 'version_type') + def get_script(self, lang, id, params=None): + """ + Retrieve a script from the API. + ``_ + + :arg lang: Script language + :arg id: Script ID + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (lang, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path('_scripts', + lang, id), params=params) + return data + + @query_params('version', 'version_type') + def delete_script(self, lang, id, params=None): + """ + Remove a stored script from elasticsearch. + ``_ + + :arg lang: Script language + :arg id: Script ID + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (lang, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('DELETE', + _make_path('_scripts', lang, id), params=params) + return data + + @query_params('op_type', 'version', 'version_type') + def put_template(self, id, body, params=None): + """ + Create a search template. 
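+
+        A minimal usage sketch (the template id and body are illustrative;
+        assumes an ``Elasticsearch`` client instance ``es``)::
+
+            es.put_template(id='my-template', body={
+                'template': {'query': {'match': {'title': '{{query_string}}'}}}})
+            template = es.get_template(id='my-template')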
+ ``_ + + :arg id: Template ID + :arg body: The document + :arg op_type: Explicit operation type, default 'index', valid choices + are: 'index', 'create' + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path('_search', + 'template', id), params=params, body=body) + return data + + @query_params('version', 'version_type') + def get_template(self, id, params=None): + """ + Retrieve a search template. + ``_ + + :arg id: Template ID + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + _, data = self.transport.perform_request('GET', _make_path('_search', + 'template', id), params=params) + return data + + @query_params('version', 'version_type') + def delete_template(self, id, params=None): + """ + Delete a search template. + ``_ + + :arg id: Template ID + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type, valid choices are: 'internal', + 'external', 'external_gte', 'force' + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + _, data = self.transport.perform_request('DELETE', _make_path('_search', + 'template', id), params=params) + return data + + @query_params('allow_no_indices', 'analyze_wildcard', 'analyzer', + 'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable', + 'lenient', 'lowercase_expanded_terms', 'min_score', 'preference', 'q', + 'routing') + def search_exists(self, index=None, doc_type=None, body=None, params=None): + """ + The exists API allows to easily determine if any matching documents + exist for a provided query. + ``_ + + :arg index: A comma-separated list of indices to restrict the results + :arg doc_type: A comma-separated list of types to restrict the results + :arg body: A query to restrict the results specified with the Query DSL + (optional) + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix queries + should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string query (AND + or OR), default 'OR', valid choices are: 'AND', 'OR' + :arg df: The field to use as default where no field prefix is given in + the query string + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such as + providing text to a numeric field) should be ignored + :arg lowercase_expanded_terms: Specify whether query terms should be + lowercased + :arg min_score: Include only documents with a specific `_score` value in + the result + :arg preference: Specify the node or shard the operation should be + performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: Specific routing value + """ + try: + self.transport.perform_request('POST', _make_path(index, + doc_type, '_search', 'exists'), params=params, body=body) + except NotFoundError: + return False + return True + + @query_params('allow_no_indices', 'expand_wildcards', 'fields', + 'ignore_unavailable', 'level') + def field_stats(self, index=None, body=None, params=None): + """ + The field stats api allows one to find statistical properties of a + field without executing a search, but looking up measurements that are + natively available in the Lucene index. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg body: Field json objects containing the name and optionally a range + to filter out indices result, that have results outside the defined + bounds + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg fields: A comma-separated list of fields for to get field + statistics for (min value, max value, and more) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg level: Defines if field stats should be returned on a per index + level or on a cluster wide level, default 'cluster', valid choices + are: 'indices', 'cluster' + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_field_stats'), params=params, body=body) + return data + + @query_params() + def render_search_template(self, id=None, body=None, params=None): + """ + ``_ + + :arg id: The id of the stored search template + :arg body: The search definition template and its params + """ + _, data = self.transport.perform_request('GET', _make_path('_render', + 'template', id), params=params, body=body) + return data + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/cat.py b/venv/lib/python2.7/site-packages/elasticsearch/client/cat.py new file mode 100644 index 0000000..092ce30 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/cat.py @@ -0,0 +1,346 @@ +from .utils import NamespacedClient, query_params, _make_path + +class CatClient(NamespacedClient): + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def aliases(self, name=None, params=None): + """ + + ``_ + + :arg name: A comma-separated list of alias names to return + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'aliases', name), params=params) + return data + + @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v') + def allocation(self, node_id=None, params=None): + """ + Allocation provides a snapshot of how shards have located around the + cluster and the state of disk usage. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to limit the + returned information + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'allocation', node_id), params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def count(self, index=None, params=None): + """ + Count provides quick access to the document count of the entire cluster, + or individual indices. 
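+
+        A minimal usage sketch (the index name is illustrative; assumes an
+        ``Elasticsearch`` client instance ``es``)::
+
+            # tabular output, with column headers enabled
+            print(es.cat.count(index='my-index', v=True))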
+ ``_ + + :arg index: A comma-separated list of index names to limit the returned + information + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'count', index), params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'ts', 'v') + def health(self, params=None): + """ + health is a terse, one-line representation of the same information from + :meth:`~elasticsearch.client.cluster.ClusterClient.health` API + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg ts: Set to false to disable timestamping, default True + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/health', + params=params) + return data + + @query_params('help') + def help(self, params=None): + """ + A simple help for the cat api. + ``_ + + :arg help: Return help information, default False + """ + _, data = self.transport.perform_request('GET', '/_cat', params=params) + return data + + @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'pri', 'v') + def indices(self, index=None, params=None): + """ + The indices command provides a cross-section of each index. + ``_ + + :arg index: A comma-separated list of index names to limit the returned + information + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg pri: Set to true to return stats only for primary shards, default + False + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'indices', index), params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def master(self, params=None): + """ + Displays the master's node ID, bound IP address, and node name. + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/master', + params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def nodes(self, params=None): + """ + The nodes command shows the cluster topology. 
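+
+        A minimal usage sketch (the column names passed to ``h`` are
+        illustrative; assumes an ``Elasticsearch`` client instance ``es``)::
+
+            print(es.cat.nodes(v=True, h='host,heap.percent,uptime'))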
+ ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/nodes', + params=params) + return data + + @query_params('bytes', 'h', 'help', 'master_timeout', 'v') + def recovery(self, index=None, params=None): + """ + recovery is a view of shard replication. + ``_ + + :arg index: A comma-separated list of index names to limit the returned + information + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'recovery', index), params=params) + return data + + @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v') + def shards(self, index=None, params=None): + """ + The shards command is the detailed view of what nodes contain which shards. + ``_ + + :arg index: A comma-separated list of index names to limit the returned + information + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'shards', index), params=params) + return data + + @query_params('bytes', 'h', 'help', 'v') + def segments(self, index=None, params=None): + """ + The segments command is the detailed view of Lucene segments per index. + ``_ + + :arg index: A comma-separated list of index names to limit the returned + information + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'segments', index), params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def pending_tasks(self, params=None): + """ + pending_tasks provides the same information as the + :meth:`~elasticsearch.client.cluster.ClusterClient.pending_tasks` API + in a convenient tabular format. + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. 
Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/pending_tasks', + params=params) + return data + + @query_params('full_id', 'h', 'help', 'local', 'master_timeout', 'v') + def thread_pool(self, params=None): + """ + Get information about thread pools. + ``_ + + :arg full_id: Enables displaying the complete node ids, default False + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/thread_pool', + params=params) + return data + + @query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v') + def fielddata(self, fields=None, params=None): + """ + Shows information about currently loaded fielddata on a per-node basis. + ``_ + + :arg fields: A comma-separated list of fields to return the fielddata + size + :arg bytes: The unit in which to display byte values, valid choices are: + 'b', 'k', 'm', 'g' + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'fielddata', fields), params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def plugins(self, params=None): + """ + + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/plugins', + params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def nodeattrs(self, params=None): + """ + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/nodeattrs', + params=params) + return data + + @query_params('h', 'help', 'local', 'master_timeout', 'v') + def repositories(self, params=None): + """ + ``_ + + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg local: Return local information, do not retrieve the state from + master node, default False + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. 
Display column headers, default False + """ + _, data = self.transport.perform_request('GET', '/_cat/repositories', + params=params) + return data + + @query_params('h', 'help', 'master_timeout', 'v') + def snapshots(self, repository=None, params=None): + """ + ``_ + + :arg repository: Name of repository from which to fetch the snapshot + information + :arg h: Comma-separated list of column names to display + :arg help: Return help information, default False + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg v: Verbose mode. Display column headers, default False + """ + _, data = self.transport.perform_request('GET', _make_path('_cat', + 'snapshots', repository), params=params) + return data diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/cluster.py b/venv/lib/python2.7/site-packages/elasticsearch/client/cluster.py new file mode 100644 index 0000000..2f9e063 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/cluster.py @@ -0,0 +1,154 @@ +from .utils import NamespacedClient, query_params, _make_path + +class ClusterClient(NamespacedClient): + @query_params('level', 'local', 'master_timeout', 'timeout', + 'wait_for_active_shards', 'wait_for_nodes', + 'wait_for_relocating_shards', 'wait_for_status') + def health(self, index=None, params=None): + """ + Get a very simple status on the health of the cluster. + ``_ + + :arg index: Limit the information returned to a specific index + :arg level: Specify the level of detail for returned information, + default 'cluster', valid choices are: 'cluster', 'indices', 'shards' + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Wait until the specified number of shards + is active + :arg wait_for_nodes: Wait until the specified number of nodes is + available + :arg wait_for_relocating_shards: Wait until the specified number of + relocating shards is finished + :arg wait_for_status: Wait until cluster is in a specific state, default + None, valid choices are: 'green', 'yellow', 'red' + """ + _, data = self.transport.perform_request('GET', _make_path('_cluster', + 'health', index), params=params) + return data + + @query_params('local', 'master_timeout') + def pending_tasks(self, params=None): + """ + The pending cluster tasks API returns a list of any cluster-level + changes (e.g. create index, update mapping, allocate or fail shard) + which have not yet been executed. + ``_ + + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + _, data = self.transport.perform_request('GET', + '/_cluster/pending_tasks', params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', + 'ignore_unavailable', 'local', 'master_timeout') + def state(self, metric=None, index=None, params=None): + """ + Get a comprehensive state information of the whole cluster. + ``_ + + :arg metric: Limit the information returned to the specified metrics + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg flat_settings: Return settings in flat format (default: false) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + if index and not metric: + metric = '_all' + _, data = self.transport.perform_request('GET', _make_path('_cluster', + 'state', metric, index), params=params) + return data + + @query_params('flat_settings', 'human', 'timeout') + def stats(self, node_id=None, params=None): + """ + The Cluster Stats API allows to retrieve statistics from a cluster wide + perspective. The API returns basic index metrics and information about + the current nodes that form the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to limit the + returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes + :arg flat_settings: Return settings in flat format (default: false) + :arg human: Whether to return time and byte values in human-readable + format., default False + :arg timeout: Explicit operation timeout + """ + url = '/_cluster/stats' + if node_id: + url = _make_path('_cluster/stats/nodes', node_id) + _, data = self.transport.perform_request('GET', url, params=params) + return data + + @query_params('dry_run', 'explain', 'master_timeout', 'metric', 'timeout') + def reroute(self, body=None, params=None): + """ + Explicitly execute a cluster reroute allocation command including specific commands. + ``_ + + :arg body: The definition of `commands` to perform (`move`, `cancel`, + `allocate`) + :arg dry_run: Simulate the operation only and return the resulting state + :arg explain: Return an explanation of why the commands can or cannot be + executed + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg metric: Limit the information returned to the specified metrics. + Defaults to all but metadata, valid choices are: '_all', 'blocks', + 'metadata', 'nodes', 'routing_table', 'master_node', 'version' + :arg timeout: Explicit operation timeout + """ + _, data = self.transport.perform_request('POST', '/_cluster/reroute', + params=params, body=body) + return data + + @query_params('flat_settings', 'master_timeout', 'timeout') + def get_settings(self, params=None): + """ + Get cluster settings. + ``_ + + :arg flat_settings: Return settings in flat format (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg timeout: Explicit operation timeout + """ + _, data = self.transport.perform_request('GET', '/_cluster/settings', + params=params) + return data + + @query_params('flat_settings', 'master_timeout', 'timeout') + def put_settings(self, body=None, params=None): + """ + Update cluster wide specific settings. + ``_ + + :arg body: The settings to be updated. Can be either `transient` or + `persistent` (survives cluster restart). 
+ :arg flat_settings: Return settings in flat format (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg timeout: Explicit operation timeout + """ + _, data = self.transport.perform_request('PUT', '/_cluster/settings', + params=params, body=body) + return data + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/indices.py b/venv/lib/python2.7/site-packages/elasticsearch/client/indices.py new file mode 100644 index 0000000..55dad3c --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/indices.py @@ -0,0 +1,998 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH +from ..exceptions import NotFoundError + +class IndicesClient(NamespacedClient): + @query_params('analyzer', 'char_filters', 'field', 'filters', 'format', + 'prefer_local', 'text', 'tokenizer') + def analyze(self, index=None, body=None, params=None): + """ + Perform the analysis process on a text and return the tokens breakdown of the text. + ``_ + + :arg index: The name of the index to scope the operation + :arg body: The text on which the analysis should be performed + :arg analyzer: The name of the analyzer to use + :arg char_filters: A comma-separated list of character filters to use + for the analysis + :arg field: Use the analyzer configured for this field (instead of + passing the analyzer name) + :arg filters: A comma-separated list of filters to use for the analysis + :arg format: Format of the output, default 'detailed', valid choices + are: 'detailed', 'text' + :arg prefer_local: With `true`, specify that a local shard should be + used if available, with `false`, use a random shard (default: true) + :arg text: The text on which the analysis should be performed (when + request body is not used) + :arg tokenizer: The name of the tokenizer to use for the analysis + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_analyze'), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'force', + 'ignore_unavailable', 'operation_threading') + def refresh(self, index=None, params=None): + """ + Explicitly refresh one or more index, making all operations performed + since the last refresh available for search. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg force: Force a refresh even if not required, default False + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg operation_threading: TODO: ? + """ + _, data = self.transport.perform_request('POST', _make_path(index, + '_refresh'), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'force', + 'ignore_unavailable', 'wait_if_ongoing') + def flush(self, index=None, params=None): + """ + Explicitly flush one or more indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string for all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg force: Whether a flush should be forced even if it is not + necessarily needed ie. if no changes will be committed to the index. + This is useful if transaction log IDs should be incremented even if + no uncommitted changes are present. (This setting can be considered + as internal) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg wait_if_ongoing: If set to true the flush operation will block + until the flush can be executed if another flush operation is + already executing. The default is false and will cause an exception + to be thrown on the shard level if another flush operation is + already running. + """ + _, data = self.transport.perform_request('POST', _make_path(index, + '_flush'), params=params) + return data + + @query_params('master_timeout', 'timeout', 'update_all_types') + def create(self, index, body=None, params=None): + """ + Create an index in Elasticsearch. + ``_ + + :arg index: The name of the index + :arg body: The configuration for the index (`settings` and `mappings`) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg update_all_types: Whether to update the mapping for all fields with + the same name across all types or not + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + _, data = self.transport.perform_request('PUT', _make_path(index), + params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', + 'human', 'ignore_unavailable', 'local') + def get(self, index, feature=None, params=None): + """ + The get index API allows to retrieve information about one or more indexes. + ``_ + + :arg index: A comma-separated list of index names + :arg feature: A comma-separated list of features + :arg allow_no_indices: Ignore if a wildcard expression resolves to no + concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get expanded + to open or closed indices (default: open), default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg flat_settings: Return settings in flat format (default: false) + :arg human: Whether to return version and creation date values in human- + readable format., default False + :arg ignore_unavailable: Ignore unavailable indexes (default: false) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + _, data = self.transport.perform_request('GET', _make_path(index, + feature), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'master_timeout', 'timeout') + def open(self, index, params=None): + """ + Open a closed index to make it available for search. + ``_ + + :arg index: The name of the index + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'closed', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            '_open'), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
+        'master_timeout', 'timeout')
+    def close(self, index, params=None):
+        """
+        Close an index to remove its overhead from the cluster. A closed index
+        is blocked for read/write operations.
+        ``_
+
+        :arg index: The name of the index
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            '_close'), params=params)
+        return data
+
+    @query_params('master_timeout', 'timeout')
+    def delete(self, index, params=None):
+        """
+        Delete an index in Elasticsearch.
+        ``_
+
+        :arg index: A comma-separated list of indices to delete; use `_all` or
+            `*` string to delete all indices
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+        _, data = self.transport.perform_request('DELETE', _make_path(index),
+            params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
+        'local')
+    def exists(self, index, params=None):
+        """
+        Return a boolean indicating whether the given index exists.
+        ``_
+
+        :arg index: A comma-separated list of indices to check
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices.
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + try: + self.transport.perform_request('HEAD', _make_path(index), + params=params) + except NotFoundError: + return False + return True + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'local') + def exists_type(self, index, doc_type, params=None): + """ + Check if a type/types exists in an index/indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` to check + the types across all indices + :arg doc_type: A comma-separated list of document types to check + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + try: + self.transport.perform_request('HEAD', _make_path(index, doc_type), + params=params) + except NotFoundError: + return False + return True + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'master_timeout', 'timeout', 'update_all_types') + def put_mapping(self, doc_type, body, index=None, params=None): + """ + Register specific mapping definition for a specific type. + ``_ + + :arg doc_type: The name of the document type + :arg body: The mapping definition + :arg index: A comma-separated list of index names the mapping should be + added to (supports wildcards); use `_all` or omit to add the mapping + on all indices. + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg update_all_types: Whether to update the mapping for all fields with + the same name across all types or not + """ + for param in (doc_type, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path(index, + '_mapping', doc_type), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'local') + def get_mapping(self, index=None, doc_type=None, params=None): + """ + Retrieve mapping definition of index or index/type. + ``_ + + :arg index: A comma-separated list of index names + :arg doc_type: A comma-separated list of document types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_mapping', doc_type), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'include_defaults', 'local') + def get_field_mapping(self, fields, index=None, doc_type=None, params=None): + """ + Retrieve mapping definition of a specific field. + ``_ + + :arg fields: A comma-separated list of fields + :arg index: A comma-separated list of index names + :arg doc_type: A comma-separated list of document types + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg include_defaults: Whether the default mapping values should be + returned as well + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + if fields in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'fields'.") + _, data = self.transport.perform_request('GET', _make_path(index, + '_mapping', doc_type, 'field', fields), params=params) + return data + + @query_params('master_timeout', 'timeout') + def put_alias(self, index, name, body=None, params=None): + """ + Create an alias for a specific index/indices. 
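+
+        A minimal usage sketch (index and alias names are illustrative; assumes
+        an ``Elasticsearch`` client instance ``es``)::
+
+            es.indices.put_alias(index='logs-2015.12', name='logs-current',
+                                 body={'routing': '12'})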
+ ``_ + + :arg index: A comma-separated list of index names the alias should point + to (supports wildcards); use `_all` to perform the operation on all + indices. + :arg name: The name of the alias to be created or updated + :arg body: The settings for the alias, such as `routing` or `filter` + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit timeout for the operation + """ + for param in (index, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path(index, + '_alias', name), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'local') + def exists_alias(self, index=None, name=None, params=None): + """ + Return a boolean indicating whether given alias exists. + ``_ + + :arg index: A comma-separated list of index names to filter aliases + :arg name: A comma-separated list of alias names to return + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default ['open', 'closed'], + valid choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + try: + self.transport.perform_request('HEAD', _make_path(index, '_alias', + name), params=params) + except NotFoundError: + return False + return True + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'local') + def get_alias(self, index=None, name=None, params=None): + """ + Retrieve a specified alias. + ``_ + + :arg index: A comma-separated list of index names to filter aliases + :arg name: A comma-separated list of alias names to return + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_alias', name), params=params) + return data + + @query_params('local', 'timeout') + def get_aliases(self, index=None, name=None, params=None): + """ + Retrieve specified aliases + ``_ + + :arg index: A comma-separated list of index names to filter aliases + :arg name: A comma-separated list of alias names to filter + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg timeout: Explicit operation timeout + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_aliases', name), params=params) + return data + + @query_params('master_timeout', 'timeout') + def update_aliases(self, body, params=None): + """ + Update specified aliases. 
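+
+        A minimal usage sketch (index and alias names are illustrative; assumes
+        an ``Elasticsearch`` client instance ``es``)::
+
+            # atomically swap an alias from the old index to the new one
+            es.indices.update_aliases(body={'actions': [
+                {'remove': {'index': 'logs-old', 'alias': 'logs-current'}},
+                {'add': {'index': 'logs-new', 'alias': 'logs-current'}},
+            ]})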
+ ``_ + + :arg body: The definition of `actions` to perform + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Request timeout + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('POST', '/_aliases', + params=params, body=body) + return data + + @query_params('master_timeout', 'timeout') + def delete_alias(self, index, name, params=None): + """ + Delete specific alias. + ``_ + + :arg index: A comma-separated list of index names (supports wildcards); + use `_all` for all indices + :arg name: A comma-separated list of aliases to delete (supports + wildcards); use `_all` to delete all aliases for the specified + indices. + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit timeout for the operation + """ + for param in (index, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('DELETE', _make_path(index, + '_alias', name), params=params) + return data + + @query_params('create', 'flat_settings', 'master_timeout', 'order', + 'timeout') + def put_template(self, name, body, params=None): + """ + Create an index template that will automatically be applied to new + indices created. + ``_ + + :arg name: The name of the template + :arg body: The template definition + :arg create: Whether the index template should only be added if new or + can also replace an existing one, default False + :arg flat_settings: Return settings in flat format (default: false) + :arg master_timeout: Specify timeout for connection to master + :arg order: The order for this template when merging multiple matching + ones (higher numbers are merged later, overriding the lower numbers) + :arg timeout: Explicit operation timeout + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path('_template', + name), params=params, body=body) + return data + + @query_params('local', 'master_timeout') + def exists_template(self, name, params=None): + """ + Return a boolean indicating whether given template exists. + ``_ + + :arg name: The name of the template + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + try: + self.transport.perform_request('HEAD', _make_path('_template', + name), params=params) + except NotFoundError: + return False + return True + + @query_params('flat_settings', 'local', 'master_timeout') + def get_template(self, name=None, params=None): + """ + Retrieve an index template by its name. + ``_ + + :arg name: The name of the template + :arg flat_settings: Return settings in flat format (default: false) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + """ + _, data = self.transport.perform_request('GET', _make_path('_template', + name), params=params) + return data + + @query_params('master_timeout', 'timeout') + def delete_template(self, name, params=None): + """ + Delete an index template by its name. 
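+
+        A minimal usage sketch (the template name is illustrative; assumes an
+        ``Elasticsearch`` client instance ``es``)::
+
+            if es.indices.exists_template(name='my-template'):
+                es.indices.delete_template(name='my-template')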
+ ``_ + + :arg name: The name of the template + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + _, data = self.transport.perform_request('DELETE', + _make_path('_template', name), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', + 'human', 'ignore_unavailable', 'local') + def get_settings(self, index=None, name=None, params=None): + """ + Retrieve settings for one or more (or all) indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg name: The name of the settings that should be included + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default ['open', 'closed'], + valid choices are: 'open', 'closed', 'none', 'all' + :arg flat_settings: Return settings in flat format (default: false) + :arg human: Whether to return version and creation date values in human- + readable format., default False + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state from + master node (default: false) + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_settings', name), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings', + 'ignore_unavailable', 'master_timeout') + def put_settings(self, body, index=None, params=None): + """ + Change specific index level settings in real time. + ``_ + + :arg body: The index settings to be updated + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg flat_settings: Return settings in flat format (default: false) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + _, data = self.transport.perform_request('PUT', _make_path(index, + '_settings'), params=params, body=body) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'master_timeout', 'request_cache') + def put_warmer(self, name, body, index=None, doc_type=None, params=None): + """ + Create an index warmer to run registered search requests to warm up the + index before it is available for search. 
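+
+        An illustrative warmer ``body`` is simply a search request definition,
+        for example (made-up query)::
+
+            {"query": {"match_all": {}}}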
+        ``_
+
+        :arg name: The name of the warmer
+        :arg body: The search request definition for the warmer (query, filters,
+            facets, sorting, etc)
+        :arg index: A comma-separated list of index names to register the warmer
+            for; use `_all` or omit to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to register the
+            warmer for; leave empty to perform the operation on all types
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices in the search request
+            to warm. (This includes `_all` string or when no indices have been
+            specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both, in the search request to
+            warm., default 'open', valid choices are: 'open', 'closed', 'none',
+            'all'
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed) in the search request
+            to warm
+        :arg master_timeout: Specify timeout for connection to master
+        :arg request_cache: Specify whether the request to be warmed should use
+            the request cache, defaults to index level setting
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('PUT', _make_path(index,
+            doc_type, '_warmer', name), params=params, body=body)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
+        'local')
+    def get_warmer(self, index=None, doc_type=None, name=None, params=None):
+        """
+        Retrieve an index warmer.
+        ``_
+
+        :arg index: A comma-separated list of index names to restrict the
+            operation; use `_all` to perform the operation on all indices
+        :arg doc_type: A comma-separated list of document types to restrict the
+            operation; leave empty to perform the operation on all types
+        :arg name: The name of the warmer (supports wildcards); leave empty to
+            get all warmers
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state from
+            master node (default: false)
+        """
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            doc_type, '_warmer', name), params=params)
+        return data
+
+    @query_params('master_timeout')
+    def delete_warmer(self, index, name, params=None):
+        """
+        Delete an index warmer.
+        ``_
+
+        :arg index: A comma-separated list of index names to delete warmers from
+            (supports wildcards); use `_all` to perform the operation on all
+            indices.
+        :arg name: A comma-separated list of warmer names to delete (supports
+            wildcards); use `_all` to delete all warmers in the specified
+            indices. You must specify a name either in the uri or in the
+            parameters.
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        for param in (index, name):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('DELETE', _make_path(index,
+            '_warmer', name), params=params)
+        return data
+
+    @query_params('completion_fields', 'fielddata_fields', 'fields', 'groups',
+        'human', 'level', 'types')
+    def stats(self, index=None, metric=None, params=None):
+        """
+        Retrieve statistics on different operations happening on an index.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg metric: Limit the information returned to the specific metrics.
+        :arg completion_fields: A comma-separated list of fields for `fielddata`
+            and `suggest` index metric (supports wildcards)
+        :arg fielddata_fields: A comma-separated list of fields for `fielddata`
+            index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata` and
+            `completion` index metric (supports wildcards)
+        :arg groups: A comma-separated list of search groups for `search` index
+            metric
+        :arg human: Whether to return time and byte values in human-readable
+            format., default False
+        :arg level: Return stats aggregated at cluster, index or shard level,
+            default 'indices', valid choices are: 'cluster', 'indices', 'shards'
+        :arg types: A comma-separated list of document types for the `indexing`
+            index metric
+        """
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            '_stats', metric), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'human',
+        'ignore_unavailable', 'operation_threading')
+    def segments(self, index=None, params=None):
+        """
+        Provide low-level segment information that a Lucene index (shard level) is built with.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg human: Whether to return time and byte values in human-readable
+            format., default False
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg operation_threading: TODO: ?
+        """
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            '_segments'), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'flush',
+        'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes',
+        'operation_threading', 'wait_for_merge')
+    def optimize(self, index=None, params=None):
+        """
+        Explicitly optimize one or more indices through an API.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices.
(This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg flush: Specify whether the index should be flushed after performing
+            the operation (default: true)
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg max_num_segments: The number of segments the index should be merged
+            into (default: dynamic)
+        :arg only_expunge_deletes: Specify whether the operation should only
+            expunge deleted documents
+        :arg operation_threading: TODO: ?
+        :arg wait_for_merge: Specify whether the request should block until the
+            merge process is finished (default: true)
+        """
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            '_optimize'), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'analyze_wildcard', 'analyzer',
+        'default_operator', 'df', 'expand_wildcards', 'explain',
+        'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
+        'operation_threading', 'q', 'rewrite')
+    def validate_query(self, index=None, doc_type=None, body=None, params=None):
+        """
+        Validate a potentially expensive query without executing it.
+        ``_
+
+        :arg index: A comma-separated list of index names to restrict the
+            operation; use `_all` or empty string to perform the operation on
+            all indices
+        :arg doc_type: A comma-separated list of document types to restrict the
+            operation; leave empty to perform the operation on all types
+        :arg body: The query definition specified with the Query DSL
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg analyze_wildcard: Specify whether wildcard and prefix queries
+            should be analyzed (default: false)
+        :arg analyzer: The analyzer to use for the query string
+        :arg default_operator: The default operator for query string query (AND
+            or OR), default 'OR', valid choices are: 'AND', 'OR'
+        :arg df: The field to use as default where no field prefix is given in
+            the query string
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg explain: Return detailed information about the error
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg lenient: Specify whether format-based query failures (such as
+            providing text to a numeric field) should be ignored
+        :arg lowercase_expanded_terms: Specify whether query terms should be
+            lowercased
+        :arg operation_threading: TODO: ?
+        :arg q: Query in the Lucene query string syntax
+        :arg rewrite: Provide a more detailed explanation showing the actual
+            Lucene query that will be executed.
+        """
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            doc_type, '_validate', 'query'), params=params, body=body)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'field_data',
+        'fielddata', 'fields', 'ignore_unavailable', 'query', 'recycler',
+        'request')
+    def clear_cache(self, index=None, params=None):
+        """
+        Clear either all caches or specific caches associated with one or more indices.
+        ``_
+
+        :arg index: A comma-separated list of index names to limit the operation
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg field_data: Clear field data
+        :arg fielddata: Clear field data
+        :arg fields: A comma-separated list of fields to clear when using the
+            `field_data` parameter (default: all)
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg query: Clear query caches
+        :arg recycler: Clear the recycler cache
+        :arg request: Clear request cache
+        """
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            '_cache', 'clear'), params=params)
+        return data
+
+    @query_params('active_only', 'detailed', 'human')
+    def recovery(self, index=None, params=None):
+        """
+        The indices recovery API provides insight into on-going shard
+        recoveries. Recovery status may be reported for specific indices, or
+        cluster-wide.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg active_only: Display only those recoveries that are currently on-
+            going, default False
+        :arg detailed: Whether to display detailed information about shard
+            recovery, default False
+        :arg human: Whether to return time and byte values in human-readable
+            format., default False
+        """
+        _, data = self.transport.perform_request('GET', _make_path(index,
+            '_recovery'), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
+        'only_ancient_segments', 'wait_for_completion')
+    def upgrade(self, index=None, params=None):
+        """
+        Upgrade one or more indices to the latest format through an API.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to concrete
+            indices that are open, closed or both., default 'open', valid
+            choices are: 'open', 'closed', 'none', 'all'
+        :arg ignore_unavailable: Whether specified concrete indices should be
+            ignored when unavailable (missing or closed)
+        :arg only_ancient_segments: If true, only ancient (an older Lucene major
+            release) segments will be upgraded
+        :arg wait_for_completion: Specify whether the request should block until
+            all segments are upgraded (default: false)
+        """
+        _, data = self.transport.perform_request('POST', _make_path(index,
+            '_upgrade'), params=params)
+        return data
+
+    @query_params('allow_no_indices', 'expand_wildcards', 'human',
+        'ignore_unavailable')
+    def get_upgrade(self, index=None, params=None):
+        """
+        Monitor how much of one or more indices has been upgraded.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or empty
+            string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices.
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg human: Whether to return time and byte values in human-readable + format., default False + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_upgrade'), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable') + def flush_synced(self, index=None, params=None): + """ + Perform a normal flush, then add a generated unique marker (sync_id) to all shards. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string for all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + """ + _, data = self.transport.perform_request('POST', _make_path(index, + '_flush', 'synced'), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable', + 'operation_threading', 'status') + def shard_stores(self, index=None, params=None): + """ + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg operation_threading: TODO: ? + :arg status: A comma-separated list of statuses used to filter on shards + to get store information for, valid choices are: 'green', 'yellow', + 'red', 'all' + """ + _, data = self.transport.perform_request('GET', _make_path(index, + '_shard_stores'), params=params) + return data + + @query_params('allow_no_indices', 'expand_wildcards', 'flush', + 'ignore_unavailable', 'max_num_segments', 'only_expunge_deletes', + 'operation_threading', 'wait_for_merge') + def forcemerge(self, index=None, params=None): + """ + ``_ + + :arg index: A comma-separated list of index names; use `_all` or empty + string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to concrete + indices that are open, closed or both., default 'open', valid + choices are: 'open', 'closed', 'none', 'all' + :arg flush: Specify whether the index should be flushed after performing + the operation (default: true) + :arg ignore_unavailable: Whether specified concrete indices should be + ignored when unavailable (missing or closed) + :arg max_num_segments: The number of segments the index should be merged + into (default: dynamic) + :arg only_expunge_deletes: Specify whether the operation should only + expunge deleted documents + :arg operation_threading: TODO: ? + :arg wait_for_merge: Specify whether the request should block until the + merge process is finished (default: true) + """ + _, data = self.transport.perform_request('POST', _make_path(index, + '_forcemerge'), params=params) + return data diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/nodes.py b/venv/lib/python2.7/site-packages/elasticsearch/client/nodes.py new file mode 100644 index 0000000..b81fcf6 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/nodes.py @@ -0,0 +1,91 @@ +from .utils import NamespacedClient, query_params, _make_path + +class NodesClient(NamespacedClient): + @query_params('flat_settings', 'human', 'timeout') + def info(self, node_id=None, metric=None, params=None): + """ + The cluster nodes info API allows to retrieve one or more (or all) of + the cluster nodes information. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to limit the + returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes + :arg metric: A comma-separated list of metrics you wish returned. Leave + empty to return all. + :arg flat_settings: Return settings in flat format (default: false) + :arg human: Whether to return time and byte values in human-readable + format., default False + :arg timeout: Explicit operation timeout + """ + _, data = self.transport.perform_request('GET', _make_path('_nodes', + node_id, metric), params=params) + return data + + @query_params('completion_fields', 'fielddata_fields', 'fields', 'groups', + 'human', 'level', 'timeout', 'types') + def stats(self, node_id=None, metric=None, index_metric=None, params=None): + """ + The cluster nodes stats API allows to retrieve one or more (or all) of + the cluster nodes statistics. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to limit the + returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes + :arg metric: Limit the information returned to the specified metrics + :arg index_metric: Limit the information returned for `indices` metric + to the specific index metrics. Isn't used if `indices` (or `all`) + metric isn't specified. 
+ :arg completion_fields: A comma-separated list of fields for `fielddata` + and `suggest` index metric (supports wildcards) + :arg fielddata_fields: A comma-separated list of fields for `fielddata` + index metric (supports wildcards) + :arg fields: A comma-separated list of fields for `fielddata` and + `completion` index metric (supports wildcards) + :arg groups: A comma-separated list of search groups for `search` index + metric + :arg human: Whether to return time and byte values in human-readable + format., default False + :arg level: Return indices stats aggregated at node, index or shard + level, default 'node', valid choices are: 'node', 'indices', + 'shards' + :arg timeout: Explicit operation timeout + :arg types: A comma-separated list of document types for the `indexing` + index metric + """ + _, data = self.transport.perform_request('GET', _make_path('_nodes', + node_id, 'stats', metric, index_metric), params=params) + return data + + @query_params('doc_type', 'ignore_idle_threads', 'interval', 'snapshots', + 'threads', 'timeout') + def hot_threads(self, node_id=None, params=None): + """ + An API allowing to get the current hot threads on each node in the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to limit the + returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all + nodes + :arg doc_type: The type to sample (default: cpu), valid choices are: + 'cpu', 'wait', 'block' + :arg ignore_idle_threads: Don't show threads that are in known-idle + places, such as waiting on a socket select or pulling from an empty + task queue (default: true) + :arg interval: The interval for the second sampling of threads + :arg snapshots: Number of samples of thread stacktrace (default: 10) + :arg threads: Specify the number of threads to provide information for + (default: 3) + :arg timeout: Explicit operation timeout + """ + # avoid python reserved words + if params and 'type_' in params: + params['type'] = params.pop('type_') + _, data = self.transport.perform_request('GET', _make_path('_cluster', + 'nodes', node_id, 'hotthreads'), params=params) + return data + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/snapshot.py b/venv/lib/python2.7/site-packages/elasticsearch/client/snapshot.py new file mode 100644 index 0000000..321580e --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/client/snapshot.py @@ -0,0 +1,169 @@ +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + +class SnapshotClient(NamespacedClient): + @query_params('master_timeout', 'wait_for_completion') + def create(self, repository, snapshot, body=None, params=None): + """ + Create a snapshot in repository + ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg body: The snapshot definition + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg wait_for_completion: Should this request wait until the operation + has completed before returning, default False + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path('_snapshot', + repository, snapshot), params=params, body=body) + return data + + @query_params('master_timeout') + def delete(self, repository, snapshot, params=None): + """ + Deletes a snapshot from a repository. 
+ ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg master_timeout: Explicit operation timeout for connection to master + node + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('DELETE', + _make_path('_snapshot', repository, snapshot), params=params) + return data + + @query_params('master_timeout') + def get(self, repository, snapshot, params=None): + """ + Retrieve information about a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A comma-separated list of snapshot names + :arg master_timeout: Explicit operation timeout for connection to master + node + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('GET', _make_path('_snapshot', + repository, snapshot), params=params) + return data + + @query_params('master_timeout', 'timeout') + def delete_repository(self, repository, params=None): + """ + Removes a shared file system repository. + ``_ + + :arg repository: A comma-separated list of repository names + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + _, data = self.transport.perform_request('DELETE', + _make_path('_snapshot', repository), params=params) + return data + + @query_params('local', 'master_timeout') + def get_repository(self, repository=None, params=None): + """ + Return information about registered repositories. + ``_ + + :arg repository: A comma-separated list of repository names + :arg local: Return local information, do not retrieve the state from + master node (default: false) + :arg master_timeout: Explicit operation timeout for connection to master + node + """ + _, data = self.transport.perform_request('GET', _make_path('_snapshot', + repository), params=params) + return data + + @query_params('master_timeout', 'timeout', 'verify') + def create_repository(self, repository, body, params=None): + """ + Registers a shared file system repository. + ``_ + + :arg repository: A repository name + :arg body: The repository definition + :arg master_timeout: Explicit operation timeout for connection to master + node + :arg timeout: Explicit operation timeout + :arg verify: Whether to verify the repository after creation + """ + for param in (repository, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + _, data = self.transport.perform_request('PUT', _make_path('_snapshot', + repository), params=params, body=body) + return data + + @query_params('master_timeout', 'wait_for_completion') + def restore(self, repository, snapshot, body=None, params=None): + """ + Restore a snapshot. 
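+
+        An illustrative ``body`` (the field names are from the snapshot
+        restore API; the index pattern is made up)::
+
+            {"indices": "logs-*", "include_global_state": False}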
+        ``_
+
+        :arg repository: A repository name
+        :arg snapshot: A snapshot name
+        :arg body: Details of what to restore
+        :arg master_timeout: Explicit operation timeout for connection to master
+            node
+        :arg wait_for_completion: Should this request wait until the operation
+            has completed before returning, default False
+        """
+        for param in (repository, snapshot):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+        _, data = self.transport.perform_request('POST', _make_path('_snapshot',
+            repository, snapshot, '_restore'), params=params, body=body)
+        return data
+
+    @query_params('master_timeout')
+    def status(self, repository=None, snapshot=None, params=None):
+        """
+        Return information about all currently running snapshots. By specifying
+        a repository name, it's possible to limit the results to a particular
+        repository.
+        ``_
+
+        :arg repository: A repository name
+        :arg snapshot: A comma-separated list of snapshot names
+        :arg master_timeout: Explicit operation timeout for connection to master
+            node
+        """
+        _, data = self.transport.perform_request('GET', _make_path('_snapshot',
+            repository, snapshot, '_status'), params=params)
+        return data
+
+    @query_params('master_timeout', 'timeout')
+    def verify_repository(self, repository, params=None):
+        """
+        Returns a list of nodes where the repository was successfully verified
+        or an error message if the verification process failed.
+        ``_
+
+        :arg repository: A repository name
+        :arg master_timeout: Explicit operation timeout for connection to master
+            node
+        :arg timeout: Explicit operation timeout
+        """
+        if repository in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'repository'.")
+        _, data = self.transport.perform_request('POST', _make_path('_snapshot',
+            repository, '_verify'), params=params)
+        return data
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/client/utils.py b/venv/lib/python2.7/site-packages/elasticsearch/client/utils.py
new file mode 100644
index 0000000..7a015a6
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/client/utils.py
@@ -0,0 +1,87 @@
+from __future__ import unicode_literals
+
+import weakref
+from datetime import date, datetime
+from functools import wraps
+from ..compat import string_types, quote_plus
+
+# parts of URL to be omitted
+SKIP_IN_PATH = (None, '', b'', [], ())
+
+def _escape(value):
+    """
+    Escape a single value of a URL string or a query parameter. If it is a list
+    or tuple, turn it into a comma-separated string first.
+    """
+
+    # make sequences into comma-separated strings
+    if isinstance(value, (list, tuple)):
+        value = ','.join(value)
+
+    # dates and datetimes into isoformat
+    elif isinstance(value, (date, datetime)):
+        value = value.isoformat()
+
+    # make bools into true/false strings
+    elif isinstance(value, bool):
+        value = str(value).lower()
+
+    # encode strings to utf-8
+    if isinstance(value, string_types):
+        try:
+            return value.encode('utf-8')
+        except UnicodeDecodeError:
+            # Python 2 and str, no need to re-encode
+            pass
+
+    return str(value)
+
+def _make_path(*parts):
+    """
+    Create a URL string from parts, omit all `None` values and empty strings.
+    Convert lists and tuples to comma-separated values.
+    """
+    #TODO: maybe only allow some parts to be lists/tuples ?
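+    # Illustrative behavior (made-up arguments): _make_path('idx', '_alias',
+    # ['a', 'b']) returns '/idx/_alias/a,b' -- None and empty parts are
+    # dropped, and list values are joined with commas by _escape before
+    # quoting.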
+    return '/' + '/'.join(
+        # preserve ',' and '*' in url for nicer URLs in logs
+        quote_plus(_escape(p), b',*') for p in parts if p not in SKIP_IN_PATH)
+
+# parameters that apply to all methods
+GLOBAL_PARAMS = ('pretty', 'format', 'filter_path')
+
+def query_params(*es_query_params):
+    """
+    Decorator that pops all accepted parameters from method's kwargs and puts
+    them in the params argument.
+    """
+    def _wrapper(func):
+        @wraps(func)
+        def _wrapped(*args, **kwargs):
+            params = kwargs.pop('params', {})
+            for p in es_query_params + GLOBAL_PARAMS:
+                if p in kwargs:
+                    params[p] = _escape(kwargs.pop(p))
+
+            # don't treat ignore and request_timeout as other params to avoid escaping
+            for p in ('ignore', 'request_timeout'):
+                if p in kwargs:
+                    params[p] = kwargs.pop(p)
+            return func(*args, params=params, **kwargs)
+        return _wrapped
+    return _wrapper
+
+
+class NamespacedClient(object):
+    def __init__(self, client):
+        self.client = client
+
+    @property
+    def transport(self):
+        return self.client.transport
+
+class AddonClient(NamespacedClient):
+    @classmethod
+    def infect_client(cls, client):
+        addon = cls(weakref.proxy(client))
+        setattr(client, cls.namespace, addon)
+        return client
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/compat.py b/venv/lib/python2.7/site-packages/elasticsearch/compat.py
new file mode 100644
index 0000000..deee3c5
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/compat.py
@@ -0,0 +1,13 @@
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+    string_types = basestring,
+    from urllib import quote_plus, urlencode
+    from urlparse import urlparse
+    from itertools import imap as map
+else:
+    string_types = str, bytes
+    from urllib.parse import quote_plus, urlencode, urlparse
+    map = map
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection/__init__.py b/venv/lib/python2.7/site-packages/elasticsearch/connection/__init__.py
new file mode 100644
index 0000000..c2f484a
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/connection/__init__.py
@@ -0,0 +1,3 @@
+from .base import Connection
+from .http_requests import RequestsHttpConnection
+from .http_urllib3 import Urllib3HttpConnection
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection/base.py b/venv/lib/python2.7/site-packages/elasticsearch/connection/base.py
new file mode 100644
index 0000000..914967c
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/connection/base.py
@@ -0,0 +1,110 @@
+import logging
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+from ..exceptions import TransportError, HTTP_EXCEPTIONS
+
+logger = logging.getLogger('elasticsearch')
+
+# create the elasticsearch.trace logger, but only set propagate to False if the
+# logger hasn't already been configured
+_tracer_already_configured = 'elasticsearch.trace' in logging.Logger.manager.loggerDict
+tracer = logging.getLogger('elasticsearch.trace')
+if not _tracer_already_configured:
+    tracer.propagate = False
+
+
+class Connection(object):
+    """
+    Class responsible for maintaining a connection to an Elasticsearch node. It
+    holds a persistent connection pool to it and its main interface
+    (`perform_request`) is thread-safe.
+
+    Also responsible for logging.
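+
+    A usage sketch (illustrative; in practice a concrete subclass such as
+    ``Urllib3HttpConnection`` is instantiated for you by the transport)::
+
+        conn = Urllib3HttpConnection(host='localhost', port=9200)
+        status, headers, raw = conn.perform_request('GET', '/')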
+ """ + transport_schema = 'http' + + def __init__(self, host='localhost', port=9200, url_prefix='', timeout=10, **kwargs): + """ + :arg host: hostname of the node (default: localhost) + :arg port: port to use (integer, default: 9200) + :arg url_prefix: optional url prefix for elasticsearch + :arg timeout: default timeout in seconds (float, default: 10) + """ + self.host = '%s://%s:%s' % (self.transport_schema, host, port) + if url_prefix: + url_prefix = '/' + url_prefix.strip('/') + self.url_prefix = url_prefix + self.timeout = timeout + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.host) + + def log_request_success(self, method, full_url, path, body, status_code, response, duration): + """ Log a successful API call. """ + # TODO: optionally pass in params instead of full_url and do urlencode only when needed + def _pretty_json(data): + # pretty JSON in tracer curl logs + try: + return json.dumps(json.loads(data), sort_keys=True, indent=2, separators=(',', ': ')).replace("'", r'\u0027') + except (ValueError, TypeError): + # non-json data or a bulk request + return data + + # body has already been serialized to utf-8, deserialize it for logging + # TODO: find a better way to avoid (de)encoding the body back and forth + if body: + body = body.decode('utf-8') + + logger.info( + '%s %s [status:%s request:%.3fs]', method, full_url, + status_code, duration + ) + logger.debug('> %s', body) + logger.debug('< %s', response) + + if tracer.isEnabledFor(logging.INFO): + # include pretty in trace curls + path = path.replace('?', '?pretty&', 1) if '?' in path else path + '?pretty' + if self.url_prefix: + path = path.replace(self.url_prefix, '', 1) + tracer.info("curl -X%s 'http://localhost:9200%s' -d '%s'", method, path, _pretty_json(body) if body else '') + + if tracer.isEnabledFor(logging.DEBUG): + tracer.debug('#[%s] (%.3fs)\n#%s', status_code, duration, _pretty_json(response).replace('\n', '\n#') if response else '') + + def log_request_fail(self, method, full_url, body, duration, status_code=None, response=None, exception=None): + """ Log an unsuccessful API call. """ + logger.warning( + '%s %s [status:%s request:%.3fs]', method, full_url, + status_code or 'N/A', duration, exc_info=exception is not None + ) + + # body has already been serialized to utf-8, deserialize it for logging + # TODO: find a better way to avoid (de)encoding the body back and forth + if body: + body = body.decode('utf-8') + + logger.debug('> %s', body) + + if response is not None: + logger.debug('< %s', response) + + def _raise_error(self, status_code, raw_data): + """ Locate appropriate exception and raise it. 
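+        The concrete class is looked up in ``HTTP_EXCEPTIONS`` by status
+        code, falling back to ``TransportError``.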
""" + error_message = raw_data + additional_info = None + try: + additional_info = json.loads(raw_data) + error_message = additional_info.get('error', error_message) + if isinstance(error_message, dict) and 'type' in error_message: + error_message = error_message['type'] + except: + # we don't care what went wrong + pass + + raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info) + + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection/http_requests.py b/venv/lib/python2.7/site-packages/elasticsearch/connection/http_requests.py new file mode 100644 index 0000000..b97a94c --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/connection/http_requests.py @@ -0,0 +1,88 @@ +import time +import warnings +try: + import requests + REQUESTS_AVAILABLE = True +except ImportError: + REQUESTS_AVAILABLE = False + +from .base import Connection +from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError +from ..compat import urlencode, string_types + +class RequestsHttpConnection(Connection): + """ + Connection using the `requests` library. + + :arg http_auth: optional http auth information as either ':' separated + string or a tuple. Any value will be passed into requests as `auth`. + :arg use_ssl: use ssl for the connection if `True` + :arg verify_certs: whether to verify SSL certificates + :arg ca_certs: optional path to CA bundle. By default standard requests' + bundle will be used. + :arg client_cert: path to the file containing the private key and the + certificate, or cert only if using client_key + :arg client_key: path to the file containing the private key if using + separate cert and key files (client_cert will contain only the cert) + """ + def __init__(self, host='localhost', port=9200, http_auth=None, + use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None, + client_key=None, **kwargs): + if not REQUESTS_AVAILABLE: + raise ImproperlyConfigured("Please install requests to use RequestsHttpConnection.") + + super(RequestsHttpConnection, self).__init__(host= host, port=port, **kwargs) + self.session = requests.session() + if http_auth is not None: + if isinstance(http_auth, (tuple, list)): + http_auth = tuple(http_auth) + elif isinstance(http_auth, string_types): + http_auth = tuple(http_auth.split(':', 1)) + self.session.auth = http_auth + self.base_url = 'http%s://%s:%d%s' % ( + 's' if use_ssl else '', + host, port, self.url_prefix + ) + self.session.verify = verify_certs + if not client_key: + self.session.cert = client_cert + elif client_cert: + # cert is a tuple of (certfile, keyfile) + self.session.cert = (client_cert, client_key) + if ca_certs: + if not verify_certs: + raise ImproperlyConfigured("You cannot pass CA certificates when verify SSL is off.") + self.session.verify = ca_certs + + if use_ssl and not verify_certs: + warnings.warn( + 'Connecting to %s using SSL with verify_certs=False is insecure.' 
% self.base_url) + + def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()): + url = self.base_url + url + if params: + url = '%s?%s' % (url, urlencode(params or {})) + + start = time.time() + try: + response = self.session.request(method, url, data=body, timeout=timeout or self.timeout) + duration = time.time() - start + raw_data = response.text + except requests.exceptions.SSLError as e: + self.log_request_fail(method, url, body, time.time() - start, exception=e) + raise SSLError('N/A', str(e), e) + except requests.Timeout as e: + self.log_request_fail(method, url, body, time.time() - start, exception=e) + raise ConnectionTimeout('TIMEOUT', str(e), e) + except requests.ConnectionError as e: + self.log_request_fail(method, url, body, time.time() - start, exception=e) + raise ConnectionError('N/A', str(e), e) + + # raise errors based on http status codes, let the client handle those if needed + if not (200 <= response.status_code < 300) and response.status_code not in ignore: + self.log_request_fail(method, url, body, duration, response.status_code, raw_data) + self._raise_error(response.status_code, raw_data) + + self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration) + + return response.status_code, response.headers, raw_data diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection/http_urllib3.py b/venv/lib/python2.7/site-packages/elasticsearch/connection/http_urllib3.py new file mode 100644 index 0000000..d0f213f --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/connection/http_urllib3.py @@ -0,0 +1,115 @@ +import time +import urllib3 +from urllib3.exceptions import ReadTimeoutError, SSLError as UrllibSSLError +import warnings + +from .base import Connection +from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError +from ..compat import urlencode + +class Urllib3HttpConnection(Connection): + """ + Default connection class using the `urllib3` library and the http protocol. + + :arg host: hostname of the node (default: localhost) + :arg port: port to use (integer, default: 9200) + :arg url_prefix: optional url prefix for elasticsearch + :arg timeout: default timeout in seconds (float, default: 10) + :arg http_auth: optional http auth information as either ':' separated + string or a tuple + :arg use_ssl: use ssl for the connection if `True` + :arg verify_certs: whether to verify SSL certificates + :arg ca_certs: optional path to CA bundle. See + http://urllib3.readthedocs.org/en/latest/security.html#using-certifi-with-urllib3 + for instructions how to get default set + :arg client_cert: path to the file containing the private key and the + certificate, or cert only if using client_key + :arg client_key: path to the file containing the private key if using + separate cert and key files (client_cert will contain only the cert) + :arg ssl_version: version of the SSL protocol to use. Choices are: + SSLv23 (default) SSLv2 SSLv3 TLSv1 (see ``PROTOCOL_*`` constants in the + ``ssl`` module for exact options for your environment). + :arg ssl_assert_hostname: use hostname verification if not `False` + :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None` + :arg maxsize: the maximum number of connections which will be kept open to + this host. 
+ """ + def __init__(self, host='localhost', port=9200, http_auth=None, + use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None, + client_key=None, ssl_version=None, ssl_assert_hostname=None, + ssl_assert_fingerprint=None, maxsize=10, **kwargs): + + super(Urllib3HttpConnection, self).__init__(host=host, port=port, **kwargs) + self.headers = urllib3.make_headers(keep_alive=True) + if http_auth is not None: + if isinstance(http_auth, (tuple, list)): + http_auth = ':'.join(http_auth) + self.headers.update(urllib3.make_headers(basic_auth=http_auth)) + + pool_class = urllib3.HTTPConnectionPool + kw = {} + if use_ssl: + pool_class = urllib3.HTTPSConnectionPool + kw.update({ + 'ssl_version': ssl_version, + 'assert_hostname': ssl_assert_hostname, + 'assert_fingerprint': ssl_assert_fingerprint, + }) + + if verify_certs: + kw.update({ + 'cert_reqs': 'CERT_REQUIRED', + 'ca_certs': ca_certs, + 'cert_file': client_cert, + 'key_file': client_key, + }) + elif ca_certs: + raise ImproperlyConfigured("You cannot pass CA certificates when verify SSL is off.") + else: + warnings.warn( + 'Connecting to %s using SSL with verify_certs=False is insecure.' % host) + + self.pool = pool_class(host, port=port, timeout=self.timeout, maxsize=maxsize, **kw) + + def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()): + url = self.url_prefix + url + if params: + url = '%s?%s' % (url, urlencode(params)) + full_url = self.host + url + + start = time.time() + try: + kw = {} + if timeout: + kw['timeout'] = timeout + + # in python2 we need to make sure the url and method are not + # unicode. Otherwise the body will be decoded into unicode too and + # that will fail (#133, #201). + if not isinstance(url, str): + url = url.encode('utf-8') + if not isinstance(method, str): + method = method.encode('utf-8') + + response = self.pool.urlopen(method, url, body, retries=False, headers=self.headers, **kw) + duration = time.time() - start + raw_data = response.data.decode('utf-8') + except UrllibSSLError as e: + self.log_request_fail(method, full_url, body, time.time() - start, exception=e) + raise SSLError('N/A', str(e), e) + except ReadTimeoutError as e: + self.log_request_fail(method, full_url, body, time.time() - start, exception=e) + raise ConnectionTimeout('TIMEOUT', str(e), e) + except Exception as e: + self.log_request_fail(method, full_url, body, time.time() - start, exception=e) + raise ConnectionError('N/A', str(e), e) + + if not (200 <= response.status < 300) and response.status not in ignore: + self.log_request_fail(method, url, body, duration, response.status, raw_data) + self._raise_error(response.status, raw_data) + + self.log_request_success(method, full_url, url, body, response.status, + raw_data, duration) + + return response.status, response.getheaders(), raw_data + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection/pooling.py b/venv/lib/python2.7/site-packages/elasticsearch/connection/pooling.py new file mode 100644 index 0000000..a772120 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/connection/pooling.py @@ -0,0 +1,26 @@ +try: + import queue +except ImportError: + import Queue as queue +from .base import Connection + + +class PoolingConnection(Connection): + """ + Base connection class for connections that use libraries without thread + safety and no capacity for connection pooling. To use this just implement a + ``_make_connection`` method that constructs a new connection and returns + it. 
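+
+    A minimal subclass sketch (illustrative; ``make_thrift_client`` stands in
+    for whatever non-thread-safe client library the subclass wraps)::
+
+        class ThriftConnection(PoolingConnection):
+            def _make_connection(self):
+                return make_thrift_client(self.host)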
+    """
+    def __init__(self, *args, **kwargs):
+        self._free_connections = queue.Queue()
+        super(PoolingConnection, self).__init__(*args, **kwargs)
+
+    def _get_connection(self):
+        try:
+            return self._free_connections.get_nowait()
+        except queue.Empty:
+            return self._make_connection()
+
+    def _release_connection(self, con):
+        self._free_connections.put(con)
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/connection_pool.py b/venv/lib/python2.7/site-packages/elasticsearch/connection_pool.py
new file mode 100644
index 0000000..346a884
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/connection_pool.py
@@ -0,0 +1,248 @@
+import time
+import random
+import logging
+
+try:
+    from Queue import PriorityQueue, Empty
+except ImportError:
+    from queue import PriorityQueue, Empty
+
+from .exceptions import ImproperlyConfigured
+
+logger = logging.getLogger('elasticsearch')
+
+class ConnectionSelector(object):
+    """
+    Simple class used to select a connection from a list of currently live
+    connection instances. At init time it is passed a dictionary containing all
+    the connections' options which it can then use during the selection
+    process. When the `select` method is called it is given a list of
+    *currently* live connections to choose from.
+
+    The options dictionary is the one that has been passed to
+    :class:`~elasticsearch.Transport` as `hosts` param and the same that is
+    used to construct the Connection object itself. When the Connection was
+    created from information retrieved from the cluster via the sniffing
+    process it will be the dictionary returned by the `host_info_callback`.
+
+    An example of where this would be useful is a zone-aware selector that
+    would only select connections from its own zones and only fall back to
+    other connections where there would be none in its zones.
+    """
+    def __init__(self, opts):
+        """
+        :arg opts: dictionary of connection instances and their options
+        """
+        self.connection_opts = opts
+
+    def select(self, connections):
+        """
+        Select a connection from the given list.
+
+        :arg connections: list of live connections to choose from
+        """
+        pass
+
+
+class RandomSelector(ConnectionSelector):
+    """
+    Select a connection at random
+    """
+    def select(self, connections):
+        return random.choice(connections)
+
+
+class RoundRobinSelector(ConnectionSelector):
+    """
+    Selector using round-robin.
+    """
+    def __init__(self, opts):
+        super(RoundRobinSelector, self).__init__(opts)
+        self.rr = -1
+
+    def select(self, connections):
+        self.rr += 1
+        self.rr %= len(connections)
+        return connections[self.rr]
+
+class ConnectionPool(object):
+    """
+    Container holding the :class:`~elasticsearch.Connection` instances,
+    managing the selection process (via a
+    :class:`~elasticsearch.ConnectionSelector`) and dead connections.
+
+    Its only interactions are with the :class:`~elasticsearch.Transport` class
+    that drives all the actions within `ConnectionPool`.
+
+    Initially connections are stored on the class as a list and, along with the
+    connection options, get passed to the `ConnectionSelector` instance for
+    future reference.
+
+    Upon each request the `Transport` will ask for a `Connection` via the
+    `get_connection` method. If the connection fails (its `perform_request`
+    raises a `ConnectionError`) it will be marked as dead (via `mark_dead`) and
+    put on a timeout (if it fails N times in a row the timeout is exponentially
+    longer - the formula is `default_timeout * 2 ** (fail_count - 1)`).
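+    For example, with the default ``dead_timeout`` of 60 seconds, a connection
+    failing for the third consecutive time is put on a ``60 * 2 ** 2 == 240``
+    second timeout.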
When
+    the timeout is over the connection will be resurrected and returned to the
+    live pool. A connection that has been previously marked as dead and
+    succeeds will be marked as live (its fail count will be deleted).
+    """
+    def __init__(self, connections, dead_timeout=60, timeout_cutoff=5,
+            selector_class=RoundRobinSelector, randomize_hosts=True, **kwargs):
+        """
+        :arg connections: list of tuples containing the
+            :class:`~elasticsearch.Connection` instance and its options
+        :arg dead_timeout: number of seconds a connection should be retired for
+            after a failure, increases on consecutive failures
+        :arg timeout_cutoff: number of consecutive failures after which the
+            timeout doesn't increase
+        :arg selector_class: :class:`~elasticsearch.ConnectionSelector`
+            subclass to use if more than one connection is live
+        :arg randomize_hosts: shuffle the list of connections upon arrival to
+            avoid the dog-piling effect across processes
+        """
+        if not connections:
+            raise ImproperlyConfigured("No defined connections, you need to "
+                "specify at least one host.")
+        self.connection_opts = connections
+        self.connections = [c for (c, opts) in connections]
+        # remember original connection list for resurrect(force=True)
+        self.orig_connections = tuple(self.connections)
+        # PriorityQueue for thread safety and ease of timeout management
+        self.dead = PriorityQueue(len(self.connections))
+        self.dead_count = {}
+
+        if randomize_hosts:
+            # randomize the connection list to avoid all clients hitting same node
+            # after startup/restart
+            random.shuffle(self.connections)
+
+        # default timeout after which to try resurrecting a connection
+        self.dead_timeout = dead_timeout
+        self.timeout_cutoff = timeout_cutoff
+
+        self.selector = selector_class(dict(connections))
+
+    def mark_dead(self, connection, now=None):
+        """
+        Mark the connection as dead (failed). Remove it from the live pool and
+        put it on a timeout.
+
+        :arg connection: the failed instance
+        """
+        # allow injection of a fixed time for testing purposes
+        now = now if now else time.time()
+        try:
+            self.connections.remove(connection)
+        except ValueError:
+            # connection not alive or another thread marked it already, ignore
+            return
+        else:
+            dead_count = self.dead_count.get(connection, 0) + 1
+            self.dead_count[connection] = dead_count
+            timeout = self.dead_timeout * 2 ** min(dead_count - 1, self.timeout_cutoff)
+            self.dead.put((now + timeout, connection))
+            logger.warning(
+                'Connection %r has failed for %i times in a row, putting on %i second timeout.',
+                connection, dead_count, timeout
+            )
+
+    def mark_live(self, connection):
+        """
+        Mark connection as healthy after a resurrection. Resets the fail
+        counter for the connection.
+
+        :arg connection: the connection to redeem
+        """
+        try:
+            del self.dead_count[connection]
+        except KeyError:
+            # race condition, safe to ignore
+            pass
+
+    def resurrect(self, force=False):
+        """
+        Attempt to resurrect a connection from the dead pool. It will try to
+        locate one (not all) eligible (its timeout is over) connection to
+        return to the live pool. Any resurrected connection is also returned.
+
+        :arg force: resurrect a connection even if there is none eligible (used
+            when we have no live connections). If force is specified resurrect
+            always returns a connection.
+
+        """
+        # no dead connections
+        if self.dead.empty():
+            # we are forced to return a connection, take one from the original
+            # list.
This is to avoid a race condition where get_connection can
+            # see no live connections but when it calls resurrect self.dead is
+            # also empty. We assume that another thread has resurrected all
+            # available connections so we can safely return one at random.
+            if force:
+                return random.choice(self.orig_connections)
+            return
+
+        try:
+            # retrieve a connection to check
+            timeout, connection = self.dead.get(block=False)
+        except Empty:
+            # other thread has been faster and the queue is now empty. If we
+            # are forced, return a connection at random again.
+            if force:
+                return random.choice(self.orig_connections)
+            return
+
+        if not force and timeout > time.time():
+            # return it back if not eligible and not forced
+            self.dead.put((timeout, connection))
+            return
+
+        # either we were forced or the connection is eligible to be retried
+        self.connections.append(connection)
+        logger.info('Resurrecting connection %r (force=%s).', connection, force)
+        return connection
+
+    def get_connection(self):
+        """
+        Return a connection from the pool using the `ConnectionSelector`
+        instance.
+
+        It tries to resurrect eligible connections, forces a resurrection when
+        no connections are available and passes the list of live connections to
+        the selector instance to choose from.
+
+        Returns a connection instance.
+        """
+        self.resurrect()
+        connections = self.connections[:]
+
+        # no live nodes, resurrect one by force and return it
+        if not connections:
+            return self.resurrect(True)
+
+        # only call selector if we have a selection; pass the local copy so a
+        # concurrent mark_dead cannot change the list under the selector
+        if len(connections) > 1:
+            return self.selector.select(connections)
+
+        # only one connection, no need for a selector
+        return connections[0]
+
+
+class DummyConnectionPool(ConnectionPool):
+    def __init__(self, connections, **kwargs):
+        if len(connections) != 1:
+            raise ImproperlyConfigured("DummyConnectionPool needs exactly one "
+                "connection defined.")
+        # we need connection opts for sniffing logic
+        self.connection_opts = connections
+        self.connection = connections[0][0]
+        self.connections = (self.connection, )
+
+    def get_connection(self):
+        return self.connection
+
+    def _noop(self, *args, **kwargs):
+        pass
+    mark_dead = mark_live = resurrect = _noop
+
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/exceptions.py b/venv/lib/python2.7/site-packages/elasticsearch/exceptions.py
new file mode 100644
index 0000000..76978a8
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/exceptions.py
@@ -0,0 +1,109 @@
+__all__ = [
+    'ImproperlyConfigured', 'ElasticsearchException', 'SerializationError',
+    'TransportError', 'NotFoundError', 'ConflictError', 'RequestError', 'ConnectionError',
+    'SSLError', 'ConnectionTimeout'
+]
+
+class ImproperlyConfigured(Exception):
+    """
+    Exception raised when the config passed to the client is inconsistent or invalid.
+    """
+
+
+class ElasticsearchException(Exception):
+    """
+    Base class for all exceptions raised by this package's operations (doesn't
+    apply to :class:`~elasticsearch.ImproperlyConfigured`).
+    """
+
+
+class SerializationError(ElasticsearchException):
+    """
+    Data passed in failed to serialize properly in the ``Serializer`` being
+    used.
+    """
+
+
+class TransportError(ElasticsearchException):
+    """
+    Exception raised when ES returns a non-OK (>=400) HTTP status code, or when
+    an actual connection error happens; in that case the ``status_code`` will
+    be set to ``'N/A'``.
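+
+    A typical handling pattern (illustrative; the index name is made up)::
+
+        try:
+            client.indices.get_alias(index='missing-index')
+        except TransportError as e:
+            print(e.status_code, e.error)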
+ """ + @property + def status_code(self): + """ + The HTTP status code of the response that precipitated the error or + ``'N/A'`` if not applicable. + """ + return self.args[0] + + @property + def error(self): + """ A string error message. """ + return self.args[1] + + @property + def info(self): + """ Dict of returned error info from ES, where available. """ + return self.args[2] + + def __str__(self): + cause = '' + try: + if self.info: + cause = ', %r' % self.info['error']['root_cause'][0]['reason'] + except LookupError: + pass + return 'TransportError(%s, %r%s)' % (self.status_code, self.error, cause) + + +class ConnectionError(TransportError): + """ + Error raised when there was an exception while talking to ES. Original + exception from the underlying :class:`~elasticsearch.Connection` + implementation is available as ``.info.`` + """ + def __str__(self): + return 'ConnectionError(%s) caused by: %s(%s)' % ( + self.error, self.info.__class__.__name__, self.info) + + +class SSLError(ConnectionError): + """ Error raised when encountering SSL errors. """ + + +class ConnectionTimeout(ConnectionError): + """ A network timeout. Doesn't cause a node retry by default. """ + def __str__(self): + return 'ConnectionTimeout caused by - %s(%s)' % ( + self.info.__class__.__name__, self.info) + + +class NotFoundError(TransportError): + """ Exception representing a 404 status code. """ + + +class ConflictError(TransportError): + """ Exception representing a 409 status code. """ + + +class RequestError(TransportError): + """ Exception representing a 400 status code. """ + + +class AuthenticationException(TransportError): + """ Exception representing a 401 status code. """ + + +class AuthorizationException(TransportError): + """ Exception representing a 403 status code. """ + +# more generic mappings from status_code to python exceptions +HTTP_EXCEPTIONS = { + 400: RequestError, + 401: AuthenticationException, + 403: AuthorizationException, + 404: NotFoundError, + 409: ConflictError, +} diff --git a/venv/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py b/venv/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py new file mode 100644 index 0000000..2f98ba7 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/helpers/__init__.py @@ -0,0 +1,355 @@ +from __future__ import unicode_literals + +import logging +from operator import methodcaller + +from ..exceptions import ElasticsearchException, TransportError +from ..compat import map, string_types + +logger = logging.getLogger('elasticsearch.helpers') + +class BulkIndexError(ElasticsearchException): + @property + def errors(self): + """ List of errors from execution of the last chunk. """ + return self.args[1] + + +class ScanError(ElasticsearchException): + pass + +def expand_action(data): + """ + From one document or action definition passed in by the user extract the + action/data lines needed for elasticsearch's + :meth:`~elasticsearch.Elasticsearch.bulk` api. 
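+
+    For example (field values are illustrative)::
+
+        expand_action({'_op_type': 'index', '_index': 'test', '_id': 42, 'title': 'Hi'})
+        # -> ({'index': {'_index': 'test', '_id': 42}}, {'title': 'Hi'})
+
+        expand_action({'_op_type': 'delete', '_index': 'test', '_id': 42})
+        # -> ({'delete': {'_index': 'test', '_id': 42}}, None)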
+    """
+    # when given a string, assume user wants to index raw json
+    if isinstance(data, string_types):
+        return '{"index":{}}', data
+
+    # make sure we don't alter the action
+    data = data.copy()
+    op_type = data.pop('_op_type', 'index')
+    action = {op_type: {}}
+    for key in ('_index', '_parent', '_percolate', '_routing', '_timestamp',
+            '_ttl', '_type', '_version', '_version_type', '_id', '_retry_on_conflict'):
+        if key in data:
+            action[op_type][key] = data.pop(key)
+
+    # no data payload for delete
+    if op_type == 'delete':
+        return action, None
+
+    return action, data.get('_source', data)
+
+def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer):
+    """
+    Split actions into chunks by number or size, serialize them into strings in
+    the process.
+    """
+    bulk_actions = []
+    size, action_count = 0, 0
+    for action, data in actions:
+        action = serializer.dumps(action)
+        cur_size = len(action) + 1
+
+        if data is not None:
+            data = serializer.dumps(data)
+            cur_size += len(data) + 1
+
+        # full chunk, send it and start a new one
+        if bulk_actions and (size + cur_size > max_chunk_bytes or action_count == chunk_size):
+            yield bulk_actions
+            bulk_actions = []
+            size, action_count = 0, 0
+
+        bulk_actions.append(action)
+        if data is not None:
+            bulk_actions.append(data)
+        size += cur_size
+        action_count += 1
+
+    if bulk_actions:
+        yield bulk_actions
+
+def _process_bulk_chunk(client, bulk_actions, raise_on_exception=True, raise_on_error=True, **kwargs):
+    """
+    Send a bulk request to elasticsearch and process the output.
+    """
+    # if raise_on_error is set, we need to collect errors per chunk before raising them
+    errors = []
+
+    try:
+        # send the actual request
+        resp = client.bulk('\n'.join(bulk_actions) + '\n', **kwargs)
+    except TransportError as e:
+        # default behavior - just propagate exception
+        if raise_on_exception:
+            raise e
+
+        # if we are not propagating, mark all actions in current chunk as failed
+        err_message = str(e)
+        exc_errors = []
+
+        # deserialize the data back, this is expensive but only run on
+        # errors if raise_on_exception is false, so shouldn't be a real
+        # issue
+        bulk_data = map(client.transport.serializer.loads, bulk_actions)
+        while True:
+            try:
+                # collect all the information about failed actions
+                action = next(bulk_data)
+                op_type, action = action.popitem()
+                info = {"error": err_message, "status": e.status_code, "exception": e}
+                if op_type != 'delete':
+                    info['data'] = next(bulk_data)
+                info.update(action)
+                exc_errors.append({op_type: info})
+            except StopIteration:
+                break
+
+        # emulate standard behavior for failed actions
+        if raise_on_error:
+            raise BulkIndexError('%i document(s) failed to index.' % len(exc_errors), exc_errors)
+        else:
+            for err in exc_errors:
+                yield False, err
+            return
+
+    # go through request-response pairs and detect failures
+    for op_type, item in map(methodcaller('popitem'), resp['items']):
+        ok = 200 <= item.get('status', 500) < 300
+        if not ok and raise_on_error:
+            errors.append({op_type: item})
+
+        if ok or not errors:
+            # if we are not just recording all errors to be able to raise
+            # them all at once, yield items individually
+            yield ok, {op_type: item}
+
+    if errors:
+        raise BulkIndexError('%i document(s) failed to index.' % len(errors), errors)
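+
+# A minimal sketch of consuming the streaming_bulk helper defined below
+# (the client, index name and documents are illustrative):
+#
+#     from elasticsearch import Elasticsearch
+#     from elasticsearch.helpers import streaming_bulk
+#
+#     client = Elasticsearch()
+#     docs = ({'_index': 'test', '_type': 'doc', 'value': i} for i in range(100))
+#     for ok, item in streaming_bulk(client, docs, chunk_size=50):
+#         if not ok:
+#             print('failed:', item)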
+def streaming_bulk(client, actions, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024,
+        raise_on_error=True, expand_action_callback=expand_action,
+        raise_on_exception=True, **kwargs):
+    """
+    Streaming bulk consumes actions from the iterable passed in and yields
+    results per action. For non-streaming use cases use
+    :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming
+    bulk that returns summary information about the bulk operation once the
+    entire input is consumed and sent.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterable containing the actions to be executed
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    """
+    actions = map(expand_action_callback, actions)
+
+    for bulk_actions in _chunk_actions(actions, chunk_size, max_chunk_bytes, client.transport.serializer):
+        for result in _process_bulk_chunk(client, bulk_actions, raise_on_exception, raise_on_error, **kwargs):
+            yield result
+
+def bulk(client, actions, stats_only=False, **kwargs):
+    """
+    Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides
+    a more human friendly interface - it consumes an iterator of actions and
+    sends them to elasticsearch in chunks. It returns a tuple with summary
+    information - number of successfully executed actions and either a list of
+    errors or the number of errors if `stats_only` is set to `True`.
+
+    See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted
+    parameters.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg stats_only: if `True` only report the number of successful/failed
+        operations instead of the number of successful operations and a list
+        of error responses
+
+    Any additional keyword arguments will be passed to
+    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
+    the operation.
+    """
+    success, failed = 0, 0
+
+    # list of errors to be collected if not stats_only
+    errors = []
+
+    for ok, item in streaming_bulk(client, actions, **kwargs):
+        # go through request-response pairs and detect failures
+        if not ok:
+            if not stats_only:
+                errors.append(item)
+            failed += 1
+        else:
+            success += 1
+
+    return success, failed if stats_only else errors
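+
+# A short sketch of the summary the bulk helper above returns
+# (client and actions are illustrative):
+#
+#     success, errors = bulk(client, actions)                   # errors is a list
+#     success, failed = bulk(client, actions, stats_only=True)  # failed is a count
+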
+def parallel_bulk(client, actions, thread_count=4, chunk_size=500,
+        max_chunk_bytes=100 * 1024 * 1024,
+        expand_action_callback=expand_action, **kwargs):
+    """
+    Parallel version of the bulk helper that runs requests in multiple threads
+    at once.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg actions: iterator containing the actions
+    :arg thread_count: size of the threadpool to use for the bulk requests
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
+    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
+        from the execution of the last chunk when some occur. By default we raise.
+    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
+        call to ``bulk`` and just report the items that failed as failed.
+    :arg expand_action_callback: callback executed on each action passed in,
+        should return a tuple containing the action line and the data line
+        (`None` if data line should be omitted).
+    """
+    # Avoid importing multiprocessing unless parallel_bulk is used
+    # to avoid exceptions on restricted environments like App Engine
+    from multiprocessing.dummy import Pool
+    actions = map(expand_action_callback, actions)
+
+    pool = Pool(thread_count)
+
+    try:
+        for result in pool.imap(
+            lambda chunk: list(_process_bulk_chunk(client, chunk, **kwargs)),
+            _chunk_actions(actions, chunk_size, max_chunk_bytes, client.transport.serializer)
+        ):
+            for item in result:
+                yield item
+
+    finally:
+        pool.close()
+        pool.join()
+
+def scan(client, query=None, scroll='5m', raise_on_error=True, preserve_order=False, **kwargs):
+    """
+    Simple abstraction on top of the
+    :meth:`~elasticsearch.Elasticsearch.scroll` api - a simple iterator that
+    yields all hits as returned by the underlying scroll requests.
+
+    By default scan does not return results in any pre-determined order. To
+    have a standard order in the returned documents (either by score or
+    explicit sort definition) when scrolling, use ``preserve_order=True``. This
+    may be an expensive operation and will negate the performance benefits of
+    using ``scan``.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
+    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg raise_on_error: raises an exception (``ScanError``) if an error is
+        encountered (some shards fail to execute). By default we raise.
+    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
+        cause the scroll to paginate with preserving the order. Note that this
+        can be an extremely expensive operation and can easily lead to
+        unpredictable results, use with caution.
+
+    Any additional keyword arguments will be passed to the initial
+    :meth:`~elasticsearch.Elasticsearch.search` call::
+
+        scan(es,
+            query={"query": {"match": {"title": "python"}}},
+            index="orders-*",
+            doc_type="books"
+        )
+
+    """
+    if not preserve_order:
+        kwargs['search_type'] = 'scan'
+    # initial search
+    resp = client.search(body=query, scroll=scroll, **kwargs)
+
+    scroll_id = resp.get('_scroll_id')
+    if scroll_id is None:
+        return
+
+    first_run = True
+    while True:
+        # if we didn't set search_type to scan the initial search contains data
+        if preserve_order and first_run:
+            first_run = False
+        else:
+            resp = client.scroll(scroll_id, scroll=scroll)
+
+        for hit in resp['hits']['hits']:
+            yield hit
+
+        # check if we have any errors
+        if resp["_shards"]["failed"]:
+            logger.warning(
+                'Scroll request has failed on %d shards out of %d.',
+                resp['_shards']['failed'], resp['_shards']['total']
+            )
+            if raise_on_error:
+                raise ScanError(
+                    'Scroll request has failed on %d shards out of %d.' %
+                    (resp['_shards']['failed'], resp['_shards']['total'])
+                )
+
+        scroll_id = resp.get('_scroll_id')
+        # end of scroll
+        if scroll_id is None or not resp['hits']['hits']:
+            break
+
+def reindex(client, source_index, target_index, query=None, target_client=None,
+        chunk_size=500, scroll='5m', scan_kwargs={}, bulk_kwargs={}):
+
+    """
+    Reindex all documents from one index that satisfy a given query
+    to another, potentially (if `target_client` is specified) on a different cluster.
+    If you don't specify the query you will reindex all the documents.
+
+    .. note::
+
+        This helper doesn't transfer mappings, just the data.
+
+    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
+        read if `target_client` is specified as well)
+    :arg source_index: index (or list of indices) to read documents from
+    :arg target_index: name of the index in the target cluster to populate
+    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
+    :arg target_client: optional, if specified it will be used for writing (thus
+        enabling reindex between clusters)
+    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
+    :arg scroll: Specify how long a consistent view of the index should be
+        maintained for scrolled search
+    :arg scan_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.scan`
+    :arg bulk_kwargs: additional kwargs to be passed to
+        :func:`~elasticsearch.helpers.bulk`
+    """
+    target_client = client if target_client is None else target_client
+
+    docs = scan(client,
+        query=query,
+        index=source_index,
+        scroll=scroll,
+        fields=('_source', '_parent', '_routing', '_timestamp'),
+        **scan_kwargs
+    )
+    def _change_doc_index(hits, index):
+        for h in hits:
+            h['_index'] = index
+            if 'fields' in h:
+                h.update(h.pop('fields'))
+            yield h
+
+    kwargs = {
+        'stats_only': True,
+    }
+    kwargs.update(bulk_kwargs)
+    return bulk(target_client, _change_doc_index(docs, target_index),
+        chunk_size=chunk_size, **kwargs)
diff --git a/venv/lib/python2.7/site-packages/elasticsearch/helpers/test.py b/venv/lib/python2.7/site-packages/elasticsearch/helpers/test.py
new file mode 100644
index 0000000..2abf473
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch/helpers/test.py
@@ -0,0 +1,60 @@
+import time
+import os
+try:
+    # python 2.6
+    from unittest2 import TestCase, SkipTest
+except ImportError:
+    from unittest import TestCase, SkipTest
+
+from elasticsearch import Elasticsearch
+from elasticsearch.exceptions import ConnectionError
+
+def
get_test_client(nowait=False, **kwargs): + # construct kwargs from the environment + kw = {'timeout': 30} + if 'TEST_ES_CONNECTION' in os.environ: + from elasticsearch import connection + kw['connection_class'] = getattr(connection, os.environ['TEST_ES_CONNECTION']) + + kw.update(kwargs) + client = Elasticsearch([os.environ.get('TEST_ES_SERVER', {})], **kw) + + # wait for yellow status + for _ in range(1 if nowait else 100): + try: + client.cluster.health(wait_for_status='yellow') + return client + except ConnectionError: + time.sleep(.1) + else: + # timeout + raise SkipTest("Elasticsearch failed to start.") + +def _get_version(version_string): + if '.' not in version_string: + return () + version = version_string.strip().split('.') + return tuple(int(v) if v.isdigit() else 999 for v in version) + +class ElasticsearchTestCase(TestCase): + @staticmethod + def _get_client(): + return get_test_client() + + @classmethod + def setUpClass(cls): + super(ElasticsearchTestCase, cls).setUpClass() + cls.client = cls._get_client() + + def tearDown(self): + super(ElasticsearchTestCase, self).tearDown() + self.client.indices.delete(index='*') + self.client.indices.delete_template(name='*', ignore=404) + + @property + def es_version(self): + if not hasattr(self, '_es_version'): + version_string = self.client.info()['version']['number'] + self._es_version = _get_version(version_string) + return self._es_version + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/serializer.py b/venv/lib/python2.7/site-packages/elasticsearch/serializer.py new file mode 100644 index 0000000..808db71 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/serializer.py @@ -0,0 +1,77 @@ +try: + import simplejson as json +except ImportError: + import json +import uuid +from datetime import date, datetime +from decimal import Decimal + +from .exceptions import SerializationError, ImproperlyConfigured +from .compat import string_types + +class TextSerializer(object): + mimetype = 'text/plain' + + def loads(self, s): + return s + + def dumps(self, data): + if isinstance(data, string_types): + return data + + raise SerializationError('Cannot serialize %r into text.' 
% data) + +class JSONSerializer(object): + mimetype = 'application/json' + + def default(self, data): + if isinstance(data, (date, datetime)): + return data.isoformat() + elif isinstance(data, Decimal): + return float(data) + elif isinstance(data, uuid.UUID): + return str(data) + raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data))) + + def loads(self, s): + try: + return json.loads(s) + except (ValueError, TypeError) as e: + raise SerializationError(s, e) + + def dumps(self, data): + # don't serialize strings + if isinstance(data, string_types): + return data + + try: + return json.dumps(data, default=self.default, ensure_ascii=False) + except (ValueError, TypeError) as e: + raise SerializationError(data, e) + +DEFAULT_SERIALIZERS = { + JSONSerializer.mimetype: JSONSerializer(), + TextSerializer.mimetype: TextSerializer(), +} + +class Deserializer(object): + def __init__(self, serializers, default_mimetype='application/json'): + try: + self.default = serializers[default_mimetype] + except KeyError: + raise ImproperlyConfigured('Cannot find default serializer (%s)' % default_mimetype) + self.serializers = serializers + + def loads(self, s, mimetype=None): + if not mimetype: + deserializer = self.default + else: + # split out charset + mimetype = mimetype.split(';', 1)[0] + try: + deserializer = self.serializers[mimetype] + except KeyError: + raise SerializationError('Unknown mimetype, unable to deserialize: %s' % mimetype) + + return deserializer.loads(s) + diff --git a/venv/lib/python2.7/site-packages/elasticsearch/transport.py b/venv/lib/python2.7/site-packages/elasticsearch/transport.py new file mode 100644 index 0000000..505768d --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch/transport.py @@ -0,0 +1,355 @@ +import time +from itertools import chain + +from .connection import Urllib3HttpConnection +from .connection_pool import ConnectionPool, DummyConnectionPool +from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS +from .exceptions import ConnectionError, TransportError, SerializationError, \ + ConnectionTimeout, ImproperlyConfigured + + +def get_host_info(node_info, host): + """ + Simple callback that takes the node info from `/_cluster/nodes` and a + parsed connection information and return the connection information. If + `None` is returned this node will be skipped. + + Useful for filtering nodes (by proximity for example) or if additional + information needs to be provided for the :class:`~elasticsearch.Connection` + class. By default master only nodes are filtered out since they shouldn't + typically be used for API operations. + + :arg node_info: node information from `/_cluster/nodes` + :arg host: connection information (host, port) extracted from the node info + """ + attrs = node_info.get('attributes', {}) + + # ignore master only nodes + if (attrs.get('data', 'true') == 'false' and + attrs.get('client', 'false') == 'false' and + attrs.get('master', 'true') == 'true'): + return None + return host + +class Transport(object): + """ + Encapsulation of transport-related to logic. Handles instantiation of the + individual connections as well as creating a connection pool to hold them. + + Main interface is the `perform_request` method. 
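+
+    For example (the host address is illustrative)::
+
+        t = Transport([{'host': 'localhost', 'port': 9200}])
+        status, data = t.perform_request('GET', '/_cluster/health')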
+    """
+    def __init__(self, hosts, connection_class=Urllib3HttpConnection,
+        connection_pool_class=ConnectionPool, host_info_callback=get_host_info,
+        sniff_on_start=False, sniffer_timeout=None, sniff_timeout=.1,
+        sniff_on_connection_fail=False, serializer=JSONSerializer(), serializers=None,
+        default_mimetype='application/json', max_retries=3, retry_on_status=(503, 504, ),
+        retry_on_timeout=False, send_get_body_as='GET', **kwargs):
+        """
+        :arg hosts: list of dictionaries, each containing keyword arguments to
+            create a `connection_class` instance
+        :arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
+        :arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
+        :arg host_info_callback: callback responsible for taking the node information from
+            `/_cluster/nodes`, along with already extracted information, and
+            producing a list of arguments (same as `hosts` parameter)
+        :arg sniff_on_start: flag indicating whether to obtain a list of nodes
+            from the cluster at startup time
+        :arg sniffer_timeout: number of seconds between automatic sniffs
+        :arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
+        :arg sniff_timeout: timeout used for the sniff request - it should be a
+            fast api call and we are talking potentially to more nodes so we want
+            to fail quickly. Not used during initial sniffing (if
+            ``sniff_on_start`` is on) when the connection still isn't
+            initialized.
+        :arg serializer: serializer instance
+        :arg serializers: optional dict of serializer instances that will be
+            used for deserializing data coming from the server. (key is the mimetype)
+        :arg default_mimetype: when no mimetype is specified by the server
+            response assume this mimetype, defaults to `'application/json'`
+        :arg max_retries: maximum number of retries before an exception is propagated
+        :arg retry_on_status: set of HTTP status codes on which we should retry
+            on a different node. defaults to ``(503, 504, )``
+        :arg retry_on_timeout: should timeout trigger a retry on a different
+            node? (default `False`)
+        :arg send_get_body_as: for GET requests with body this option allows
+            you to specify an alternate way of execution for environments that
+            don't support passing bodies with GET requests. If you set this to
+            'POST' a POST method will be used instead, if to 'source' then the body
+            will be serialized and passed as a query parameter `source`.
+
+        Any extra keyword arguments will be passed to the `connection_class`
+        when creating an instance unless overridden by that connection's
+        options provided as part of the hosts parameter.
+        """
+
+        # serialization config
+        _serializers = DEFAULT_SERIALIZERS.copy()
+        # if a serializer has been specified, use it for deserialization as well
+        _serializers[serializer.mimetype] = serializer
+        # if a custom serializers map has been supplied, override the defaults with it
+        if serializers:
+            _serializers.update(serializers)
+        # create a deserializer with our config
+        self.deserializer = Deserializer(_serializers, default_mimetype)
+
+        self.max_retries = max_retries
+        self.retry_on_timeout = retry_on_timeout
+        self.retry_on_status = retry_on_status
+        self.send_get_body_as = send_get_body_as
+
+        # data serializer
+        self.serializer = serializer
+
+        # store all strategies...
+        self.connection_pool_class = connection_pool_class
+        self.connection_class = connection_class
+
+        # ...save kwargs to be passed to the connections
+        self.kwargs = kwargs
+        self.hosts = hosts
+
+        # ...and instantiate them
+        self.set_connections(hosts)
+        # retain the original connection instances for sniffing
+        self.seed_connections = self.connection_pool.connections[:]
+
+        # sniffing data
+        self.sniffer_timeout = sniffer_timeout
+        self.sniff_on_connection_fail = sniff_on_connection_fail
+        self.last_sniff = time.time()
+        self.sniff_timeout = sniff_timeout
+
+        # callback to construct host dict from data in /_cluster/nodes
+        self.host_info_callback = host_info_callback
+
+        if sniff_on_start:
+            self.sniff_hosts(True)
+
+    def add_connection(self, host):
+        """
+        Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
+
+        :arg host: kwargs that will be used to create the instance
+        """
+        self.hosts.append(host)
+        self.set_connections(self.hosts)
+
+    def set_connections(self, hosts):
+        """
+        Instantiate all the connections and create a new connection pool to hold
+        them. Tries to identify unchanged hosts and re-use existing
+        :class:`~elasticsearch.Connection` instances.
+
+        :arg hosts: same as `__init__`
+        """
+        # construct the connections
+        def _create_connection(host):
+            # if this is not the initial setup look at the existing connection
+            # options and identify connections that haven't changed and can be
+            # kept around.
+            if hasattr(self, 'connection_pool'):
+                for (connection, old_host) in self.connection_pool.connection_opts:
+                    if old_host == host:
+                        return connection
+
+            # previously unseen params, create new connection
+            kwargs = self.kwargs.copy()
+            kwargs.update(host)
+
+            if 'scheme' in host and host['scheme'] != self.connection_class.transport_schema:
+                raise ImproperlyConfigured(
+                    'Scheme specified in connection (%s) is not the same as the connection class (%s) specifies (%s).' % (
+                        host['scheme'], self.connection_class.__name__, self.connection_class.transport_schema
+                    ))
+            return self.connection_class(**kwargs)
+        connections = map(_create_connection, hosts)
+
+        connections = list(zip(connections, hosts))
+        if len(connections) == 1:
+            self.connection_pool = DummyConnectionPool(connections)
+        else:
+            # pass the hosts dicts to the connection pool to optionally extract parameters from
+            self.connection_pool = self.connection_pool_class(connections, **self.kwargs)
+
+    def get_connection(self):
+        """
+        Retrieve a :class:`~elasticsearch.Connection` instance from the
+        :class:`~elasticsearch.ConnectionPool` instance.
+        """
+        if self.sniffer_timeout:
+            if time.time() >= self.last_sniff + self.sniffer_timeout:
+                self.sniff_hosts()
+        return self.connection_pool.get_connection()
+
+    def _get_sniff_data(self, initial=False):
+        """
+        Perform the request to get sniffing information. Returns a list of
+        dictionaries (one per node) containing all the information from the
+        cluster.
+
+        It also sets the last_sniff attribute in case of a successful attempt.
+
+        In rare cases it might be possible to override this method in your
+        custom Transport class to serve data from an alternative source, like
+        configuration management.
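+
+        A minimal sketch of such an override, assuming the default http
+        connection class (the static address is illustrative)::
+
+            class StaticSniffTransport(Transport):
+                def _get_sniff_data(self, initial=False):
+                    # serve node info from configuration instead of the cluster
+                    return [{'http_address': '127.0.0.1:9200'}]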
+        """
+        previous_sniff = self.last_sniff
+
+        try:
+            # reset last_sniff timestamp
+            self.last_sniff = time.time()
+            # go through all current connections as well as the
+            # seed_connections for good measure
+            for c in chain(self.connection_pool.connections, self.seed_connections):
+                try:
+                    # use small timeout for the sniffing request, should be a fast api call
+                    _, headers, node_info = c.perform_request('GET', '/_nodes/_all/clear',
+                        timeout=self.sniff_timeout if not initial else None)
+                    node_info = self.deserializer.loads(node_info, headers.get('content-type'))
+                    break
+                except (ConnectionError, SerializationError):
+                    pass
+            else:
+                raise TransportError("N/A", "Unable to sniff hosts.")
+        except:
+            # keep the previous value on error
+            self.last_sniff = previous_sniff
+            raise
+
+        return list(node_info['nodes'].values())
+
+
+    def sniff_hosts(self, initial=False):
+        """
+        Obtain a list of nodes from the cluster and create a new connection
+        pool using the information retrieved.
+
+        To extract the node connection parameters use the ``host_info_callback``.
+
+        :arg initial: flag indicating if this is during startup
+            (``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
+        """
+        node_info = self._get_sniff_data(initial)
+
+        hosts = []
+        address_key = self.connection_class.transport_schema + '_address'
+        for n in node_info:
+            host = {}
+            address = n.get(address_key, '')
+            if '/' in address:
+                host['host'], address = address.split('/', 1)
+
+            # malformed address
+            if ':' not in address:
+                continue
+
+            ip, port = address.rsplit(':', 1)
+
+            # use the ip if not overridden by publish_host
+            host.setdefault('host', ip)
+            host['port'] = int(port)
+
+            host = self.host_info_callback(n, host)
+            if host is not None:
+                hosts.append(host)
+
+        # we weren't able to get any nodes, maybe using an incompatible
+        # transport_schema or host_info_callback blocked all - raise error.
+        if not hosts:
+            raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found.")
+
+        self.set_connections(hosts)
+
+    def mark_dead(self, connection):
+        """
+        Mark a connection as dead (failed) in the connection pool. If sniffing
+        on failure is enabled this will initiate the sniffing process.
+
+        :arg connection: instance of :class:`~elasticsearch.Connection` that failed
+        """
+        # mark as dead even when sniffing to avoid hitting this host during the sniff process
+        self.connection_pool.mark_dead(connection)
+        if self.sniff_on_connection_fail:
+            self.sniff_hosts()
+
+    def perform_request(self, method, url, params=None, body=None):
+        """
+        Perform the actual request. Retrieve a connection from the connection
+        pool, pass all the information to its perform_request method and
+        return the data.
+
+        If an exception was raised, mark the connection as failed and retry (up
+        to `max_retries` times).
+
+        If the operation was successful and the connection used was previously
+        marked as dead, mark it as live, resetting its failure count.
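+
+        For example (the url and body are illustrative)::
+
+            status, data = transport.perform_request('GET', '/_count',
+                body={'query': {'match_all': {}}})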
+ + :arg method: HTTP method to use + :arg url: absolute url (without host) to target + :arg params: dictionary of query parameters, will be handed over to the + underlying :class:`~elasticsearch.Connection` class for serialization + :arg body: body of the request, will be serializes using serializer and + passed to the connection + """ + if body is not None: + body = self.serializer.dumps(body) + + # some clients or environments don't support sending GET with body + if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET': + # send it as post instead + if self.send_get_body_as == 'POST': + method = 'POST' + + # or as source parameter + elif self.send_get_body_as == 'source': + if params is None: + params = {} + params['source'] = body + body = None + + if body is not None: + try: + body = body.encode('utf-8') + except (UnicodeDecodeError, AttributeError): + # bytes/str - no need to re-encode + pass + + ignore = () + timeout = None + if params: + timeout = params.pop('request_timeout', None) + ignore = params.pop('ignore', ()) + if isinstance(ignore, int): + ignore = (ignore, ) + + for attempt in range(self.max_retries + 1): + connection = self.get_connection() + + try: + status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout) + + except TransportError as e: + retry = False + if isinstance(e, ConnectionTimeout): + retry = self.retry_on_timeout + elif isinstance(e, ConnectionError): + retry = True + elif e.status_code in self.retry_on_status: + retry = True + + if retry: + # only mark as dead if we are retrying + self.mark_dead(connection) + # raise exception on last retry + if attempt == self.max_retries: + raise + else: + raise + + else: + # connection didn't fail, confirm it's live status + self.connection_pool.mark_live(connection) + if data: + data = self.deserializer.loads(data, headers.get('content-type')) + return status, data + diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..63dafc7 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,238 @@ +Elasticsearch DSL +================= + +Elasticsearch DSL is a high-level library whose aim is to help with writing and +running queries against Elasticsearch. It is built on top of the official +low-level client (``elasticsearch-py``). + +It provides a more convenient and idiomatic way to write and manipulate +queries. It stays close to the Elasticsearch JSON DSL, mirroring its +terminology and structure. It exposes the whole range of the DSL from Python +either directly using defined classes or a queryset-like expressions. + +It also provides an optional wrapper for working with documents as Python +objects: defining mappings, retrieving and saving documents, wrapping the +document data in user-defined classes. + +To use the other Elasticsearch APIs (eg. cluster health) just use the +underlying client. + +Compatibility +------------- + +The library is compatible with all Elasticsearch versions since ``1.x`` but you +**have to use a matching major version**: + +For **Elasticsearch 2.0** and later, use the major version 2 (``2.x.y``) of the +library. + +For **Elasticsearch 1.0** and later, use the major version 0 (``0.x.y``) of the +library. 
+ + +The recommended way to set your requirements in your `setup.py` or +`requirements.txt` is:: + + # Elasticsearch 2.x + elasticsearch-dsl>=2.0.0,<3.0.0 + + # Elasticsearch 1.x + elasticsearch-dsl<2.0.0 + + +The development is happening on ``master`` and ``1.x`` branches, respectively. + +Search Example +-------------- + +Let's have a typical search request written directly as a ``dict``: + +.. code:: python + + from elasticsearch import Elasticsearch + client = Elasticsearch() + + response = client.search( + index="my-index", + body={ + "query": { + "filtered": { + "query": { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"description": "beta"}}] + } + }, + "filter": {"term": {"category": "search"}} + } + }, + "aggs" : { + "per_tag": { + "terms": {"field": "tags"}, + "aggs": { + "max_lines": {"max": {"field": "lines"}} + } + } + } + } + ) + + for hit in response['hits']['hits']: + print(hit['_score'], hit['_source']['title']) + + for tag in response['aggregations']['per_tag']['buckets']: + print(tag['key'], tag['max_lines']['value']) + + + +The problem with this approach is that it is very verbose, prone to syntax +mistakes like incorrect nesting, hard to modify (eg. adding another filter) and +definitely not fun to write. + +Let's rewrite the example using the Python DSL: + +.. code:: python + + from elasticsearch import Elasticsearch + from elasticsearch_dsl import Search, Q + + client = Elasticsearch() + + s = Search(using=client, index="my-index") \ + .filter("term", category="search") \ + .query("match", title="python") \ + .query(~Q("match", description="beta")) + + s.aggs.bucket('per_tag', 'terms', field='tags') \ + .metric('max_lines', 'max', field='lines') + + response = s.execute() + + for hit in response: + print(hit.meta.score, hit.title) + + for tag in response.aggregations.per_tag.buckets: + print(tag.key, tag.max_lines.value) + +As you see, the library took care of: + + * creating appropriate ``Query`` objects by name (eq. "match") + + * composing queries into a compound ``bool`` query + + * creating a ``filtered`` query since ``.filter()`` was used + + * providing a convenient access to response data + + * no curly or square brackets everywhere + + +Persistence Example +------------------- + +Let's have a simple Python class representing an article in a blogging system: + +.. 
code:: python + + from datetime import datetime + from elasticsearch_dsl import DocType, String, Date, Integer + from elasticsearch_dsl.connections import connections + + # Define a default Elasticsearch client + connections.create_connection(hosts=['localhost']) + + class Article(DocType): + title = String(analyzer='snowball', fields={'raw': String(index='not_analyzed')}) + body = String(analyzer='snowball') + tags = String(index='not_analyzed') + published_from = Date() + lines = Integer() + + class Meta: + index = 'blog' + + def save(self, ** kwargs): + self.lines = len(self.body.split()) + return super(Article, self).save(** kwargs) + + def is_published(self): + return datetime.now() > self.published_from + + # create the mappings in elasticsearch + Article.init() + + # create and save and article + article = Article(meta={'id': 42}, title='Hello world!', tags=['test']) + article.body = ''' looong text ''' + article.published_from = datetime.now() + article.save() + + article = Article.get(id=42) + print(article.is_published()) + + # Display cluster health + print(connections.get_connection().cluster.health()) + + +In this example you can see: + + * providing a default connection + + * defining fields with mapping configuration + + * setting index name + + * defining custom methods + + * overriding the built-in ``.save()`` method to hook into the persistence + life cycle + + * retrieving and saving the object into Elasticsearch + + * accessing the underlying client for other APIs + +You can see more in the persistence chapter of the documentation. + +Migration from ``elasticsearch-py`` +----------------------------------- + +You don't have to port your entire application to get the benefits of the +Python DSL, you can start gradually by creating a ``Search`` object from your +existing ``dict``, modifying it using the API and serializing it back to a +``dict``: + +.. code:: python + + body = {...} # insert complicated query here + + # Convert to Search object + s = Search.from_dict(body) + + # Add some filters, aggregations, queries, ... + s.filter("term", tags="python") + + # Convert back to dict to plug back into existing code + body = s.to_dict() + +Documentation +------------- + +Documentation is available at https://elasticsearch-dsl.readthedocs.org. + +License +------- + +Copyright 2013 Elasticsearch + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/METADATA b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/METADATA new file mode 100644 index 0000000..1c22771 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/METADATA @@ -0,0 +1,265 @@ +Metadata-Version: 2.0 +Name: elasticsearch-dsl +Version: 2.0.0 +Summary: Python client for Elasticsearch +Home-page: https://github.com/elasticsearch/elasticsearch-dsl-py +Author: Honza Král +Author-email: honza.kral@gmail.com +License: Apache License, Version 2.0 +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Dist: six +Requires-Dist: python-dateutil +Requires-Dist: elasticsearch (>=2.0.0,<3.0.0) + +Elasticsearch DSL +================= + +Elasticsearch DSL is a high-level library whose aim is to help with writing and +running queries against Elasticsearch. It is built on top of the official +low-level client (``elasticsearch-py``). + +It provides a more convenient and idiomatic way to write and manipulate +queries. It stays close to the Elasticsearch JSON DSL, mirroring its +terminology and structure. It exposes the whole range of the DSL from Python +either directly using defined classes or a queryset-like expressions. + +It also provides an optional wrapper for working with documents as Python +objects: defining mappings, retrieving and saving documents, wrapping the +document data in user-defined classes. + +To use the other Elasticsearch APIs (eg. cluster health) just use the +underlying client. + +Compatibility +------------- + +The library is compatible with all Elasticsearch versions since ``1.x`` but you +**have to use a matching major version**: + +For **Elasticsearch 2.0** and later, use the major version 2 (``2.x.y``) of the +library. + +For **Elasticsearch 1.0** and later, use the major version 0 (``0.x.y``) of the +library. + + +The recommended way to set your requirements in your `setup.py` or +`requirements.txt` is:: + + # Elasticsearch 2.x + elasticsearch-dsl>=2.0.0,<3.0.0 + + # Elasticsearch 1.x + elasticsearch-dsl<2.0.0 + + +The development is happening on ``master`` and ``1.x`` branches, respectively. + +Search Example +-------------- + +Let's have a typical search request written directly as a ``dict``: + +.. 
code:: python + + from elasticsearch import Elasticsearch + client = Elasticsearch() + + response = client.search( + index="my-index", + body={ + "query": { + "filtered": { + "query": { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"description": "beta"}}] + } + }, + "filter": {"term": {"category": "search"}} + } + }, + "aggs" : { + "per_tag": { + "terms": {"field": "tags"}, + "aggs": { + "max_lines": {"max": {"field": "lines"}} + } + } + } + } + ) + + for hit in response['hits']['hits']: + print(hit['_score'], hit['_source']['title']) + + for tag in response['aggregations']['per_tag']['buckets']: + print(tag['key'], tag['max_lines']['value']) + + + +The problem with this approach is that it is very verbose, prone to syntax +mistakes like incorrect nesting, hard to modify (eg. adding another filter) and +definitely not fun to write. + +Let's rewrite the example using the Python DSL: + +.. code:: python + + from elasticsearch import Elasticsearch + from elasticsearch_dsl import Search, Q + + client = Elasticsearch() + + s = Search(using=client, index="my-index") \ + .filter("term", category="search") \ + .query("match", title="python") \ + .query(~Q("match", description="beta")) + + s.aggs.bucket('per_tag', 'terms', field='tags') \ + .metric('max_lines', 'max', field='lines') + + response = s.execute() + + for hit in response: + print(hit.meta.score, hit.title) + + for tag in response.aggregations.per_tag.buckets: + print(tag.key, tag.max_lines.value) + +As you see, the library took care of: + + * creating appropriate ``Query`` objects by name (eq. "match") + + * composing queries into a compound ``bool`` query + + * creating a ``filtered`` query since ``.filter()`` was used + + * providing a convenient access to response data + + * no curly or square brackets everywhere + + +Persistence Example +------------------- + +Let's have a simple Python class representing an article in a blogging system: + +.. code:: python + + from datetime import datetime + from elasticsearch_dsl import DocType, String, Date, Integer + from elasticsearch_dsl.connections import connections + + # Define a default Elasticsearch client + connections.create_connection(hosts=['localhost']) + + class Article(DocType): + title = String(analyzer='snowball', fields={'raw': String(index='not_analyzed')}) + body = String(analyzer='snowball') + tags = String(index='not_analyzed') + published_from = Date() + lines = Integer() + + class Meta: + index = 'blog' + + def save(self, ** kwargs): + self.lines = len(self.body.split()) + return super(Article, self).save(** kwargs) + + def is_published(self): + return datetime.now() > self.published_from + + # create the mappings in elasticsearch + Article.init() + + # create and save and article + article = Article(meta={'id': 42}, title='Hello world!', tags=['test']) + article.body = ''' looong text ''' + article.published_from = datetime.now() + article.save() + + article = Article.get(id=42) + print(article.is_published()) + + # Display cluster health + print(connections.get_connection().cluster.health()) + + +In this example you can see: + + * providing a default connection + + * defining fields with mapping configuration + + * setting index name + + * defining custom methods + + * overriding the built-in ``.save()`` method to hook into the persistence + life cycle + + * retrieving and saving the object into Elasticsearch + + * accessing the underlying client for other APIs + +You can see more in the persistence chapter of the documentation. 
+ +Migration from ``elasticsearch-py`` +----------------------------------- + +You don't have to port your entire application to get the benefits of the +Python DSL, you can start gradually by creating a ``Search`` object from your +existing ``dict``, modifying it using the API and serializing it back to a +``dict``: + +.. code:: python + + body = {...} # insert complicated query here + + # Convert to Search object + s = Search.from_dict(body) + + # Add some filters, aggregations, queries, ... + s.filter("term", tags="python") + + # Convert back to dict to plug back into existing code + body = s.to_dict() + +Documentation +------------- + +Documentation is available at https://elasticsearch-dsl.readthedocs.org. + +License +------- + +Copyright 2013 Elasticsearch + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/RECORD b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/RECORD new file mode 100644 index 0000000..a438bc1 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/RECORD @@ -0,0 +1,42 @@ +elasticsearch_dsl/filter.py,sha256=MOtItO7APSI3ZxzkXonWPmkVBa85Zrp328UHhVx7o9Y,5627 +elasticsearch_dsl/connections.py,sha256=fKluPp0QCk3ENi2nZoxGXiB7qFWRrwfMn7MpZ2uA1ws,3119 +elasticsearch_dsl/faceted_search.py,sha256=xoRp4p9pg8PA8ksl70azjYYUm5lNWBHPFLjwv6nA82I,8046 +elasticsearch_dsl/field.py,sha256=sBvnZssItuiJLBdGhRGMVrv70fa8rtqj-ycxWMpVCfo,7513 +elasticsearch_dsl/document.py,sha256=tdcFNFuxxUsU7xQ3eGV2tcPWmxh36sk44Hw9N7haocg,8332 +elasticsearch_dsl/analysis.py,sha256=jzIlDS_79CStTf5QuIERAjhaNSiRtCISnJxhtAraPQo,3262 +elasticsearch_dsl/exceptions.py,sha256=IxJtSE19mUzM77FtrO2n5l2mPbL3TsupdwhirIHk8EE,254 +elasticsearch_dsl/index.py,sha256=HTjNRBxV5t_l7TkopZ_OnGltFQihh3sLS9kXxMX1dgQ,2915 +elasticsearch_dsl/function.py,sha256=RbwPF2bssRA4cwzxPROtmvaTGNB9652l8qNsM1XCMm4,2535 +elasticsearch_dsl/result.py,sha256=UkRy9z-8R5mogJjwv3usvTsmNK6jUmCtbd7TtNuo9gY,2599 +elasticsearch_dsl/mapping.py,sha256=tjJUGd9Gqx958VaMF_mYVtXPlSD5abD3rQfMCfSQEs0,5258 +elasticsearch_dsl/query.py,sha256=yvzyWLX9XF_pV2t9LofmAUObYUxUUsPb_YbpyCskEfQ,8019 +elasticsearch_dsl/serializer.py,sha256=h7-fSiJYc3J_zPv3S6llODtYRJfehXz5ZVL8lwMt0bc,387 +elasticsearch_dsl/search.py,sha256=ma0MsnqoCDyM21ZLxyBPNlFi0epHw7sVkz_PyeOlb88,20962 +elasticsearch_dsl/aggs.py,sha256=3y4RHAUPEtXvbdGBaKL_vn_SKvgEgvscQ13rDSjii9o,5978 +elasticsearch_dsl/utils.py,sha256=xOpcQMbJeabjDbgnRrKK2JFWO2oYM9zHeSZuLJ15nPU,12863 +elasticsearch_dsl/__init__.py,sha256=j5vPCDPeww6bLkFlOCYralCddcp3l4HhnKl-B0t7CMA,409 +elasticsearch_dsl-2.0.0.dist-info/pbr.json,sha256=9_6GDWaW_xvDbV0aqIGzXAGBdmRcvrO7BhxnqWtV1dg,47 +elasticsearch_dsl-2.0.0.dist-info/top_level.txt,sha256=xSZYjeDU8RrTQufEKhY7RX9KcSGZywt42PcWADXg1KY,18 +elasticsearch_dsl-2.0.0.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +elasticsearch_dsl-2.0.0.dist-info/METADATA,sha256=TWoWL4ORPWJ4xX26qXymRbbygKmDUYX-KjliCNnBOTc,7842 
+elasticsearch_dsl-2.0.0.dist-info/DESCRIPTION.rst,sha256=b80GVru_ZTqybq24SFqhcHlWwtj3e_cJ8QsCTtAmyTc,6741 +elasticsearch_dsl-2.0.0.dist-info/metadata.json,sha256=HNkyIWVfAWlSGy2WZUrIDKQQrhOdhgRp20l9Wzjleg4,1269 +elasticsearch_dsl-2.0.0.dist-info/RECORD,, +elasticsearch_dsl-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +elasticsearch_dsl/utils.pyc,, +elasticsearch_dsl/filter.pyc,, +elasticsearch_dsl/serializer.pyc,, +elasticsearch_dsl/connections.pyc,, +elasticsearch_dsl/result.pyc,, +elasticsearch_dsl/index.pyc,, +elasticsearch_dsl/exceptions.pyc,, +elasticsearch_dsl/__init__.pyc,, +elasticsearch_dsl/field.pyc,, +elasticsearch_dsl/faceted_search.pyc,, +elasticsearch_dsl/analysis.pyc,, +elasticsearch_dsl/aggs.pyc,, +elasticsearch_dsl/search.pyc,, +elasticsearch_dsl/document.pyc,, +elasticsearch_dsl/function.pyc,, +elasticsearch_dsl/query.pyc,, +elasticsearch_dsl/mapping.pyc,, diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/WHEEL b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/metadata.json b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/metadata.json new file mode 100644 index 0000000..ff2a789 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"extras": [], "classifiers": ["Development Status :: 4 - Beta", "License :: OSI Approved :: Apache Software License", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "test_requires": [{"requires": ["mock", "pytest", "pytest-cov", "pytz"]}], "license": "Apache License, Version 2.0", "run_requires": [{"requires": ["six", "python-dateutil", "elasticsearch (>=2.0.0,<3.0.0)"]}], "summary": "Python client for Elasticsearch", "generator": "bdist_wheel (0.24.0)", "name": "elasticsearch-dsl", "version": "2.0.0", "extensions": {"python.details": {"project_urls": {"Home": "https://github.com/elasticsearch/elasticsearch-dsl-py"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"name": "Honza Kr\u00e1l", "role": "author", "email": "honza.kral@gmail.com"}]}}, "metadata_version": "2.0"} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/pbr.json b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/pbr.json new file mode 100644 index 0000000..6c0df49 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/pbr.json @@ -0,0 +1 @@ +{"is_release": false, "git_version": "d5c3476"} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/top_level.txt 
b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/top_level.txt new file mode 100644 index 0000000..0a6baaa --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl-2.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +elasticsearch_dsl diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/__init__.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/__init__.py new file mode 100644 index 0000000..1cda645 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/__init__.py @@ -0,0 +1,14 @@ +from .query import Q +from .aggs import A +from .function import SF +from .search import Search, MultiSearch +from .field import * +from .document import DocType, MetaField +from .mapping import Mapping +from .index import Index +from .analysis import analyzer, token_filter, char_filter, tokenizer +from .faceted_search import * + +VERSION = (2, 0, 0) +__version__ = VERSION +__versionstr__ = '.'.join(map(str, VERSION)) diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/aggs.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/aggs.py new file mode 100644 index 0000000..8264499 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/aggs.py @@ -0,0 +1,176 @@ +from .utils import DslBase, _make_dsl_class + +__all__ = [ + 'A', 'Agg', 'Filter', 'Bucket', 'Children', 'DateHistogram', 'Filters', + 'GeoDistance', 'GeohashGrid', 'Global', 'Histogram', 'Iprange', 'Missing', + 'Nested', 'Range', 'ReverseNested', 'SignificantTerms', 'Terms', 'Avg', + 'Cardinality', 'ExtendedStats', 'GeoBounds', 'Max', 'Min', 'Percentiles', + 'PercenileRanks', 'ScriptedMetric', 'Stats', 'Sum', 'TopHits', 'ValueCount', + 'AvgBucket', 'BucketScript', 'BucketSelector', 'CumulativeSum', + 'Derivative', 'ExtendedStatsBucket', 'MaxBucket', 'MinBucket', 'MovingAvg', + 'PercentilesBucket', 'SerialDiff', 'StatsBucket', 'SumBucket' +] + + +def A(name_or_agg, filter=None, **params): + if filter is not None: + if name_or_agg != 'filter': + raise ValueError("Aggregation %r doesn't accept positional argument 'filter'." % name_or_agg) + params['filter'] = filter + + # {"terms": {"field": "tags"}, "aggs": {...}} + if isinstance(name_or_agg, dict): + if params: + raise ValueError('A() cannot accept parameters when passing in a dict.') + # copy to avoid modifying in-place + agg = name_or_agg.copy() + # pop out nested aggs + aggs = agg.pop('aggs', None) + # should be {"terms": {"fied": "tags"}} + if len(agg) != 1: + raise ValueError('A() can only accept dict with an aggregation ({"terms": {...}}). ' + 'Instead it got (%r)' % name_or_agg) + agg_type, params = agg.popitem() + if aggs: + params = params.copy() + params['aggs'] = aggs + return Agg.get_dsl_class(agg_type)(**params) + + # Terms(...) 
just return the nested agg + elif isinstance(name_or_agg, Agg): + if params: + raise ValueError('A() cannot accept parameters when passing in an Agg object.') + return name_or_agg + + # "terms", field="tags" + return Agg.get_dsl_class(name_or_agg)(**params) + +class Agg(DslBase): + _type_name = 'agg' + _type_shortcut = staticmethod(A) + name = None + +class AggBase(object): + _param_defs = { + 'aggs': {'type': 'agg', 'hash': True}, + } + def __getitem__(self, agg_name): + agg = self._params.setdefault('aggs', {})[agg_name] # propagate KeyError + + # make sure we're not mutating a shared state - whenever accessing a + # bucket, return a shallow copy of it to be safe + if isinstance(agg, Bucket): + agg = A(agg.name, **agg._params) + # be sure to store the copy so any modifications to it will affect us + self._params['aggs'][agg_name] = agg + + return agg + + def __setitem__(self, agg_name, agg): + self.aggs[agg_name] = A(agg) + + def _agg(self, bucket, name, agg_type, *args, **params): + agg = self[name] = A(agg_type, *args, **params) + + # For chaining - when creating new buckets return them... + if bucket: + return agg + # otherwise return self._base so we can keep chaining + else: + return self._base + + def metric(self, name, agg_type, *args, **params): + return self._agg(False, name, agg_type, *args, **params) + + def bucket(self, name, agg_type, *args, **params): + return self._agg(True, name, agg_type, *args, **params) + + def pipeline(self, name, agg_type, *args, **params): + return self._agg(False, name, agg_type, *args, **params) + + +class Bucket(AggBase, Agg): + def __init__(self, **params): + super(Bucket, self).__init__(**params) + # remember self for chaining + self._base = self + + def to_dict(self): + d = super(AggBase, self).to_dict() + if 'aggs' in d[self.name]: + d['aggs'] = d[self.name].pop('aggs') + return d + +class Filter(Bucket): + name = 'filter' + _param_defs = { + 'filter': {'type': 'query'}, + 'aggs': {'type': 'agg', 'hash': True}, + } + + def __init__(self, filter=None, **params): + if filter is not None: + params['filter'] = filter + super(Filter, self).__init__(**params) + + def to_dict(self): + d = super(Filter, self).to_dict() + d[self.name].update(d[self.name].pop('filter', {})) + return d + +class Pipeline(Agg): + pass + +AGGS = ( + (Bucket, 'children', None), + (Bucket, 'date_histogram', None), + (Bucket, 'date_range', None), + (Bucket, 'filters', {'filters': {'type': 'query', 'hash': True}}), + (Bucket, 'geo_distance', None), + (Bucket, 'geohash_grid', None), + (Bucket, 'global', None), + (Bucket, 'histogram', None), + (Bucket, 'iprange', None), + (Bucket, 'missing', None), + (Bucket, 'nested', None), + (Bucket, 'range', None), + (Bucket, 'reverse_nested', None), + (Bucket, 'significant_terms', None), + (Bucket, 'terms', None), + + (Agg, 'avg', None), + (Agg, 'cardinality', None), + (Agg, 'extended_stats', None), + (Agg, 'geo_bounds', None), + (Agg, 'max', None), + (Agg, 'min', None), + (Agg, 'percentiles', None), + (Agg, 'percentile_ranks', None), + (Agg, 'scripted_metric', None), + (Agg, 'stats', None), + (Agg, 'sum', None), + (Agg, 'top_hits', None), + (Agg, 'value_count', None), + + (Pipeline, 'avg_bucket', None), + (Pipeline, 'bucket_script', None), + (Pipeline, 'bucket_selector', None), + (Pipeline, 'cumulative_sum', None), + (Pipeline, 'derivative', None), + (Pipeline, 'extended_stats_bucket', None), + (Pipeline, 'max_bucket', None), + (Pipeline, 'min_bucket', None), + (Pipeline, 'moving_avg', None), + (Pipeline, 'percentiles_bucket', None), + 
+    (Pipeline, 'serial_diff', None),
+    (Pipeline, 'stats_bucket', None),
+    (Pipeline, 'sum_bucket', None),
+)
+
+# generate the aggregation classes dynamically
+for base, fname, params_def in AGGS:
+    # don't override the params def from AggBase
+    if params_def:
+        params_def.update(AggBase._param_defs)
+    fclass = _make_dsl_class(base, fname, params_def)
+    globals()[fclass.__name__] = fclass
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/analysis.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/analysis.py
new file mode 100644
index 0000000..b735365
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/analysis.py
@@ -0,0 +1,121 @@
+from .utils import DslBase
+
+__all__ = [
+    'tokenizer', 'analyzer', 'char_filter', 'token_filter'
+]
+
+class AnalysisBase(object):
+    @classmethod
+    def _type_shortcut(cls, name_or_instance, type=None, **kwargs):
+        if isinstance(name_or_instance, cls):
+            if type or kwargs:
+                raise ValueError('%s() cannot accept parameters.' % cls.__name__)
+            return name_or_instance
+
+        if not (type or kwargs):
+            return cls.get_dsl_class('builtin')(name_or_instance)
+
+        return cls.get_dsl_class('custom')(name_or_instance, type or 'custom', **kwargs)
+
+class CustomAnalysis(object):
+    name = 'custom'
+    def __init__(self, name, builtin_type='custom', **kwargs):
+        self._builtin_type = builtin_type
+        self._name = name
+        super(CustomAnalysis, self).__init__(**kwargs)
+
+    def to_dict(self):
+        # only the name is present in lists
+        return self._name
+
+    def get_definition(self):
+        d = super(CustomAnalysis, self).to_dict()
+        d = d.pop(self.name)
+        d['type'] = self._builtin_type
+        return d
+
+class BuiltinAnalysis(object):
+    name = 'builtin'
+    def __init__(self, name):
+        self._name = name
+        super(BuiltinAnalysis, self).__init__()
+
+    def to_dict(self):
+        # only the name is present in lists
+        return self._name
+
+class Analyzer(AnalysisBase, DslBase):
+    _type_name = 'analyzer'
+    name = None
+
+class BuiltinAnalyzer(BuiltinAnalysis, Analyzer):
+    def get_analysis_definition(self):
+        return {}
+
+class CustomAnalyzer(CustomAnalysis, Analyzer):
+    _param_defs = {
+        'filter': {'type': 'token_filter', 'multi': True},
+        'char_filter': {'type': 'char_filter', 'multi': True},
+        'tokenizer': {'type': 'tokenizer'},
+    }
+
+    def get_analysis_definition(self):
+        out = {'analyzer': {self._name: self.get_definition()}}
+
+        t = getattr(self, 'tokenizer', None)
+        if hasattr(t, 'get_definition'):
+            out['tokenizer'] = {t._name: t.get_definition()}
+
+        filters = dict((f._name, f.get_definition())
+                       for f in self.filter if hasattr(f, 'get_definition'))
+        if filters:
+            out['filter'] = filters
+
+        char_filters = dict((f._name, f.get_definition())
+                            for f in self.char_filter if hasattr(f, 'get_definition'))
+        if char_filters:
+            out['char_filter'] = char_filters
+
+        return out
+
+
+class Tokenizer(AnalysisBase, DslBase):
+    _type_name = 'tokenizer'
+    name = None
+
+class BuiltinTokenizer(BuiltinAnalysis, Tokenizer):
+    pass
+
+class CustomTokenizer(CustomAnalysis, Tokenizer):
+    pass
+
+
+class TokenFilter(AnalysisBase, DslBase):
+    _type_name = 'token_filter'
+    name = None
+
+class BuiltinTokenFilter(BuiltinAnalysis, TokenFilter):
+    pass
+
+class CustomTokenFilter(CustomAnalysis, TokenFilter):
+    pass
+
+
+class CharFilter(AnalysisBase, DslBase):
+    _type_name = 'char_filter'
+    name = None
+
+class BuiltinCharFilter(BuiltinAnalysis, CharFilter):
+    pass
+
+class CustomCharFilter(CustomAnalysis, CharFilter):
+    pass
+
+
+# shortcuts for direct use
+analyzer = Analyzer._type_shortcut
+tokenizer
= Tokenizer._type_shortcut
+token_filter = TokenFilter._type_shortcut
+char_filter = CharFilter._type_shortcut
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/connections.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/connections.py
new file mode 100644
index 0000000..bdc62e0
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/connections.py
@@ -0,0 +1,96 @@
+from six import string_types
+
+from elasticsearch import Elasticsearch
+
+from .serializer import serializer
+
+class Connections(object):
+    """
+    Class responsible for holding connections to different clusters. Used as a
+    singleton in this module.
+    """
+    def __init__(self):
+        self._kwargs = {}
+        self._conns = {}
+
+    def configure(self, **kwargs):
+        """
+        Configure multiple connections at once, useful for passing in config
+        dictionaries obtained from other sources, like Django's settings or a
+        configuration management tool.
+
+        Example::
+
+            connections.configure(
+                default={'hosts': 'localhost'},
+                dev={'hosts': ['esdev1.example.com:9200'], 'sniff_on_start': True}
+            )
+
+        Connections will only be constructed lazily when requested through
+        ``get_connection``.
+        """
+        for k in list(self._conns):
+            # try and preserve existing client to keep the persistent connections alive
+            if k in self._kwargs and kwargs.get(k, None) == self._kwargs[k]:
+                continue
+            del self._conns[k]
+        self._kwargs = kwargs
+
+    def add_connection(self, alias, conn):
+        """
+        Add a connection object, it will be passed through as-is.
+        """
+        self._conns[alias] = conn
+
+    def remove_connection(self, alias):
+        """
+        Remove connection from the registry. Raises ``KeyError`` if connection
+        wasn't found.
+        """
+        errors = 0
+        for d in (self._conns, self._kwargs):
+            try:
+                del d[alias]
+            except KeyError:
+                errors += 1
+
+        if errors == 2:
+            raise KeyError('There is no connection with alias %r.' % alias)
+
+    def create_connection(self, alias='default', **kwargs):
+        """
+        Construct an instance of ``elasticsearch.Elasticsearch`` and register
+        it under given alias.
+        """
+        kwargs.setdefault('serializer', serializer)
+        conn = self._conns[alias] = Elasticsearch(**kwargs)
+        return conn
+
+    def get_connection(self, alias='default'):
+        """
+        Retrieve a connection, construct it if necessary (only configuration
+        was passed to us). If a non-string alias has been passed through we
+        assume it's already a client instance and will just return it as-is.
+
+        Raises ``KeyError`` if no client (or its definition) is registered
+        under the alias.
+        """
+        # do not check isinstance(Elasticsearch) so that people can wrap their
+        # clients
+        if not isinstance(alias, string_types):
+            return alias
+
+        # connection already established
+        try:
+            return self._conns[alias]
+        except KeyError:
+            pass
+
+        # if not, try to create it
+        try:
+            return self.create_connection(alias, **self._kwargs[alias])
+        except KeyError:
+            # no connection and no kwargs to set one up
+            raise KeyError('There is no connection with alias %r.'
% alias)
+
+connections = Connections()
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/document.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/document.py
new file mode 100644
index 0000000..c4f55da
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/document.py
@@ -0,0 +1,264 @@
+import re
+
+from six import iteritems, add_metaclass
+
+from .field import Field
+from .mapping import Mapping
+from .utils import ObjectBase, AttrDict, merge
+from .result import ResultMeta
+from .search import Search
+from .connections import connections
+from .exceptions import ValidationException
+
+DELETE_META_FIELDS = frozenset((
+    'id', 'parent', 'routing', 'version', 'version_type'
+))
+
+DOC_META_FIELDS = frozenset((
+    'timestamp', 'ttl'
+)).union(DELETE_META_FIELDS)
+
+META_FIELDS = frozenset((
+    # Elasticsearch metadata fields, except 'type'
+    'index', 'using', 'score',
+)).union(DOC_META_FIELDS)
+
+class MetaField(object):
+    def __init__(self, *args, **kwargs):
+        self.args, self.kwargs = args, kwargs
+
+class DocTypeMeta(type):
+    def __new__(cls, name, bases, attrs):
+        # DocTypeMeta filters attrs in place
+        attrs['_doc_type'] = DocTypeOptions(name, bases, attrs)
+        return super(DocTypeMeta, cls).__new__(cls, name, bases, attrs)
+
+class DocTypeOptions(object):
+    def __init__(self, name, bases, attrs):
+        meta = attrs.pop('Meta', None)
+
+        # default index, if not overridden by doc.meta
+        self.index = getattr(meta, 'index', None)
+
+        # default cluster alias, can be overridden in doc.meta
+        self._using = getattr(meta, 'using', None)
+
+        # get doc_type name, if not defined take the name of the class and
+        # transform it to lower_case
+        doc_type = getattr(meta, 'doc_type',
+                           re.sub(r'(.)([A-Z])', r'\1_\2', name).lower())
+
+        # create the mapping instance
+        self.mapping = getattr(meta, 'mapping', Mapping(doc_type))
+
+        # register all declared fields into the mapping
+        for name, value in list(iteritems(attrs)):
+            if isinstance(value, Field):
+                self.mapping.field(name, value)
+                del attrs[name]
+
+        # add all the mappings for meta fields
+        for name in dir(meta):
+            if isinstance(getattr(meta, name, None), MetaField):
+                params = getattr(meta, name)
+                self.mapping.meta(name, *params.args, **params.kwargs)
+
+        # document inheritance - include the fields from parents' mappings and
+        # index/using values
+        for b in bases:
+            if hasattr(b, '_doc_type') and hasattr(b._doc_type, 'mapping'):
+                self.mapping.update(b._doc_type.mapping, update_only=True)
+                self._using = self._using or b._doc_type._using
+                self.index = self.index or b._doc_type.index
+
+    @property
+    def using(self):
+        return self._using or 'default'
+
+    @property
+    def name(self):
+        return self.mapping.properties.name
+
+    @property
+    def parent(self):
+        if '_parent' in self.mapping._meta:
+            return self.mapping._meta['_parent']['type']
+        return
+
+    def init(self, index=None, using=None):
+        self.mapping.save(index or self.index, using=using or self.using)
+
+    def refresh(self, index=None, using=None):
+        self.mapping.update_from_es(index or self.index, using=using or self.using)
+
+
+@add_metaclass(DocTypeMeta)
+class DocType(ObjectBase):
+    def __init__(self, meta=None, **kwargs):
+        meta = meta or {}
+        for k in list(kwargs):
+            if k.startswith('_'):
+                meta[k] = kwargs.pop(k)
+
+        if self._doc_type.index:
+            meta.setdefault('_index', self._doc_type.index)
+        super(AttrDict, self).__setattr__('meta', ResultMeta(meta))
+
+        super(DocType, self).__init__(**kwargs)
+
+    def __getstate__(self):
+        return (self.to_dict(),
self.meta._d_) + + def __setstate__(self, state): + data, meta = state + super(AttrDict, self).__setattr__('_d_', data) + super(AttrDict, self).__setattr__('meta', ResultMeta(meta)) + + def __getattr__(self, name): + if name.startswith('_') and name[1:] in META_FIELDS: + return getattr(self.meta, name[1:]) + return super(DocType, self).__getattr__(name) + + def __setattr__(self, name, value): + if name.startswith('_') and name[1:] in META_FIELDS: + return setattr(self.meta, name[1:], value) + return super(DocType, self).__setattr__(name, value) + + @classmethod + def init(cls, index=None, using=None): + cls._doc_type.init(index, using) + + @classmethod + def search(cls, using=None, index=None): + return Search( + using=using or cls._doc_type.using, + index=index or cls._doc_type.index, + doc_type={cls._doc_type.name: cls.from_es}, + ) + + @classmethod + def get(cls, id, using=None, index=None, **kwargs): + es = connections.get_connection(using or cls._doc_type.using) + doc = es.get( + index=index or cls._doc_type.index, + doc_type=cls._doc_type.name, + id=id, + **kwargs + ) + if not doc['found']: + return None + return cls.from_es(doc) + + @classmethod + def from_es(cls, hit): + # don't modify in place + meta = hit.copy() + doc = meta.pop('_source', {}) + + if 'fields' in meta: + for k, v in iteritems(meta.pop('fields')): + if k == '_source': + doc.update(v) + if k.startswith('_'): + meta[k] = v + else: + doc[k] = v + + return cls(meta=meta, **doc) + + def _get_connection(self, using=None): + return connections.get_connection(using or self._doc_type.using) + connection = property(_get_connection) + + def _get_index(self, index=None): + if index is None: + index = getattr(self.meta, 'index', self._doc_type.index) + if index is None: + raise ValidationException('No index') + return index + + def delete(self, using=None, index=None, **kwargs): + es = self._get_connection(using) + # extract parent, routing etc from meta + doc_meta = dict( + (k, self.meta[k]) + for k in DELETE_META_FIELDS + if k in self.meta + ) + doc_meta.update(kwargs) + es.delete( + index=self._get_index(index), + doc_type=self._doc_type.name, + **doc_meta + ) + + def to_dict(self, include_meta=False): + d = super(DocType, self).to_dict() + if not include_meta: + return d + + meta = dict( + ('_' + k, self.meta[k]) + for k in DOC_META_FIELDS + if k in self.meta + ) + + # in case of to_dict include the index unlike save/update/delete + if 'index' in self.meta: + meta['_index'] = self.meta.index + elif self._doc_type.index: + meta['_index'] = self._doc_type.index + + meta['_type'] = self._doc_type.name + meta['_source'] = d + return meta + + def update(self, using=None, index=None, **fields): + es = self._get_connection(using) + + # update the data locally + merge(self._d_, fields) + + # extract parent, routing etc from meta + doc_meta = dict( + (k, self.meta[k]) + for k in DOC_META_FIELDS + if k in self.meta + ) + meta = es.update( + index=self._get_index(index), + doc_type=self._doc_type.name, + body={'doc': fields}, + **doc_meta + ) + # update meta information from ES + for k in META_FIELDS: + if '_' + k in meta: + setattr(self.meta, k, meta['_' + k]) + + def save(self, using=None, index=None, validate=True, **kwargs): + if validate: + self.full_clean() + + es = self._get_connection(using) + # extract parent, routing etc from meta + doc_meta = dict( + (k, self.meta[k]) + for k in DOC_META_FIELDS + if k in self.meta + ) + doc_meta.update(kwargs) + meta = es.index( + index=self._get_index(index), + 
doc_type=self._doc_type.name,
+            body=self.to_dict(),
+            **doc_meta
+        )
+        # update meta information from ES
+        for k in META_FIELDS:
+            if '_' + k in meta:
+                setattr(self.meta, k, meta['_' + k])
+
+        # return True/False if the document has been created/updated
+        return meta['created']
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/exceptions.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/exceptions.py
new file mode 100644
index 0000000..0ed5863
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/exceptions.py
@@ -0,0 +1,14 @@
+class ElasticsearchDslException(Exception):
+    pass
+
+
+class UnknownDslObject(ElasticsearchDslException):
+    pass
+
+
+class ValidationException(ValueError, ElasticsearchDslException):
+    pass
+
+
+class IllegalOperation(ElasticsearchDslException):
+    pass
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/faceted_search.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/faceted_search.py
new file mode 100644
index 0000000..8497122
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/faceted_search.py
@@ -0,0 +1,284 @@
+from datetime import timedelta, datetime
+from six import iteritems, itervalues
+from functools import partial
+
+from .search import Search
+from .aggs import A
+from .utils import AttrDict
+from .result import Response
+from .query import Q
+
+__all__ = ['FacetedSearch', 'HistogramFacet', 'TermsFacet', 'DateHistogramFacet', 'RangeFacet']
+
+class Facet(object):
+    """
+    A facet on a faceted search. Wraps an aggregation and provides functionality
+    to create a filter for selected values and return a list of facet values
+    from the result of the aggregation.
+    """
+    agg_type = None
+
+    def __init__(self, **kwargs):
+        self.filter_values = ()
+        self._params = kwargs
+
+    def get_aggregation(self):
+        """
+        Return the aggregation object.
+        """
+        return A(self.agg_type, **self._params)
+
+    def add_filter(self, filter_values):
+        """
+        Construct a filter.
+        """
+        if not filter_values:
+            return
+
+        f = self.get_value_filter(filter_values[0])
+        for v in filter_values[1:]:
+            f |= self.get_value_filter(v)
+        return f
+
+    def get_value_filter(self, filter_value):
+        """
+        Construct a filter for an individual value.
+        """
+        pass
+
+    def is_filtered(self, key, filter_values):
+        """
+        Return whether a filter is active on the given key.
+        """
+        return key in filter_values
+
+    def get_value(self, bucket):
+        """
+        Return a value representing a bucket; its key by default.
+        """
+        return bucket['key']
+
+    def get_values(self, data, filter_values):
+        """
+        Turn the raw bucket data into a list of tuples containing the key,
+        number of documents and a flag indicating whether this value has been
+        selected or not.
+        """
+        out = []
+        for bucket in data:
+            key = self.get_value(bucket)
+            out.append((
+                key,
+                bucket['doc_count'],
+                self.is_filtered(key, filter_values)
+            ))
+        return out
+
+
+class TermsFacet(Facet):
+    agg_type = 'terms'
+
+    def add_filter(self, filter_values):
+        """ Create a single terms filter instead of a bool filter containing term filters.
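+
+        For example (the field and values here are illustrative, not part of
+        the library): with a facet declared as ``TermsFacet(field='tags')``,
+        ``add_filter(['python', 'search'])`` returns
+        ``Q('terms', tags=['python', 'search'])``.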
""" + if filter_values: + return Q('terms', **{self._params['field']: filter_values}) + + +class RangeFacet(Facet): + agg_type = 'range' + + def _range_to_dict(self, range): + key, range = range + out = {'key': key} + if range[0] is not None: + out['from'] = range[0] + if range[1] is not None: + out['to'] = range[1] + return out + + def __init__(self, ranges, **kwargs): + super(RangeFacet, self).__init__(**kwargs) + self._params['ranges'] = list(map(self._range_to_dict, ranges)) + self._params['keyed'] = False + self._ranges = dict(ranges) + + def get_value_filter(self, filter_value): + f, t = self._ranges[filter_value] + limits = {} + if f is not None: + limits['from'] = f + if t is not None: + limits['to'] = t + + return Q('range', **{ + self._params['field']: limits + }) + +class HistogramFacet(Facet): + agg_type = 'histogram' + + def get_value_filter(self, filter_value): + return Q('range', **{ + self._params['field']: { + 'gte': filter_value, + 'lt': filter_value + self._params['interval'] + } + }) + + +class DateHistogramFacet(Facet): + agg_type = 'date_histogram' + + DATE_INTERVALS = { + 'month': lambda d: (d+timedelta(days=32)).replace(day=1), + 'week': lambda d: d+timedelta(days=7), + 'day': lambda d: d+timedelta(days=1), + 'hour': lambda d: d+timedelta(hours=1), + } + + def __init__(self, **kwargs): + kwargs.setdefault("min_doc_count", 0) + super(DateHistogramFacet, self).__init__(**kwargs) + + def get_value(self, bucket): + return datetime.utcfromtimestamp(int(bucket['key']) / 1000) + + def get_value_filter(self, filter_value): + return Q('range', **{ + self._params['field']: { + 'gte': filter_value, + 'lt': self.DATE_INTERVALS[self._params['interval']](filter_value) + } + }) + + +class FacetedResponse(Response): + def __init__(self, search, *args, **kwargs): + super(FacetedResponse, self).__init__(*args, **kwargs) + super(AttrDict, self).__setattr__('_search', search) + + @property + def query_string(self): + return self._search._query + + @property + def facets(self): + if not hasattr(self, '_facets'): + super(AttrDict, self).__setattr__('_facets', AttrDict({})) + for name, facet in iteritems(self._search.facets): + self._facets[name] = facet.get_values( + self.aggregations['_filter_' + name][name]['buckets'], + self._search.filter_values.get(name, ()) + ) + return self._facets + + +class FacetedSearch(object): + index = '_all' + doc_types = ['_all'] + fields = ('*', ) + facets = {} + + def __init__(self, query=None, filters={}): + self._query = query + self._filters = {} + self.filter_values = {} + for name, value in iteritems(filters): + self.add_filter(name, value) + + self._s = self.build_search() + + def count(self): + return self._s.count() + + def __getitem__(self, k): + self._s = self._s[k] + return self + + def __iter__(self): + return iter(self._s) + + def add_filter(self, name, filter_values): + """ + Add a filter for a facet. + """ + # normalize the value into a list + if not isinstance(filter_values, (tuple, list)): + if filter_values in (None, ''): + return + filter_values = [filter_values, ] + + # remember the filter values for use in FacetedResponse + self.filter_values[name] = filter_values + + # get the filter from the facet + f = self.facets[name].add_filter(filter_values) + if f is None: + return + + self._filters[name] = f + + def search(self): + """ + Construct the Search object. 
+ """ + s = Search(doc_type=self.doc_types, index=self.index) + return s.response_class(partial(FacetedResponse, self)) + + def query(self, search, query): + """ + Add query part to ``search``. + + Override this if you wish to customize the query used. + """ + if query: + return search.query('multi_match', fields=self.fields, query=query) + return search + + def aggregate(self, search): + """ + Add aggregations representing the facets selected, including potential + filters. + """ + for f, facet in iteritems(self.facets): + agg = facet.get_aggregation() + agg_filter = Q('match_all') + for field, filter in iteritems(self._filters): + if f == field: + continue + agg_filter &= filter + search.aggs.bucket( + '_filter_' + f, + 'filter', + filter=agg_filter + ).bucket(f, agg) + + def filter(self, search): + """ + Add a ``post_filter`` to the search request narrowing the results based + on the facet filters. + """ + post_filter = Q('match_all') + for f in itervalues(self._filters): + post_filter &= f + return search.post_filter(post_filter) + + def highlight(self, search): + """ + Add highlighting for all the fields + """ + return search.highlight(*self.fields) + + def build_search(self): + """ + Construct the ``Search`` object. + """ + s = self.search() + s = self.query(s, self._query) + s = self.filter(s) + s = self.highlight(s) + self.aggregate(s) + return s + + def execute(self): + return self._s.execute() diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/field.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/field.py new file mode 100644 index 0000000..30bb85a --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/field.py @@ -0,0 +1,256 @@ +from datetime import date +from dateutil import parser +from six import itervalues + +from .utils import DslBase, _make_dsl_class, ObjectBase, AttrDict, AttrList +from .exceptions import ValidationException + +__all__ = [ + 'construct_field', 'Field', 'Object', 'Nested', 'Date', 'String', 'Float', 'Double', + 'Byte', 'Short', 'Integer', 'Long', 'Boolean', 'Ip', 'Attachment', + 'GeoPoint', 'GeoShape', 'InnerObjectWrapper' +] + +def construct_field(name_or_field, **params): + # {"type": "string", "index": "not_analyzed"} + if isinstance(name_or_field, dict): + if params: + raise ValueError('construct_field() cannot accept parameters when passing in a dict.') + params = name_or_field.copy() + if 'type' not in params: + # inner object can be implicitly defined + if 'properties' in params: + name = 'object' + else: + raise ValueError('construct_field() needs to have a "type" key.') + else: + name = params.pop('type') + return Field.get_dsl_class(name)(**params) + + # String() + if isinstance(name_or_field, Field): + if params: + raise ValueError('construct_field() cannot accept parameters when passing in a construct_field object.') + return name_or_field + + # "string", index="not_analyzed" + return Field.get_dsl_class(name_or_field)(**params) + +class Field(DslBase): + _type_name = 'field' + _type_shortcut = staticmethod(construct_field) + # all fields can be multifields + _param_defs = {'fields': {'type': 'field', 'hash': True}} + name = None + _coerce = False + + def __init__(self, *args, **kwargs): + self._multi = kwargs.pop('multi', False) + self._required = kwargs.pop('required', False) + super(Field, self).__init__(*args, **kwargs) + + def _serialize(self, data): + return data + + def _deserialize(self, data): + return data + + def _empty(self): + return None + + def empty(self): + if self._multi: + return 
AttrList([]) + return self._empty() + + def serialize(self, data): + if isinstance(data, (list, AttrList)): + data[:] = map(self._serialize, data) + return data + return self._serialize(data) + + def deserialize(self, data): + if isinstance(data, (list, AttrList)): + data[:] = map(self._deserialize, data) + return data + return self._deserialize(data) + + def clean(self, data): + if data is not None: + data = self.deserialize(data) + # FIXME: numeric 0 + if not data and self._required: + raise ValidationException("Value required for this field.") + return data + + def to_dict(self): + d = super(Field, self).to_dict() + name, value = d.popitem() + value['type'] = name + return value + +class CustomField(Field): + name = 'custom' + _coerce = True + + def to_dict(self): + if isinstance(self.builtin_type, Field): + return self.builtin_type.to_dict() + + d = super(CustomField, self).to_dict() + d['type'] = self.builtin_type + return d + +class InnerObjectWrapper(ObjectBase): + def __init__(self, mapping, **kwargs): + # mimic DocType behavior with _doc_type.mapping + super(AttrDict, self).__setattr__('_doc_type', type('Meta', (), {'mapping': mapping})) + super(InnerObjectWrapper, self).__init__(**kwargs) + + +class InnerObject(object): + " Common functionality for nested and object fields. " + _param_defs = {'properties': {'type': 'field', 'hash': True}} + _coerce = True + + def __init__(self, *args, **kwargs): + self._doc_class = kwargs.pop('doc_class', InnerObjectWrapper) + super(InnerObject, self).__init__(*args, **kwargs) + + def field(self, name, *args, **kwargs): + self.properties[name] = construct_field(*args, **kwargs) + return self + # XXX: backwards compatible, will be removed + property = field + + def _empty(self): + return self._doc_class(self.properties) + + def empty(self): + if self._multi: + return AttrList([], lambda d: self._doc_class(self.properties, **d)) + return self._empty() + + def __getitem__(self, name): + return self.properties[name] + + def __contains__(self, name): + return name in self.properties + + def _collect_fields(self): + " Iterate over all Field objects within, including multi fields. 
" + for f in itervalues(self.properties.to_dict()): + yield f + # multi fields + if hasattr(f, 'fields'): + for inner_f in itervalues(f.fields.to_dict()): + yield inner_f + # nested and inner objects + if hasattr(f, '_collect_fields'): + for inner_f in f._collect_fields(): + yield inner_f + + def update(self, other_object): + if not hasattr(other_object, 'properties'): + # not an inner/nested object, no merge possible + return + + our, other = self.properties, other_object.properties + for name in other: + if name in our: + if hasattr(our[name], 'update'): + our[name].update(other[name]) + continue + our[name] = other[name] + + def _deserialize(self, data): + if data is None: + return None + # don't wrap already wrapped data + if isinstance(data, self._doc_class): + return data + + if isinstance(data, (list, AttrList)): + data[:] = list(map(self._deserialize, data)) + return data + + if isinstance(data, AttrDict): + data = data._d_ + + return self._doc_class(self.properties, **data) + + def _serialize(self, data): + return data.to_dict() + + def clean(self, data): + data = super(InnerObject, self).clean(data) + if data is None: + return None + if isinstance(data, (list, AttrList)): + for d in data: + d.full_clean() + else: + data.full_clean() + return data + + +class Object(InnerObject, Field): + name = 'object' + +class Nested(InnerObject, Field): + name = 'nested' + + def __init__(self, *args, **kwargs): + # change the default for Nested fields + kwargs.setdefault('multi', True) + super(Nested, self).__init__(*args, **kwargs) + +class Date(Field): + name = 'date' + _coerce = True + + def _deserialize(self, data): + if not data: + return None + if isinstance(data, date): + return data + + try: + # TODO: add format awareness + return parser.parse(data) + except Exception as e: + raise ValidationException('Could not parse date from the value (%r)' % data, e) + +class String(Field): + _param_defs = { + 'fields': {'type': 'field', 'hash': True}, + 'analyzer': {'type': 'analyzer'}, + 'index_analyzer': {'type': 'analyzer'}, + 'search_analyzer': {'type': 'analyzer'}, + } + name = 'string' + + def _empty(self): + return '' + +FIELDS = ( + 'float', + 'double', + 'byte', + 'short', + 'integer', + 'long', + 'boolean', + 'ip', + 'attachment', + 'geo_point', + 'geo_shape', + 'completion', +) + +# generate the query classes dynamicaly +for f in FIELDS: + fclass = _make_dsl_class(Field, f) + globals()[fclass.__name__] = fclass + __all__.append(fclass.__name__) + diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/filter.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/filter.py new file mode 100644 index 0000000..d462bcd --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/filter.py @@ -0,0 +1,180 @@ +from .utils import DslBase, BoolMixin, _make_dsl_class + +__all__ = [ + 'F', 'And', 'AndOrFilter', 'Bool', 'EMPTY_FILTER', 'Exists', 'Filter', + 'Fquery', 'GeoBoundingBox', 'GeoDistance', 'GeoDistanceRange', + 'GeoPolygon', 'GeoShape', 'GeohashCell', 'HasChild', 'HasParent', 'Ids', + 'Indices', 'Limit', 'MatchAll', 'Missing', 'Nested', 'Not', 'Or', 'Prefix', + 'Query', 'Range', 'Regexp', 'Script', 'Term', 'Terms', 'Type' +] + + +def F(name_or_filter='match_all', filters=None, **params): + # 'and/or', [F(), F()] + if filters is not None: + # someone passed in positional argument to F outside of and/or or query + if name_or_filter in ('and', 'or'): + params['filters'] = filters + elif name_or_filter == 'query': + params['query'] = filters + else: + raise 
ValueError("Filter %r doesn't accept positional argument." % name_or_filter) + + # {"term": {...}} + if isinstance(name_or_filter, dict): + if params: + raise ValueError('F() cannot accept parameters when passing in a dict.') + if len(name_or_filter) != 1: + raise ValueError('F() can only accept dict with a single filter ({"bool": {...}}). ' + 'Instead it got (%r)' % name_or_filter) + name, params = name_or_filter.copy().popitem() + if isinstance(params, dict): + return Filter.get_dsl_class(name)(**params) + else: + # and filter can have list + return Filter.get_dsl_class(name)(params) + + # Term(...) + if isinstance(name_or_filter, Filter): + if params: + raise ValueError('F() cannot accept parameters when passing in a Filter object.') + return name_or_filter + + # s.filter = ~F(s.filter) + if hasattr(name_or_filter, '_proxied'): + return name_or_filter._proxied + + # 'term', tag='python', ... + return Filter.get_dsl_class(name_or_filter)(**params) + +class Filter(DslBase): + _type_name = 'filter' + _type_shortcut = staticmethod(F) + name = None + +class MatchAll(Filter): + name = 'match_all' + def __add__(self, other): + return other._clone() + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other): + return self + __ror__ = __or__ +EMPTY_FILTER = MatchAll() + +class Bool(BoolMixin, Filter): + name = 'bool' + _param_defs = { + 'must': {'type': 'filter', 'multi': True}, + 'should': {'type': 'filter', 'multi': True}, + 'must_not': {'type': 'filter', 'multi': True}, + } + + def __and__(self, other): + f = self._clone() + if isinstance(other, self.__class__): + f.must += other.must + f.must_not += other.must_not + f.should = [] + if self.should and other.should: + selfshould, othershould = self.should[:], other.should[:] + # required subfilter, move to must + for s in (selfshould, othershould): + if len(s) == 1: + f.must.append(s.pop()) + + # we have leftover lists, nothing to do but add to must as bool(should) + if selfshould and othershould: + f.must.extend(( + Bool(should=selfshould), + Bool(should=othershould), + )) + # at most one should list is left, keep as should + else: + f.should = selfshould + othershould + + # at most one should list is left, keep as should + else: + f.should = self.should + other.should + else: + f.must.append(other) + return f + __rand__ = __and__ + +# register this as Bool for Filter +Filter._bool = Bool + +class Not(Filter): + name = 'not' + _param_defs = {'filter': {'type': 'filter'}} + + def __init__(self, filter=None, **kwargs): + if filter is None: + filter, kwargs = kwargs, {} + super(Not, self).__init__(filter=filter, **kwargs) + +class AndOrFilter(object): + _param_defs = {'filters': {'type': 'filter', 'multi': True}} + + def __init__(self, filters=None, **kwargs): + if filters is not None: + kwargs['filters'] = filters + super(AndOrFilter, self).__init__(**kwargs) + + # compound filters +class And(AndOrFilter, Filter): + name = 'and' + +class Or(AndOrFilter, Filter): + name = 'or' + +class Query(Filter): + name = 'query' + _param_defs = {'query': {'type': 'query'}} + + def __init__(self, query=None, **kwargs): + if query is not None: + kwargs['query'] = query + super(Query, self).__init__(**kwargs) + + def to_dict(self): + d = super(Query, self).to_dict() + d[self.name].update(d[self.name].pop('query', {})) + return d + + +FILTERS = ( + # relationships + ('nested', {'filter': {'type': 'filter'}}), + ('has_child', {'filter': {'type': 'filter'}}), + ('has_parent', {'filter': {'type': 'filter'}}), + + ('fquery', {'query': {'type': 
'query'}}),
+
+    # core filters
+    ('exists', None),
+    ('geo_bounding_box', None),
+    ('geo_distance', None),
+    ('geo_distance_range', None),
+    ('geo_polygon', None),
+    ('geo_shape', None),
+    ('geohash_cell', None),
+    ('ids', None),
+    ('indices', None),
+    ('limit', None),
+    ('missing', None),
+    ('prefix', None),
+    ('range', None),
+    ('regexp', None),
+    ('script', None),
+    ('term', None),
+    ('terms', None),
+    ('type', None),
+)
+
+# generate the filter classes dynamically
+for fname, params_def in FILTERS:
+    fclass = _make_dsl_class(Filter, fname, params_def)
+    globals()[fclass.__name__] = fclass
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/function.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/function.py
new file mode 100644
index 0000000..688b226
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/function.py
@@ -0,0 +1,86 @@
+from .utils import DslBase
+
+def SF(name_or_sf, **params):
+    # {"script_score": {"script": "_score"}, "filter": {}}
+    if isinstance(name_or_sf, dict):
+        if params:
+            raise ValueError('SF() cannot accept parameters when passing in a dict.')
+        kwargs = {}
+        sf = name_or_sf.copy()
+        for k in ScoreFunction._param_defs:
+            if k in name_or_sf:
+                kwargs[k] = sf.pop(k)
+
+        # not sf, so just filter+weight, which used to be boost factor
+        if not sf:
+            name = 'boost_factor'
+        # {'FUNCTION': {...}}
+        elif len(sf) == 1:
+            name, params = sf.popitem()
+        else:
+            raise ValueError('SF() got unexpected fields in the dictionary: %r' % sf)
+
+        # boost factor special case, see elasticsearch #6343
+        if not isinstance(params, dict):
+            params = {'value': params}
+
+        # mix known params (from _param_defs) and from inside the function
+        kwargs.update(params)
+        return ScoreFunction.get_dsl_class(name)(**kwargs)
+
+    # ScriptScore(script="_score", filter=Q())
+    if isinstance(name_or_sf, ScoreFunction):
+        if params:
+            raise ValueError('SF() cannot accept parameters when passing in a ScoreFunction object.')
+        return name_or_sf
+
+    # "script_score", script="_score", filter=Q()
+    return ScoreFunction.get_dsl_class(name_or_sf)(**params)
+
+class ScoreFunction(DslBase):
+    _type_name = 'score_function'
+    _type_shortcut = staticmethod(SF)
+    _param_defs = {
+        'query': {'type': 'query'},
+        'filter': {'type': 'query'},
+        'weight': {}
+    }
+    name = None
+
+    def to_dict(self):
+        d = super(ScoreFunction, self).to_dict()
+        # filter and query dicts should be at the same level as us
+        for k in self._param_defs:
+            if k in d[self.name]:
+                d[k] = d[self.name].pop(k)
+        return d
+
+class ScriptScore(ScoreFunction):
+    name = 'script_score'
+
+class BoostFactor(ScoreFunction):
+    name = 'boost_factor'
+
+    def to_dict(self):
+        d = super(BoostFactor, self).to_dict()
+        if 'value' in d[self.name]:
+            d[self.name] = d[self.name].pop('value')
+        else:
+            del d[self.name]
+        return d
+
+class RandomScore(ScoreFunction):
+    name = 'random_score'
+
+class FieldValueFactor(ScoreFunction):
+    name = 'field_value_factor'
+
+class Linear(ScoreFunction):
+    name = 'linear'
+
+class Gauss(ScoreFunction):
+    name = 'gauss'
+
+class Exp(ScoreFunction):
+    name = 'exp'
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/index.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/index.py
new file mode 100644
index 0000000..f4e7522
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/index.py
@@ -0,0 +1,91 @@
+from .connections import connections
+from .search import Search
+
+class Index(object):
+    def __init__(self, name, using='default'):
+        self._name = name
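+        # the registries below are filled in by doc_type(), settings() and
+        # aliases(), then serialized by to_dict() when the index is created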
+        self._doc_types = {}
+        self._mappings = {}
+        self._using = using
+        self._settings = {}
+        self._aliases = {}
+
+    def clone(self, name, using=None):
+        i = Index(name, using=using or self._using)
+        for attr in ('_doc_types', '_mappings', '_settings', '_aliases'):
+            setattr(i, attr, getattr(self, attr).copy())
+        return i
+
+    def _get_connection(self):
+        return connections.get_connection(self._using)
+    connection = property(_get_connection)
+
+    def doc_type(self, doc_type):
+        name = doc_type._doc_type.name
+        self._doc_types[name] = doc_type
+        self._mappings[name] = doc_type._doc_type.mapping
+
+        if not doc_type._doc_type.index:
+            doc_type._doc_type.index = self._name
+        return doc_type # to use as decorator???
+
+    def settings(self, **kwargs):
+        self._settings.update(kwargs)
+        return self
+
+    def aliases(self, **kwargs):
+        self._aliases.update(kwargs)
+        return self
+
+    def search(self):
+        return Search(
+            using=self._using,
+            index=self._name,
+            doc_type=[self._doc_types.get(k, k) for k in self._mappings]
+        )
+
+    def _get_mappings(self):
+        analysis, mappings = {}, {}
+        for mapping in self._mappings.values():
+            mappings.update(mapping.to_dict())
+            a = mapping._collect_analysis()
+            # merge the definition
+            # TODO: conflict detection/resolution
+            for key in a:
+                analysis.setdefault(key, {}).update(a[key])
+
+        return mappings, analysis
+
+    def to_dict(self):
+        out = {}
+        if self._settings:
+            out['settings'] = self._settings
+        if self._aliases:
+            out['aliases'] = self._aliases
+        mappings, analysis = self._get_mappings()
+        if mappings:
+            out['mappings'] = mappings
+        if analysis:
+            out.setdefault('settings', {})['analysis'] = analysis
+        return out
+
+    def exists(self, **kwargs):
+        return self.connection.indices.exists(index=self._name, **kwargs)
+
+    def refresh(self, **kwargs):
+        return self.connection.indices.refresh(index=self._name, **kwargs)
+
+    def flush(self, **kwargs):
+        return self.connection.indices.flush(index=self._name, **kwargs)
+
+    def open(self, **kwargs):
+        return self.connection.indices.open(index=self._name, **kwargs)
+
+    def close(self, **kwargs):
+        return self.connection.indices.close(index=self._name, **kwargs)
+
+    def create(self, **kwargs):
+        self.connection.indices.create(index=self._name, body=self.to_dict(), **kwargs)
+
+    def delete(self, **kwargs):
+        self.connection.indices.delete(index=self._name, **kwargs)
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/mapping.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/mapping.py
new file mode 100644
index 0000000..e25cd81
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/mapping.py
@@ -0,0 +1,151 @@
+from six import iteritems
+from itertools import chain
+
+from .utils import DslBase
+from .field import InnerObject, String
+from .connections import connections
+from .exceptions import IllegalOperation
+
+META_FIELDS = frozenset((
+    'dynamic', 'transform', 'dynamic_date_formats', 'date_detection',
+    'numeric_detection', 'dynamic_templates'
+))
+
+class Properties(InnerObject, DslBase):
+    def __init__(self, name):
+        self._name = name
+        super(Properties, self).__init__()
+
+    def __repr__(self):
+        return 'Properties(%r)' % self._name
+
+    @property
+    def name(self):
+        return self._name
+
+
+class Mapping(object):
+    def __init__(self, name):
+        self.properties = Properties(name)
+        self._meta = {}
+
+    def __repr__(self):
+        return 'Mapping(%r)' % self.doc_type
+
+    @classmethod
+    def from_es(cls, index, doc_type, using='default'):
+        m = cls(doc_type)
+        m.update_from_es(index, using)
+        return m
+
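+    # Illustrative round-trip (the 'blog' index and 'post' doc_type are
+    # assumed, not part of the library):
+    #
+    #   m = Mapping.from_es('blog', 'post')               # read mapping from ES
+    #   m.field('tags', 'string', index='not_analyzed')   # declare a new field
+    #   m.save('blog')                                    # put_mapping it back
+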
+    def _collect_analysis(self):
+        analysis = {}
+        fields = []
+        if '_all' in self._meta:
+            fields.append(String(**self._meta['_all']))
+
+        for f in chain(fields, self.properties._collect_fields()):
+            for analyzer_name in ('analyzer', 'index_analyzer', 'search_analyzer'):
+                if not hasattr(f, analyzer_name):
+                    continue
+                analyzer = getattr(f, analyzer_name)
+                d = analyzer.get_analysis_definition()
+                # empty custom analyzer, probably already defined out of our control
+                if not d:
+                    continue
+
+                # merge the definition
+                # TODO: conflict detection/resolution
+                for key in d:
+                    analysis.setdefault(key, {}).update(d[key])
+
+        return analysis
+
+    def save(self, index, using='default'):
+        # TODO: replace with creating an Index instance to avoid duplication
+        es = connections.get_connection(using)
+        if not es.indices.exists(index=index):
+            es.indices.create(index=index, body={'mappings': self.to_dict(), 'settings': {'analysis': self._collect_analysis()}})
+        else:
+            analysis = self._collect_analysis()
+            if analysis:
+                if es.cluster.state(index=index, metric='metadata')['metadata']['indices'][index]['state'] != 'close':
+                    # TODO: check if the analysis config is already there
+                    raise IllegalOperation(
+                        'You cannot update analysis configuration on an open index, you need to close index %s first.' % index)
+                es.indices.put_settings(index=index, body={'analysis': analysis})
+            es.indices.put_mapping(index=index, doc_type=self.doc_type, body=self.to_dict())
+
+    def update_from_es(self, index, using='default'):
+        es = connections.get_connection(using)
+        raw = es.indices.get_mapping(index=index, doc_type=self.doc_type)
+        _, raw = raw.popitem()
+        raw = raw['mappings'][self.doc_type]
+
+        for name, definition in iteritems(raw['properties']):
+            self.field(name, definition)
+
+        # metadata like _all etc
+        for name, value in iteritems(raw):
+            if name != 'properties':
+                if isinstance(value, dict):
+                    self.meta(name, **value)
+                else:
+                    self.meta(name, value)
+
+    def update(self, mapping, update_only=False):
+        for name in mapping:
+            if update_only and name in self:
+                # nested and inner objects, merge recursively
+                if hasattr(self[name], 'update'):
+                    self[name].update(mapping[name])
+                continue
+            self.field(name, mapping[name])
+
+        if update_only:
+            for name in mapping._meta:
+                if name not in self._meta:
+                    self._meta[name] = mapping._meta[name]
+        else:
+            self._meta.update(mapping._meta)
+
+    def __contains__(self, name):
+        return name in self.properties.properties
+
+    def __getitem__(self, name):
+        return self.properties.properties[name]
+
+    def __iter__(self):
+        return iter(self.properties.properties)
+
+    @property
+    def doc_type(self):
+        return self.properties.name
+
+    def field(self, *args, **kwargs):
+        self.properties.field(*args, **kwargs)
+        return self
+
+    def meta(self, name, params=None, **kwargs):
+        if not name.startswith('_') and name not in META_FIELDS:
+            name = '_' + name
+
+        if params and kwargs:
+            raise ValueError('Meta configs cannot have both value and a dictionary.')
+
+        self._meta[name] = kwargs if params is None else params
+        return self
+
+    def to_dict(self):
+        d = self.properties.to_dict()
+        meta = self._meta
+
+        # hard coded serialization of analyzers in _all
+        if '_all' in meta:
+            meta = meta.copy()
+            _all = meta['_all'] = meta['_all'].copy()
+            for f in ('analyzer', 'search_analyzer', 'index_analyzer'):
+                if hasattr(_all.get(f, None), 'to_dict'):
+                    _all[f] = _all[f].to_dict()
+        d[self.doc_type].update(meta)
+        return d
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/query.py
b/venv/lib/python2.7/site-packages/elasticsearch_dsl/query.py new file mode 100644 index 0000000..d50aeb2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/query.py @@ -0,0 +1,233 @@ +from .utils import DslBase, _make_dsl_class +from .function import SF, ScoreFunction + +__all__ = [ + 'Q', 'Bool', 'Boosting', 'Common', 'ConstantScore', 'DisMax', 'Filtered', + 'FunctionScore', 'Fuzzy', 'FuzzyLikeThis', 'FuzzyLikeThisField', + 'GeoShape', 'HasChild', 'HasParent', 'Ids', 'Indices', 'Match', 'MatchAll', + 'MatchPhrase', 'MatchPhrasePrefix', 'MoreLikeThis', 'MoreLikeThisField', + 'MultiMatch', 'Nested', 'Prefix', 'Query', 'QueryString', 'Range', + 'Regexp', 'SF', 'ScoreFunction', 'SimpleQueryString', 'SpanFirst', + 'SpanMulti', 'SpanNear', 'SpanNot', 'SpanOr', 'SpanTerm', 'Template', + 'Term', 'Terms', 'TopChildren', 'Wildcard' +] + + +def Q(name_or_query='match_all', **params): + # {"match": {"title": "python"}} + if isinstance(name_or_query, dict): + if params: + raise ValueError('Q() cannot accept parameters when passing in a dict.') + if len(name_or_query) != 1: + raise ValueError('Q() can only accept dict with a single query ({"match": {...}}). ' + 'Instead it got (%r)' % name_or_query) + name, params = name_or_query.copy().popitem() + return Query.get_dsl_class(name)(**params) + + # MatchAll() + if isinstance(name_or_query, Query): + if params: + raise ValueError('Q() cannot accept parameters when passing in a Query object.') + return name_or_query + + # s.query = Q('filtered', query=s.query) + if hasattr(name_or_query, '_proxied'): + return name_or_query._proxied + + # "match", title="python" + return Query.get_dsl_class(name_or_query)(**params) + +class Query(DslBase): + _type_name = 'query' + _type_shortcut = staticmethod(Q) + name = None + + def __add__(self, other): + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, '__radd__'): + return other.__radd__(self) + return Bool(must=[self, other]) + + def __invert__(self): + return Bool(must_not=[self]) + + def __or__(self, other): + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, '__ror__'): + return other.__ror__(self) + return Bool(should=[self, other]) + + def __and__(self, other): + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, '__rand__'): + return other.__rand__(self) + return Bool(must=[self, other]) + + +class MatchAll(Query): + name = 'match_all' + def __add__(self, other): + return other._clone() + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other): + return self + __ror__ = __or__ +EMPTY_QUERY = MatchAll() + +class Bool(Query): + name = 'bool' + _param_defs = { + 'must': {'type': 'query', 'multi': True}, + 'should': {'type': 'query', 'multi': True}, + 'must_not': {'type': 'query', 'multi': True}, + 'filter': {'type': 'query', 'multi': True}, + } + + def __add__(self, other): + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.should += other.should + q.must_not += other.must_not + q.filter += other.filter + else: + q.must.append(other) + return q + __radd__ = __add__ + + def __or__(self, other): + for q in (self, other): + if isinstance(q, Bool) and len(q.should) == 1 and not any((q.must, q.must_not, q.filter)): + other = self if q is other else other + q = q._clone() + q.should.append(other) + return q + + return Bool(should=[self, other]) + __ror__ = __or__ + + def __invert__(self): + # special case for single 
negated query
+        if not (self.must or self.should or self.filter) and len(self.must_not) == 1:
+            return self.must_not[0]._clone()
+
+        # bool without should, just flip must and must_not
+        elif not self.should:
+            q = self._clone()
+            q.must, q.must_not = q.must_not, q.must
+            if q.filter:
+                q.filter = [Bool(must_not=q.filter)]
+            return q
+
+        # TODO: should -> must_not.append(Bool(should=self.should)) ??
+        # queries with should just invert normally
+        return super(Bool, self).__invert__()
+
+    def __and__(self, other):
+        q = self._clone()
+        if isinstance(other, Bool):
+            q.must += other.must
+            q.must_not += other.must_not
+            q.filter += other.filter
+            q.should = []
+            for qx in (self, other):
+                # TODO: percentages will fail here
+                min_should_match = getattr(qx, 'minimum_should_match', 0 if qx.must else 1)
+                # all subqueries are required
+                if len(qx.should) <= min_should_match:
+                    q.must.extend(qx.should)
+                # not all of them are required, use it and remember min_should_match
+                elif not q.should:
+                    q.minimum_should_match = min_should_match
+                    q.should = qx.should
+                # not all are required, add a should list to the must with proper min_should_match
+                else:
+                    q.must.append(Bool(should=qx.should, minimum_should_match=min_should_match))
+        else:
+            q.must.append(other)
+        return q
+    __rand__ = __and__
+
+class FunctionScore(Query):
+    name = 'function_score'
+    _param_defs = {
+        'query': {'type': 'query'},
+        'filter': {'type': 'query'},
+        'functions': {'type': 'score_function', 'multi': True},
+    }
+
+    def __init__(self, **kwargs):
+        if 'functions' not in kwargs:
+            fns = kwargs['functions'] = []
+            for name in ScoreFunction._classes:
+                if name in kwargs:
+                    fns.append({name: kwargs.pop(name)})
+        super(FunctionScore, self).__init__(**kwargs)
+
+QUERIES = (
+    # compound queries
+    ('boosting', {'positive': {'type': 'query'}, 'negative': {'type': 'query'}}),
+    ('constant_score', {'query': {'type': 'query'}, 'filter': {'type': 'query'}}),
+    ('dis_max', {'queries': {'type': 'query', 'multi': True}}),
+    ('filtered', {'query': {'type': 'query'}, 'filter': {'type': 'query'}}),
+    ('indices', {'query': {'type': 'query'}, 'no_match_query': {'type': 'query'}}),
+
+    # relationship queries
+    ('nested', {'query': {'type': 'query'}}),
+    ('has_child', {'query': {'type': 'query'}}),
+    ('has_parent', {'query': {'type': 'query'}}),
+    ('top_children', {'query': {'type': 'query'}}),
+
+    # compound span queries
+    ('span_first', {'match': {'type': 'query'}}),
+    ('span_multi', {'match': {'type': 'query'}}),
+    ('span_near', {'clauses': {'type': 'query', 'multi': True}}),
+    ('span_not', {'exclude': {'type': 'query'}, 'include': {'type': 'query'}}),
+    ('span_or', {'clauses': {'type': 'query', 'multi': True}}),
+
+    # core queries
+    ('common', None),
+    ('fuzzy', None),
+    ('fuzzy_like_this', None),
+    ('fuzzy_like_this_field', None),
+    ('geo_bounding_box', None),
+    ('geo_distance', None),
+    ('geo_distance_range', None),
+    ('geo_polygon', None),
+    ('geo_shape', None),
+    ('geohash_cell', None),
+    ('ids', None),
+    ('limit', None),
+    ('match', None),
+    ('match_phrase', None),
+    ('match_phrase_prefix', None),
+    ('exists', None),
+    ('missing', None),
+    ('more_like_this', None),
+    ('more_like_this_field', None),
+    ('multi_match', None),
+    ('prefix', None),
+    ('query_string', None),
+    ('range', None),
+    ('regexp', None),
+    ('simple_query_string', None),
+    ('span_term', None),
+    ('template', None),
+    ('term', None),
+    ('terms', None),
+    ('wildcard', None),
+    ('script', None),
+    ('type', None),
)

# generate the query classes dynamically
+for
qname, params_def in QUERIES:
+    qclass = _make_dsl_class(Query, qname, params_def)
+    globals()[qclass.__name__] = qclass
+
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/result.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/result.py
new file mode 100644
index 0000000..6de2ea8
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/result.py
@@ -0,0 +1,82 @@
+from six import iteritems, u
+
+from .utils import AttrDict, AttrList
+
+class SuggestResponse(AttrDict):
+    def success(self):
+        return not self._shards.failed
+
+class Response(AttrDict):
+    def __init__(self, response, callbacks=None):
+        super(AttrDict, self).__setattr__('_callbacks', callbacks or {})
+        super(Response, self).__init__(response)
+
+    def __iter__(self):
+        return iter(self.hits)
+
+    def __getitem__(self, key):
+        # for slicing etc
+        return self.hits[key]
+
+    def __nonzero__(self):
+        return bool(self.hits)
+    __bool__ = __nonzero__
+
+    def __repr__(self):
+        return '<Response: %r>' % self.hits
+
+    def __len__(self):
+        return len(self.hits)
+
+    def success(self):
+        return self._shards.total == self._shards.successful and not self.timed_out
+
+    def _get_result(self, hit):
+        dt = hit['_type']
+        return self._callbacks.get(dt, Result)(hit)
+
+    @property
+    def hits(self):
+        if not hasattr(self, '_hits'):
+            h = self._d_['hits']
+
+            try:
+                hits = AttrList(map(self._get_result, h['hits']))
+            except AttributeError as e:
+                # avoid raising AttributeError since it will be hidden by the property
+                raise TypeError("Could not parse hits.", e)
+
+            # avoid assigning _hits into self._d_
+            super(AttrDict, self).__setattr__('_hits', hits)
+            for k in h:
+                setattr(self._hits, k, h[k])
+        return self._hits
+
+
+class ResultMeta(AttrDict):
+    def __init__(self, document, exclude=('_source', '_fields')):
+        d = dict((k[1:] if k.startswith('_') else k, v) for (k, v) in iteritems(document) if k not in exclude)
+        if 'type' in d:
+            # make sure we are consistent everywhere in python
+            d['doc_type'] = d.pop('type')
+        super(ResultMeta, self).__init__(d)
+
+class Result(AttrDict):
+    def __init__(self, document):
+        data = {}
+        if '_source' in document:
+            data = document['_source']
+        if 'fields' in document:
+            data.update(document['fields'])
+
+        super(Result, self).__init__(data)
+        # assign meta as attribute and not as key in self._d_
+        super(AttrDict, self).__setattr__('meta', ResultMeta(document))
+
+    def __dir__(self):
+        # be sure to expose meta in dir(self)
+        return super(Result, self).__dir__() + ['meta']
+
+    def __repr__(self):
+        return u('<Result(%s/%s/%s): %r>') % (
+            self.meta.index, self.meta.doc_type, self.meta.id, super(Result, self).__repr__())
diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/search.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/search.py
new file mode 100644
index 0000000..d6132d3
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/search.py
@@ -0,0 +1,676 @@
+from six import iteritems, string_types
+
+from elasticsearch.helpers import scan
+
+from .query import Q, EMPTY_QUERY, Bool
+from .aggs import A, AggBase
+from .utils import DslBase
+from .result import Response, Result, SuggestResponse
+from .connections import connections
+
+class QueryProxy(object):
+    """
+    Simple proxy around DSL objects (queries) that can be called
+    (to add query/post_filter) and also allows attribute access which is
+    proxied to the wrapped query.
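+
+    A sketch of both behaviours (the field and value are illustrative)::
+
+        s = Search()
+        s = s.query('match', title='python')  # __call__ adds a query
+        s.query.to_dict()                     # attribute access is proxied
+                                              # to the wrapped Q object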
+ """ + def __init__(self, search, attr_name): + self._search = search + self._proxied = EMPTY_QUERY + self._attr_name = attr_name + + def __nonzero__(self): + return self._proxied != EMPTY_QUERY + __bool__ = __nonzero__ + + def __call__(self, *args, **kwargs): + s = self._search._clone() + getattr(s, self._attr_name)._proxied += Q(*args, **kwargs) + + # always return search to be chainable + return s + + def __getattr__(self, attr_name): + return getattr(self._proxied, attr_name) + + def __setattr__(self, attr_name, value): + if not attr_name.startswith('_'): + self._proxied = Q(self._proxied.to_dict()) + setattr(self._proxied, attr_name, value) + super(QueryProxy, self).__setattr__(attr_name, value) + + +class ProxyDescriptor(object): + """ + Simple descriptor to enable setting of queries and filters as: + + s = Search() + s.query = Q(...) + + """ + def __init__(self, name): + self._attr_name = '_%s_proxy' % name + + def __get__(self, instance, owner): + return getattr(instance, self._attr_name) + + def __set__(self, instance, value): + proxy = getattr(instance, self._attr_name) + proxy._proxied = Q(value) + + +class AggsProxy(AggBase, DslBase): + name = 'aggs' + def __init__(self, search): + self._base = self._search = search + self._params = {'aggs': {}} + + def to_dict(self): + return super(AggsProxy, self).to_dict().get('aggs', {}) + +class Request(object): + def __init__(self, using='default', index=None, doc_type=None, extra=None): + self._using = using + + self._index = None + if isinstance(index, (tuple, list)): + self._index = list(index) + elif index: + self._index = [index] + + self._doc_type = [] + self._doc_type_map = {} + if isinstance(doc_type, (tuple, list)): + for dt in doc_type: + self._add_doc_type(dt) + elif isinstance(doc_type, dict): + self._doc_type.extend(doc_type.keys()) + self._doc_type_map.update(doc_type) + elif doc_type: + self._add_doc_type(doc_type) + + self._params = {} + self._extra = extra or {} + + def params(self, **kwargs): + """ + Specify query params to be used when executing the search. All the + keyword arguments will override the current values. See + http://elasticsearch-py.readthedocs.org/en/master/api.html#elasticsearch.Elasticsearch.search + for all availible parameters. + + Example:: + + s = Search() + s = s.params(routing='user-1', preference='local') + """ + s = self._clone() + s._params.update(kwargs) + return s + + def index(self, *index): + """ + Set the index for the search. If called empty it will rmove all information. + + Example: + + s = Search() + s = s.index('twitter-2015.01.01', 'twitter-2015.01.02') + """ + # .index() resets + s = self._clone() + if not index: + s._index = None + else: + s._index = (self._index or []) + list(index) + return s + + def _add_doc_type(self, doc_type): + if hasattr(doc_type, '_doc_type'): + self._doc_type_map[doc_type._doc_type.name] = doc_type.from_es + doc_type = doc_type._doc_type.name + self._doc_type.append(doc_type) + + def doc_type(self, *doc_type, **kwargs): + """ + Set the type to search through. You can supply a single value or + multiple. Values can be strings or subclasses of ``DocType``. + + You can also pass in any keyword arguments, mapping a doc_type to a + callback that should be used instead of the Result class. + + If no doc_type is supplied any information stored on the instance will + be erased. 
+
+        Example::
+
+            s = Search().doc_type('product', 'store', User, custom=my_callback)
+        """
+        # .doc_type() resets
+        s = self._clone()
+        if not doc_type and not kwargs:
+            s._doc_type = []
+            s._doc_type_map = {}
+        else:
+            for dt in doc_type:
+                s._add_doc_type(dt)
+            s._doc_type.extend(kwargs.keys())
+            s._doc_type_map.update(kwargs)
+        return s
+
+    def using(self, client):
+        """
+        Associate the search request with an elasticsearch client. A fresh copy
+        will be returned with the current instance remaining unchanged.
+
+        :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
+            an alias to look up in ``elasticsearch_dsl.connections``
+
+        """
+        s = self._clone()
+        s._using = client
+        return s
+
+    def extra(self, **kwargs):
+        """
+        Add extra keys to the request body. Mostly here for backwards
+        compatibility.
+        """
+        s = self._clone()
+        if 'from_' in kwargs:
+            kwargs['from'] = kwargs.pop('from_')
+        s._extra.update(kwargs)
+        return s
+
+    def _clone(self):
+        s = self.__class__(using=self._using, index=self._index,
+                           doc_type=self._doc_type)
+        s._doc_type_map = self._doc_type_map.copy()
+        s._extra = self._extra.copy()
+        s._params = self._params.copy()
+        return s
+
+
+class Search(Request):
+    query = ProxyDescriptor('query')
+    post_filter = ProxyDescriptor('post_filter')
+
+    def __init__(self, **kwargs):
+        """
+        Search request to elasticsearch.
+
+        :arg using: `Elasticsearch` instance to use
+        :arg index: limit the search to index
+        :arg doc_type: only query this type.
+
+        All the parameters supplied (or omitted) at creation time can later be
+        overridden by methods (`using`, `index` and `doc_type` respectively).
+        """
+        super(Search, self).__init__(**kwargs)
+
+        self.aggs = AggsProxy(self)
+        self._sort = []
+        self._fields = None
+        self._partial_fields = {}
+        self._highlight = {}
+        self._highlight_opts = {}
+        self._suggest = {}
+        self._script_fields = {}
+        self._response_class = Response
+
+        self._query_proxy = QueryProxy(self, 'query')
+        self._post_filter_proxy = QueryProxy(self, 'post_filter')
+
+    def filter(self, *args, **kwargs):
+        return self.query(Bool(filter=[Q(*args, **kwargs)]))
+
+    def __iter__(self):
+        """
+        Iterate over the hits.
+        """
+        return iter(self.execute())
+
+    def __getitem__(self, n):
+        """
+        Support slicing the `Search` instance for pagination.
+
+        Slicing equates to the from/size parameters. E.g.::
+
+            s = Search().query(...)[0:25]
+
+        is equivalent to::
+
+            s = Search().query(...).extra(from_=0, size=25)
+
+        """
+        s = self._clone()
+
+        if isinstance(n, slice):
+            # If negative slicing, abort.
+            if n.start and n.start < 0 or n.stop and n.stop < 0:
+                raise ValueError("Search does not support negative slicing.")
+            # Elasticsearch won't get all results so we default to size: 10 if
+            # stop not given.
+            s._extra['from'] = n.start or 0
+            s._extra['size'] = n.stop - (n.start or 0) if n.stop is not None else 10
+            return s
+        else:   # This is an index lookup, equivalent to slicing by [n:n+1].
+            # If negative index, abort.
+            if n < 0:
+                raise ValueError("Search does not support negative indexing.")
+            s._extra['from'] = n
+            s._extra['size'] = 1
+            return s
+
+    @classmethod
+    def from_dict(cls, d):
+        """
+        Construct a `Search` instance from a raw dict containing the search
+        body. Useful when migrating from raw dictionaries.
+
+        Example::
+
+            s = Search.from_dict({
+                "query": {
+                    "bool": {
+                        "must": [...]
+ } + }, + "aggs": {...} + }) + s = s.filter('term', published=True) + """ + s = cls() + s.update_from_dict(d) + return s + + def _clone(self): + """ + Return a clone of the current search request. Performs a shallow copy + of all the underlying objects. Used internally by most state modifying + APIs. + """ + s = super(Search, self)._clone() + + s._response_class = self._response_class + s._sort = self._sort[:] + s._fields = self._fields[:] if self._fields is not None else None + s._partial_fields = self._partial_fields.copy() + s._highlight = self._highlight.copy() + s._highlight_opts = self._highlight_opts.copy() + s._suggest = self._suggest.copy() + s._script_fields = self._script_fields.copy() + for x in ('query', 'post_filter'): + getattr(s, x)._proxied = getattr(self, x)._proxied + + # copy top-level bucket definitions + if self.aggs._params.get('aggs'): + s.aggs._params = {'aggs': self.aggs._params['aggs'].copy()} + return s + + def response_class(self, cls): + """ + Override the default wrapper used for the response. + """ + s = self._clone() + s._response_class = cls + return s + + def update_from_dict(self, d): + """ + Apply options from a serialized body to the current instance. Modifies + the object in-place. Used mostly by ``from_dict``. + """ + d = d.copy() + if 'query' in d: + self.query._proxied = Q(d.pop('query')) + if 'post_filter' in d: + self.post_filter._proxied = Q(d.pop('post_filter')) + + aggs = d.pop('aggs', d.pop('aggregations', {})) + if aggs: + self.aggs._params = { + 'aggs': dict( + (name, A(value)) for (name, value) in iteritems(aggs)) + } + if 'sort' in d: + self._sort = d.pop('sort') + if 'fields' in d: + self._fields = d.pop('fields') + if 'partial_fields' in d: + self._partial_fields = d.pop('partial_fields') + if 'highlight' in d: + high = d.pop('highlight').copy() + self._highlight = high.pop('fields') + self._highlight_opts = high + if 'suggest' in d: + self._suggest = d.pop('suggest') + if 'text' in self._suggest: + text = self._suggest.pop('text') + for s in self._suggest.values(): + s.setdefault('text', text) + if 'script_fields' in d: + self._script_fields = d.pop('script_fields') + self._extra = d + + def script_fields(self, **kwargs): + """ + Define script fields to be calculated on hits. See + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html + for more details. + + Example:: + + s = Search() + s = s.script_fields(times_two="doc['field'].value * 2") + s = s.script_fields( + times_three={ + 'script': "doc['field'].value * n", + 'params': {'n': 3} + } + ) + + """ + s = self._clone() + for name in kwargs: + if isinstance(kwargs[name], string_types): + kwargs[name] = {'script': kwargs[name]} + s._script_fields.update(kwargs) + return s + + def fields(self, fields=None): + """ + Selectively load specific stored fields for each document. + + :arg fields: list of fields to return for each document + + If ``fields`` is None, the entire document will be returned for + each hit. If fields is the empty list, no fields will be + returned for each hit, just the metadata. 
+ """ + s = self._clone() + s._fields = fields + return s + + def partial_fields(self, **partial): + """ + Control which part of the fields to extract from the `_source` document + + :kwargs partial: dict specifying which fields to extract from the source + + An example usage would be: + + s = Search().partial_fields(authors_data={ + 'include': ['authors.*'], + 'exclude': ['authors.name'] + }) + + which will include all fields from the `authors` nested property except for + each authors `name` + + If ``partial`` is not provided, the whole `_source` will be fetched. Calling this multiple + times will override the previous values with the new ones. + """ + s = self._clone() + s._partial_fields = partial + return s + + def sort(self, *keys): + """ + Add sorting information to the search request. If called without + arguments it will remove all sort requirements. Otherwise it will + replace them. Acceptable arguments are:: + + 'some.field' + '-some.other.field' + {'different.field': {'any': 'dict'}} + + so for example:: + + s = Search().sort( + 'category', + '-title', + {"price" : {"order" : "asc", "mode" : "avg"}} + ) + + will sort by ``category``, ``title`` (in descending order) and + ``price`` in ascending order using the ``avg`` mode. + + The API returns a copy of the Search object and can thus be chained. + """ + s = self._clone() + s._sort = [] + for k in keys: + if isinstance(k, string_types) and k.startswith('-'): + k = {k[1:]: {"order": "desc"}} + s._sort.append(k) + return s + + def highlight_options(self, **kwargs): + """ + Update the global highlighting options used for this request. For + example:: + + s = Search() + s = s.highlight_options(order='score') + """ + s = self._clone() + s._highlight_opts.update(kwargs) + return s + + def highlight(self, *fields, **kwargs): + """ + Request highliting of some fields. All keyword arguments passed in will be + used as parameters. Example:: + + Search().highlight('title', 'body', fragment_size=50) + + will produce the equivalent of:: + + { + "highlight": { + "fields": { + "body": {"fragment_size": 50}, + "title": {"fragment_size": 50} + } + } + } + + """ + s = self._clone() + for f in fields: + s._highlight[f] = kwargs + return s + + def suggest(self, name, text, **kwargs): + """ + Add a suggestions request to the search. + + :arg name: name of the suggestion + :arg text: text to suggest on + + All keyword arguments will be added to the suggestions body. For example:: + + s = Search() + s = s.suggest('suggestion-1', 'Elasticserach', term={'field': 'body'}) + """ + s = self._clone() + s._suggest[name] = {'text': text} + s._suggest[name].update(kwargs) + return s + + def to_dict(self, count=False, **kwargs): + """ + Serialize the search into the dictionary that will be sent over as the + request's body. + + :arg count: a flag to specify we are interested in a body for count - + no aggregations, no pagination bounds etc. + + All additional keyword arguments will be included into the dictionary. 
+ """ + d = {"query": self.query.to_dict()} + + if self.post_filter: + d['post_filter'] = self.post_filter.to_dict() + + # count request doesn't care for sorting and other things + if not count: + if self.aggs.aggs: + d.update(self.aggs.to_dict()) + + if self._sort: + d['sort'] = self._sort + + d.update(self._extra) + + if self._fields is not None: + d['fields'] = self._fields + + if self._partial_fields: + d['partial_fields'] = self._partial_fields + + if self._highlight: + d['highlight'] = {'fields': self._highlight} + d['highlight'].update(self._highlight_opts) + + if self._suggest: + d['suggest'] = self._suggest + + if self._script_fields: + d['script_fields'] = self._script_fields + + d.update(kwargs) + return d + + def count(self): + """ + Return the number of hits matching the query and filters. Note that + only the actual number is returned. + """ + if hasattr(self, '_response'): + return self._response.hits.total + + es = connections.get_connection(self._using) + + d = self.to_dict(count=True) + # TODO: failed shards detection + return es.count( + index=self._index, + doc_type=self._doc_type, + body=d + )['count'] + + def execute(self, ignore_cache=False): + """ + Execute the search and return an instance of ``Response`` wrapping all + the data. + + :arg response_class: optional subclass of ``Response`` to use instead. + """ + if ignore_cache or not hasattr(self, '_response'): + es = connections.get_connection(self._using) + + self._response = self._response_class( + es.search( + index=self._index, + doc_type=self._doc_type, + body=self.to_dict(), + **self._params + ), + callbacks=self._doc_type_map + ) + return self._response + + def execute_suggest(self): + """ + Execute just the suggesters. Ignores all parts of the request that are + not relevant, including ``query`` and ``doc_type``. + """ + es = connections.get_connection(self._using) + return SuggestResponse( + es.suggest( + index=self._index, + body=self._suggest, + **self._params + ) + ) + + def scan(self): + """ + Turn the search into a scan search and return a generator that will + iterate over all the documents matching the query. 
+ + Use ``params`` method to specify any additional arguments you with to + pass to the underlying ``scan`` helper from ``elasticsearch-py`` - + http://elasticsearch-py.readthedocs.org/en/master/helpers.html#elasticsearch.helpers.scan + + """ + es = connections.get_connection(self._using) + + for hit in scan( + es, + query=self.to_dict(), + index=self._index, + doc_type=self._doc_type, + **self._params + ): + yield self._doc_type_map.get(hit['_type'], Result)(hit) + + +class MultiSearch(Request): + def __init__(self, **kwargs): + super(MultiSearch, self).__init__(**kwargs) + self._searches = [] + + def __getitem__(self, key): + return self._searches[key] + + def __iter__(self): + return iter(self._searches) + + def _clone(self): + ms = super(MultiSearch, self)._clone() + ms._searches = self._searches[:] + return ms + + def add(self, search): + ms = self._clone() + ms._searches.append(search) + return ms + + def to_dict(self): + out = [] + for s in self._searches: + meta = {} + if s._index: + meta['index'] = s._index + if s._doc_type: + meta['type'] = s._doc_type + meta.update(s._params) + + out.append(meta) + out.append(s.to_dict()) + + return out + + def execute(self, ignore_cache=False): + if ignore_cache or not hasattr(self, '_response'): + es = connections.get_connection(self._using) + + responses = es.msearch( + index=self._index, + doc_type=self._doc_type, + body=self.to_dict(), + **self._params + ) + + out = [] + for s, r in zip(self._searches, responses['responses']): + r = Response(r, callbacks=s._doc_type_map) + r.search = s + out.append(r) + + self._response = out + + return self._response diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/serializer.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/serializer.py new file mode 100644 index 0000000..ed0956b --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/serializer.py @@ -0,0 +1,13 @@ +from elasticsearch.serializer import JSONSerializer + +from .utils import AttrList + +class AttrJSONSerializer(JSONSerializer): + def default(self, data): + if isinstance(data, AttrList): + return data._l_ + if hasattr(data, 'to_dict'): + return data.to_dict() + return super(AttrJSONSerializer, self).default(data) + +serializer = AttrJSONSerializer() diff --git a/venv/lib/python2.7/site-packages/elasticsearch_dsl/utils.py b/venv/lib/python2.7/site-packages/elasticsearch_dsl/utils.py new file mode 100644 index 0000000..6fdec33 --- /dev/null +++ b/venv/lib/python2.7/site-packages/elasticsearch_dsl/utils.py @@ -0,0 +1,396 @@ +from __future__ import unicode_literals + +from six import iteritems, add_metaclass +from six.moves import map + +from .exceptions import UnknownDslObject, ValidationException + +SKIP_VALUES = ('', None) + +def _wrap(val, obj_wrapper=None): + if isinstance(val, dict): + return AttrDict(val) if obj_wrapper is None else obj_wrapper(val) + if isinstance(val, list): + return AttrList(val) + return val + +def _make_dsl_class(base, name, params_def=None, suffix=''): + """ + Generate a DSL class based on the name of the DSL object and it's parameters + """ + attrs = {'name': name} + if params_def: + attrs['_param_defs'] = params_def + cls_name = str(''.join(s.title() for s in name.split('_')) + suffix) + return type(cls_name, (base, ), attrs) + +class AttrList(object): + def __init__(self, l, obj_wrapper=None): + # make iteables into lists + if not isinstance(l, list): + l = list(l) + self._l_ = l + self._obj_wrapper = obj_wrapper + + def __repr__(self): + return repr(self._l_) + + def 
__eq__(self, other): + if isinstance(other, AttrList): + return other._l_ == self._l_ + # make sure we still equal to a dict with the same data + return other == self._l_ + + def __getitem__(self, k): + l = self._l_[k] + if isinstance(k, slice): + return AttrList(l) + return _wrap(l, self._obj_wrapper) + + def __setitem__(self, k, value): + self._l_[k] = value + + def __iter__(self): + return map(lambda i: _wrap(i, self._obj_wrapper), self._l_) + + def __len__(self): + return len(self._l_) + + def __nonzero__(self): + return bool(self._l_) + __bool__ = __nonzero__ + + def __getattr__(self, name): + return getattr(self._l_, name) + + +class AttrDict(object): + """ + Helper class to provide attribute like access (read and write) to + dictionaries. Used to provide a convenient way to access both results and + nested dsl dicts. + """ + def __init__(self, d): + # assign the inner dict manually to prevent __setattr__ from firing + super(AttrDict, self).__setattr__('_d_', d) + + def __contains__(self, key): + return key in self._d_ + + def __nonzero__(self): + return bool(self._d_) + __bool__ = __nonzero__ + + def __dir__(self): + # introspection for auto-complete in IPython etc + return list(self._d_.keys()) + + def __eq__(self, other): + if isinstance(other, AttrDict): + return other._d_ == self._d_ + # make sure we still equal to a dict with the same data + return other == self._d_ + + def __repr__(self): + r = repr(self._d_) + if len(r) > 60: + r = r[:60] + '...}' + return r + + def __getattr__(self, attr_name): + try: + return _wrap(self._d_[attr_name]) + except KeyError: + raise AttributeError( + '%r object has no attribute %r' % (self.__class__.__name__, attr_name)) + + def __delattr__(self, attr_name): + try: + del self._d_[attr_name] + except KeyError: + raise AttributeError( + '%r object has no attribute %r' % (self.__class__.__name__, attr_name)) + + def __getitem__(self, key): + return _wrap(self._d_[key]) + + def __setitem__(self, key, value): + self._d_[key] = value + + def __delitem__(self, key): + del self._d_[key] + + def __setattr__(self, name, value): + if name in self._d_ or not hasattr(self.__class__, name): + self._d_[name] = value + else: + # there is an attribute on the class (could be property, ..) - don't add it as field + super(AttrDict, self).__setattr__(name, value) + + def __iter__(self): + return iter(self._d_) + + def to_dict(self): + return self._d_ + + +class DslMeta(type): + """ + Base Metaclass for DslBase subclasses that builds a registry of all classes + for given DslBase subclass (== all the query types for the Query subclass + of DslBase). + + It then uses the information from that registry (as well as `name` and + `shortcut` attributes from the base class) to construct any subclass based + on it's name. + + For typical use see `QueryMeta` and `Query` in `elasticsearch_dsl.query`. + """ + _types = {} + def __init__(cls, name, bases, attrs): + super(DslMeta, cls).__init__(name, bases, attrs) + # skip for DslBase + if not hasattr(cls, '_type_shortcut'): + return + if cls.name is None: + # abstract base class, register it's shortcut + cls._types[cls._type_name] = cls._type_shortcut + # and create a registry for subclasses + if not hasattr(cls, '_classes'): + cls._classes = {} + elif cls.name not in cls._classes: + # normal class, register it + cls._classes[cls.name] = cls + + @classmethod + def get_dsl_type(cls, name): + try: + return cls._types[name] + except KeyError: + raise UnknownDslObject('DSL type %s does not exist.' 
% name)
+
+
+@add_metaclass(DslMeta)
+class DslBase(object):
+    """
+    Base class for all DSL objects - queries, filters, aggregations etc. Wraps
+    a dictionary representing the object's json.
+
+    Provides several features:
+        - attribute access to the wrapped dictionary (.field instead of ['field'])
+        - _clone method returning a deep copy of self
+        - to_dict method to serialize into dict (to be sent via elasticsearch-py)
+        - basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO:
+          move into a class specific for Query/Filter
+        - respects the definition of the class and (de)serializes its
+          attributes based on the `_param_defs` definition (for example turning
+          all values in the `must` attribute into Query objects)
+    """
+    _param_defs = {}
+
+    @classmethod
+    def get_dsl_class(cls, name):
+        try:
+            return cls._classes[name]
+        except KeyError:
+            raise UnknownDslObject('DSL class `%s` does not exist in %s.' % (name, cls._type_name))
+
+    def __init__(self, **params):
+        self._params = {}
+        for pname, pvalue in iteritems(params):
+            if '__' in pname:
+                pname = pname.replace('__', '.')
+            self._setattr(pname, pvalue)
+
+    def _repr_params(self):
+        """ Produce a repr of all our parameters to be used in __repr__. """
+        return ', '.join(
+            '%s=%r' % (n.replace('.', '__'), v)
+            for (n, v) in sorted(iteritems(self._params))
+            # make sure we don't include empty typed params
+            if 'type' not in self._param_defs.get(n, {}) or v
+        )
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            self._repr_params()
+        )
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and other.to_dict() == self.to_dict()
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __setattr__(self, name, value):
+        if name.startswith('_'):
+            return super(DslBase, self).__setattr__(name, value)
+        return self._setattr(name, value)
+
+    def _setattr(self, name, value):
+        # if this attribute has special type assigned to it...
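+        # (illustrative example: Bool declares 'must' as a multi 'query'
+        # param, so plain dicts assigned to it are coerced into Query
+        # objects by the registered shortcut below)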
+ if name in self._param_defs: + pinfo = self._param_defs[name] + + if 'type' in pinfo: + # get the shortcut used to construct this type (query.Q, aggs.A, etc) + shortcut = self.__class__.get_dsl_type(pinfo['type']) + if pinfo.get('multi'): + if not isinstance(value, (tuple, list)): + value = (value, ) + value = list(map(shortcut, value)) + + # dict(name -> DslBase), make sure we pickup all the objs + elif pinfo.get('hash'): + value = dict((k, shortcut(v)) for (k, v) in iteritems(value)) + + # single value object, just convert + else: + value = shortcut(value) + self._params[name] = value + + def __getattr__(self, name): + if name.startswith('_'): + raise AttributeError( + '%r object has no attribute %r' % (self.__class__.__name__, name)) + + value = None + try: + value = self._params[name] + except KeyError: + # compound types should never throw AttributeError and return empty + # container instead + if name in self._param_defs: + pinfo = self._param_defs[name] + if pinfo.get('multi'): + value = self._params.setdefault(name, []) + elif pinfo.get('hash'): + value = self._params.setdefault(name, {}) + if value is None: + raise AttributeError( + '%r object has no attribute %r' % (self.__class__.__name__, name)) + + # wrap nested dicts in AttrDict for convenient access + if isinstance(value, dict): + return AttrDict(value) + return value + + def to_dict(self): + """ + Serialize the DSL object to plain dict + """ + d = {} + for pname, value in iteritems(self._params): + pinfo = self._param_defs.get(pname) + + # typed param + if pinfo and 'type' in pinfo: + # don't serialize empty lists and dicts for typed fields + if value in ({}, []): + continue + + # multi-values are serialized as list of dicts + if pinfo.get('multi'): + value = list(map(lambda x: x.to_dict(), value)) + + # squash all the hash values into one dict + elif pinfo.get('hash'): + value = dict((k, v.to_dict()) for k, v in iteritems(value)) + + # serialize single values + else: + value = value.to_dict() + + # serialize anything with to_dict method + elif hasattr(value, 'to_dict'): + value = value.to_dict() + + d[pname] = value + return {self.name: d} + + def _clone(self): + return self._type_shortcut(self.to_dict()) + + +class ObjectBase(AttrDict): + def __init__(self, **kwargs): + m = self._doc_type.mapping + for k in m: + if k in kwargs and m[k]._coerce: + kwargs[k] = m[k].deserialize(kwargs[k]) + super(ObjectBase, self).__init__(kwargs) + + def __getattr__(self, name): + try: + return super(ObjectBase, self).__getattr__(name) + except AttributeError: + if name in self._doc_type.mapping: + f = self._doc_type.mapping[name] + if hasattr(f, 'empty'): + value = f.empty() + if value not in SKIP_VALUES: + setattr(self, name, value) + value = getattr(self, name) + return value + raise + + def __setattr__(self, name, value): + if name in self._doc_type.mapping: + value = self._doc_type.mapping[name].deserialize(value) + super(ObjectBase, self).__setattr__(name, value) + + def to_dict(self): + out = {} + for k, v in iteritems(self._d_): + try: + f = self._doc_type.mapping[k] + if f._coerce: + v = f.serialize(v) + except KeyError: + pass + + # don't serialize empty values + # careful not to include numeric zeros + if v in ([], {}, None): + continue + + out[k] = v + return out + + def clean_fields(self): + errors = {} + for name in self._doc_type.mapping: + field = self._doc_type.mapping[name] + data = self._d_.get(name, None) + try: + # save the cleaned value + data = field.clean(data) + except ValidationException as e: + 
errors.setdefault(name, []).append(e) + + if name in self._d_ or data not in ([], {}, None): + self._d_[name] = data + + if errors: + raise ValidationException(errors) + + def clean(self): + pass + + def full_clean(self): + self.clean_fields() + self.clean() + +def merge(data, new_data): + if not (isinstance(data, (AttrDict, dict)) + and isinstance(new_data, (AttrDict, dict))): + raise ValueError('You can only merge two dicts! Got %r and %r instead.' % (data, new_data)) + + for key, value in iteritems(new_data): + if key in data and isinstance(data[key], (AttrDict, dict)): + merge(data[key], value) + else: + data[key] = value + + diff --git a/venv/lib/python2.7/site-packages/flask/__init__.py b/venv/lib/python2.7/site-packages/flask/__init__.py new file mode 100644 index 0000000..70d5e58 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/__init__.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" + flask + ~~~~~ + + A microframework based on Werkzeug. It's extensively documented + and follows best practice patterns. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +__version__ = '0.11.1' + +# utilities we import from Werkzeug and Jinja2 that are unused +# in the module but are exported as public interface. +from werkzeug.exceptions import abort +from werkzeug.utils import redirect +from jinja2 import Markup, escape + +from .app import Flask, Request, Response +from .config import Config +from .helpers import url_for, flash, send_file, send_from_directory, \ + get_flashed_messages, get_template_attribute, make_response, safe_join, \ + stream_with_context +from .globals import current_app, g, request, session, _request_ctx_stack, \ + _app_ctx_stack +from .ctx import has_request_context, has_app_context, \ + after_this_request, copy_current_request_context +from .blueprints import Blueprint +from .templating import render_template, render_template_string + +# the signals +from .signals import signals_available, template_rendered, request_started, \ + request_finished, got_request_exception, request_tearing_down, \ + appcontext_tearing_down, appcontext_pushed, \ + appcontext_popped, message_flashed, before_render_template + +# We're not exposing the actual json module but a convenient wrapper around +# it. +from . import json + +# This was the only thing that flask used to export at one point and it had +# a more generic name. +jsonify = json.jsonify + +# backwards compat, goes away in 1.0 +from .sessions import SecureCookieSession as Session +json_available = True diff --git a/venv/lib/python2.7/site-packages/flask/__main__.py b/venv/lib/python2.7/site-packages/flask/__main__.py new file mode 100644 index 0000000..cbefccd --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/__main__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +""" + flask.__main__ + ~~~~~~~~~~~~~~ + + Alias for flask.run for the command line. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + + +if __name__ == '__main__': + from .cli import main + main(as_module=True) diff --git a/venv/lib/python2.7/site-packages/flask/_compat.py b/venv/lib/python2.7/site-packages/flask/_compat.py new file mode 100644 index 0000000..bfe607d --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/_compat.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +""" + flask._compat + ~~~~~~~~~~~~~ + + Some py2/py3 compatibility support based on a stripped down + version of six so we don't have to depend on a specific version + of it. 
+ + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import sys + +PY2 = sys.version_info[0] == 2 +_identity = lambda x: x + + +if not PY2: + text_type = str + string_types = (str,) + integer_types = (int,) + + iterkeys = lambda d: iter(d.keys()) + itervalues = lambda d: iter(d.values()) + iteritems = lambda d: iter(d.items()) + + from io import StringIO + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + implements_to_string = _identity + +else: + text_type = unicode + string_types = (str, unicode) + integer_types = (int, long) + + iterkeys = lambda d: d.iterkeys() + itervalues = lambda d: d.itervalues() + iteritems = lambda d: d.iteritems() + + from cStringIO import StringIO + + exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') + + def implements_to_string(cls): + cls.__unicode__ = cls.__str__ + cls.__str__ = lambda x: x.__unicode__().encode('utf-8') + return cls + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a + # dummy metaclass for one level of class instantiation that replaces + # itself with the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +# Certain versions of pypy have a bug where clearing the exception stack +# breaks the __exit__ function in a very peculiar way. This is currently +# true for pypy 2.2.1 for instance. The second level of exception blocks +# is necessary because pypy seems to forget to check if an exception +# happened until the next bytecode instruction? +BROKEN_PYPY_CTXMGR_EXIT = False +if hasattr(sys, 'pypy_version_info'): + class _Mgr(object): + def __enter__(self): + return self + def __exit__(self, *args): + sys.exc_clear() + try: + try: + with _Mgr(): + raise AssertionError() + except: + raise + except TypeError: + BROKEN_PYPY_CTXMGR_EXIT = True + except AssertionError: + pass diff --git a/venv/lib/python2.7/site-packages/flask/app.py b/venv/lib/python2.7/site-packages/flask/app.py new file mode 100644 index 0000000..b1ea046 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/app.py @@ -0,0 +1,2006 @@ +# -*- coding: utf-8 -*- +""" + flask.app + ~~~~~~~~~ + + This module implements the central WSGI application object. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import os +import sys +from threading import Lock +from datetime import timedelta +from itertools import chain +from functools import update_wrapper +from collections import deque + +from werkzeug.datastructures import ImmutableDict +from werkzeug.routing import Map, Rule, RequestRedirect, BuildError +from werkzeug.exceptions import HTTPException, InternalServerError, \ + MethodNotAllowed, BadRequest, default_exceptions + +from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \ + locked_cached_property, _endpoint_from_view_func, find_package, \ + get_debug_flag +from . 
import json, cli +from .wrappers import Request, Response +from .config import ConfigAttribute, Config +from .ctx import RequestContext, AppContext, _AppCtxGlobals +from .globals import _request_ctx_stack, request, session, g +from .sessions import SecureCookieSessionInterface +from .templating import DispatchingJinjaLoader, Environment, \ + _default_template_ctx_processor +from .signals import request_started, request_finished, got_request_exception, \ + request_tearing_down, appcontext_tearing_down +from ._compat import reraise, string_types, text_type, integer_types + +# a lock used for logger initialization +_logger_lock = Lock() + +# a singleton sentinel value for parameter defaults +_sentinel = object() + + +def _make_timedelta(value): + if not isinstance(value, timedelta): + return timedelta(seconds=value) + return value + + +def setupmethod(f): + """Wraps a method so that it performs a check in debug mode if the + first request was already handled. + """ + def wrapper_func(self, *args, **kwargs): + if self.debug and self._got_first_request: + raise AssertionError('A setup function was called after the ' + 'first request was handled. This usually indicates a bug ' + 'in the application where a module was not imported ' + 'and decorators or other functionality was called too late.\n' + 'To fix this make sure to import all your view modules, ' + 'database models and everything related at a central place ' + 'before the application starts serving requests.') + return f(self, *args, **kwargs) + return update_wrapper(wrapper_func, f) + + +class Flask(_PackageBoundObject): + """The flask object implements a WSGI application and acts as the central + object. It is passed the name of the module or package of the + application. Once it is created it will act as a central registry for + the view functions, the URL rules, template configuration and much more. + + The name of the package is used to resolve resources from inside the + package or the folder the module is contained in depending on if the + package parameter resolves to an actual python package (a folder with + an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). + + For more information about resource loading, see :func:`open_resource`. + + Usually you create a :class:`Flask` instance in your main module or + in the :file:`__init__.py` file of your package like this:: + + from flask import Flask + app = Flask(__name__) + + .. admonition:: About the First Parameter + + The idea of the first parameter is to give Flask an idea of what + belongs to your application. This name is used to find resources + on the filesystem, can be used by extensions to improve debugging + information and a lot more. + + So it's important what you provide there. If you are using a single + module, `__name__` is always the correct value. If you however are + using a package, it's usually recommended to hardcode the name of + your package there. + + For example if your application is defined in :file:`yourapplication/app.py` + you should create it with one of the two versions below:: + + app = Flask('yourapplication') + app = Flask(__name__.split('.')[0]) + + Why is that? The application will work even with `__name__`, thanks + to how resources are looked up. However it will make debugging more + painful. Certain extensions can make assumptions based on the + import name of your application. For example the Flask-SQLAlchemy + extension will look for the code in your application that triggered + an SQL query in debug mode. 
If the import name is not properly set + up, that debugging information is lost. (For example it would only + pick up SQL queries in `yourapplication.app` and not + `yourapplication.views.frontend`) + + .. versionadded:: 0.7 + The `static_url_path`, `static_folder`, and `template_folder` + parameters were added. + + .. versionadded:: 0.8 + The `instance_path` and `instance_relative_config` parameters were + added. + + .. versionadded:: 0.11 + The `root_path` parameter was added. + + :param import_name: the name of the application package + :param static_url_path: can be used to specify a different path for the + static files on the web. Defaults to the name + of the `static_folder` folder. + :param static_folder: the folder with static files that should be served + at `static_url_path`. Defaults to the ``'static'`` + folder in the root path of the application. + :param template_folder: the folder that contains the templates that should + be used by the application. Defaults to + ``'templates'`` folder in the root path of the + application. + :param instance_path: An alternative instance path for the application. + By default the folder ``'instance'`` next to the + package or module is assumed to be the instance + path. + :param instance_relative_config: if set to ``True`` relative filenames + for loading the config are assumed to + be relative to the instance path instead + of the application root. + :param root_path: Flask by default will automatically calculate the path + to the root of the application. In certain situations + this cannot be achieved (for instance if the package + is a Python 3 namespace package) and needs to be + manually defined. + """ + + #: The class that is used for request objects. See :class:`~flask.Request` + #: for more information. + request_class = Request + + #: The class that is used for response objects. See + #: :class:`~flask.Response` for more information. + response_class = Response + + #: The class that is used for the Jinja environment. + #: + #: .. versionadded:: 0.11 + jinja_environment = Environment + + #: The class that is used for the :data:`~flask.g` instance. + #: + #: Example use cases for a custom class: + #: + #: 1. Store arbitrary attributes on flask.g. + #: 2. Add a property for lazy per-request database connectors. + #: 3. Return None instead of AttributeError on unexpected attributes. + #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. + #: + #: In Flask 0.9 this property was called `request_globals_class` but it + #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the + #: flask.g object is now application context scoped. + #: + #: .. versionadded:: 0.10 + app_ctx_globals_class = _AppCtxGlobals + + # Backwards compatibility support + def _get_request_globals_class(self): + return self.app_ctx_globals_class + def _set_request_globals_class(self, value): + from warnings import warn + warn(DeprecationWarning('request_globals_class attribute is now ' + 'called app_ctx_globals_class')) + self.app_ctx_globals_class = value + request_globals_class = property(_get_request_globals_class, + _set_request_globals_class) + del _get_request_globals_class, _set_request_globals_class + + #: The class that is used for the ``config`` attribute of this app. + #: Defaults to :class:`~flask.Config`. + #: + #: Example use cases for a custom class: + #: + #: 1. Default values for certain config options. + #: 2. Access to config values through attributes in addition to keys. + #: + #: .. 
versionadded:: 0.11 + config_class = Config + + #: The debug flag. Set this to ``True`` to enable debugging of the + #: application. In debug mode the debugger will kick in when an unhandled + #: exception occurs and the integrated server will automatically reload + #: the application if changes in the code are detected. + #: + #: This attribute can also be configured from the config with the ``DEBUG`` + #: configuration key. Defaults to ``False``. + debug = ConfigAttribute('DEBUG') + + #: The testing flag. Set this to ``True`` to enable the test mode of + #: Flask extensions (and in the future probably also Flask itself). + #: For example this might activate unittest helpers that have an + #: additional runtime cost which should not be enabled by default. + #: + #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the + #: default it's implicitly enabled. + #: + #: This attribute can also be configured from the config with the + #: ``TESTING`` configuration key. Defaults to ``False``. + testing = ConfigAttribute('TESTING') + + #: If a secret key is set, cryptographic components can use this to + #: sign cookies and other things. Set this to a complex random value + #: when you want to use the secure cookie for instance. + #: + #: This attribute can also be configured from the config with the + #: ``SECRET_KEY`` configuration key. Defaults to ``None``. + secret_key = ConfigAttribute('SECRET_KEY') + + #: The secure cookie uses this for the name of the session cookie. + #: + #: This attribute can also be configured from the config with the + #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'`` + session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') + + #: A :class:`~datetime.timedelta` which is used to set the expiration + #: date of a permanent session. The default is 31 days which makes a + #: permanent session survive for roughly one month. + #: + #: This attribute can also be configured from the config with the + #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to + #: ``timedelta(days=31)`` + permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME', + get_converter=_make_timedelta) + + #: A :class:`~datetime.timedelta` which is used as default cache_timeout + #: for the :func:`send_file` functions. The default is 12 hours. + #: + #: This attribute can also be configured from the config with the + #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration + #: variable can also be set with an integer value used as seconds. + #: Defaults to ``timedelta(hours=12)`` + send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT', + get_converter=_make_timedelta) + + #: Enable this if you want to use the X-Sendfile feature. Keep in + #: mind that the server has to support this. This only affects files + #: sent with the :func:`send_file` method. + #: + #: .. versionadded:: 0.2 + #: + #: This attribute can also be configured from the config with the + #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``. + use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') + + #: The name of the logger to use. By default the logger name is the + #: package name passed to the constructor. + #: + #: .. versionadded:: 0.4 + logger_name = ConfigAttribute('LOGGER_NAME') + + #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`. + #: + #: .. versionadded:: 0.10 + json_encoder = json.JSONEncoder + + #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`. + #: + #: .. 
versionadded:: 0.10 + json_decoder = json.JSONDecoder + + #: Options that are passed directly to the Jinja2 environment. + jinja_options = ImmutableDict( + extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'] + ) + + #: Default configuration parameters. + default_config = ImmutableDict({ + 'DEBUG': get_debug_flag(default=False), + 'TESTING': False, + 'PROPAGATE_EXCEPTIONS': None, + 'PRESERVE_CONTEXT_ON_EXCEPTION': None, + 'SECRET_KEY': None, + 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), + 'USE_X_SENDFILE': False, + 'LOGGER_NAME': None, + 'LOGGER_HANDLER_POLICY': 'always', + 'SERVER_NAME': None, + 'APPLICATION_ROOT': None, + 'SESSION_COOKIE_NAME': 'session', + 'SESSION_COOKIE_DOMAIN': None, + 'SESSION_COOKIE_PATH': None, + 'SESSION_COOKIE_HTTPONLY': True, + 'SESSION_COOKIE_SECURE': False, + 'SESSION_REFRESH_EACH_REQUEST': True, + 'MAX_CONTENT_LENGTH': None, + 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12), + 'TRAP_BAD_REQUEST_ERRORS': False, + 'TRAP_HTTP_EXCEPTIONS': False, + 'EXPLAIN_TEMPLATE_LOADING': False, + 'PREFERRED_URL_SCHEME': 'http', + 'JSON_AS_ASCII': True, + 'JSON_SORT_KEYS': True, + 'JSONIFY_PRETTYPRINT_REGULAR': True, + 'JSONIFY_MIMETYPE': 'application/json', + 'TEMPLATES_AUTO_RELOAD': None, + }) + + #: The rule object to use for URL rules created. This is used by + #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. + #: + #: .. versionadded:: 0.7 + url_rule_class = Rule + + #: the test client that is used with when `test_client` is used. + #: + #: .. versionadded:: 0.7 + test_client_class = None + + #: the session interface to use. By default an instance of + #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. + #: + #: .. versionadded:: 0.8 + session_interface = SecureCookieSessionInterface() + + def __init__(self, import_name, static_path=None, static_url_path=None, + static_folder='static', template_folder='templates', + instance_path=None, instance_relative_config=False, + root_path=None): + _PackageBoundObject.__init__(self, import_name, + template_folder=template_folder, + root_path=root_path) + if static_path is not None: + from warnings import warn + warn(DeprecationWarning('static_path is now called ' + 'static_url_path'), stacklevel=2) + static_url_path = static_path + + if static_url_path is not None: + self.static_url_path = static_url_path + if static_folder is not None: + self.static_folder = static_folder + if instance_path is None: + instance_path = self.auto_find_instance_path() + elif not os.path.isabs(instance_path): + raise ValueError('If an instance path is provided it must be ' + 'absolute. A relative path was given instead.') + + #: Holds the path to the instance folder. + #: + #: .. versionadded:: 0.8 + self.instance_path = instance_path + + #: The configuration dictionary as :class:`Config`. This behaves + #: exactly like a regular dictionary but supports additional methods + #: to load a config from files. + self.config = self.make_config(instance_relative_config) + + # Prepare the deferred setup of the logger. + self._logger = None + self.logger_name = self.import_name + + #: A dictionary of all view functions registered. The keys will + #: be function names which are also used to generate URLs and + #: the values are the function objects themselves. + #: To register a view function, use the :meth:`route` decorator. + self.view_functions = {} + + # support for the now deprecated `error_handlers` attribute. The + # :attr:`error_handler_spec` shall be used now. 
+ self._error_handlers = {} + + #: A dictionary of all registered error handlers. The key is ``None`` + #: for error handlers active on the application, otherwise the key is + #: the name of the blueprint. Each key points to another dictionary + #: where the key is the status code of the http exception. The + #: special key ``None`` points to a list of tuples where the first item + #: is the class for the instance check and the second the error handler + #: function. + #: + #: To register a error handler, use the :meth:`errorhandler` + #: decorator. + self.error_handler_spec = {None: self._error_handlers} + + #: A list of functions that are called when :meth:`url_for` raises a + #: :exc:`~werkzeug.routing.BuildError`. Each function registered here + #: is called with `error`, `endpoint` and `values`. If a function + #: returns ``None`` or raises a :exc:`BuildError` the next function is + #: tried. + #: + #: .. versionadded:: 0.9 + self.url_build_error_handlers = [] + + #: A dictionary with lists of functions that should be called at the + #: beginning of the request. The key of the dictionary is the name of + #: the blueprint this function is active for, ``None`` for all requests. + #: This can for example be used to open database connections or + #: getting hold of the currently logged in user. To register a + #: function here, use the :meth:`before_request` decorator. + self.before_request_funcs = {} + + #: A lists of functions that should be called at the beginning of the + #: first request to this instance. To register a function here, use + #: the :meth:`before_first_request` decorator. + #: + #: .. versionadded:: 0.8 + self.before_first_request_funcs = [] + + #: A dictionary with lists of functions that should be called after + #: each request. The key of the dictionary is the name of the blueprint + #: this function is active for, ``None`` for all requests. This can for + #: example be used to close database connections. To register a function + #: here, use the :meth:`after_request` decorator. + self.after_request_funcs = {} + + #: A dictionary with lists of functions that are called after + #: each request, even if an exception has occurred. The key of the + #: dictionary is the name of the blueprint this function is active for, + #: ``None`` for all requests. These functions are not allowed to modify + #: the request, and their return values are ignored. If an exception + #: occurred while processing the request, it gets passed to each + #: teardown_request function. To register a function here, use the + #: :meth:`teardown_request` decorator. + #: + #: .. versionadded:: 0.7 + self.teardown_request_funcs = {} + + #: A list of functions that are called when the application context + #: is destroyed. Since the application context is also torn down + #: if the request ends this is the place to store code that disconnects + #: from databases. + #: + #: .. versionadded:: 0.9 + self.teardown_appcontext_funcs = [] + + #: A dictionary with lists of functions that can be used as URL + #: value processor functions. Whenever a URL is built these functions + #: are called to modify the dictionary of values in place. The key + #: ``None`` here is used for application wide + #: callbacks, otherwise the key is the name of the blueprint. + #: Each of these functions has the chance to modify the dictionary + #: + #: .. versionadded:: 0.7 + self.url_value_preprocessors = {} + + #: A dictionary with lists of functions that can be used as URL value + #: preprocessors. 
The key ``None`` here is used for application wide + #: callbacks, otherwise the key is the name of the blueprint. + #: Each of these functions has the chance to modify the dictionary + #: of URL values before they are used as the keyword arguments of the + #: view function. For each function registered this one should also + #: provide a :meth:`url_defaults` function that adds the parameters + #: automatically again that were removed that way. + #: + #: .. versionadded:: 0.7 + self.url_default_functions = {} + + #: A dictionary with list of functions that are called without argument + #: to populate the template context. The key of the dictionary is the + #: name of the blueprint this function is active for, ``None`` for all + #: requests. Each returns a dictionary that the template context is + #: updated with. To register a function here, use the + #: :meth:`context_processor` decorator. + self.template_context_processors = { + None: [_default_template_ctx_processor] + } + + #: A list of shell context processor functions that should be run + #: when a shell context is created. + #: + #: .. versionadded:: 0.11 + self.shell_context_processors = [] + + #: all the attached blueprints in a dictionary by name. Blueprints + #: can be attached multiple times so this dictionary does not tell + #: you how often they got attached. + #: + #: .. versionadded:: 0.7 + self.blueprints = {} + self._blueprint_order = [] + + #: a place where extensions can store application specific state. For + #: example this is where an extension could store database engines and + #: similar things. For backwards compatibility extensions should register + #: themselves like this:: + #: + #: if not hasattr(app, 'extensions'): + #: app.extensions = {} + #: app.extensions['extensionname'] = SomeObject() + #: + #: The key must match the name of the extension module. For example in + #: case of a "Flask-Foo" extension in `flask_foo`, the key would be + #: ``'foo'``. + #: + #: .. versionadded:: 0.7 + self.extensions = {} + + #: The :class:`~werkzeug.routing.Map` for this instance. You can use + #: this to change the routing converters after the class was created + #: but before any routes are connected. Example:: + #: + #: from werkzeug.routing import BaseConverter + #: + #: class ListConverter(BaseConverter): + #: def to_python(self, value): + #: return value.split(',') + #: def to_url(self, values): + #: return ','.join(BaseConverter.to_url(value) + #: for value in values) + #: + #: app = Flask(__name__) + #: app.url_map.converters['list'] = ListConverter + self.url_map = Map() + + # tracks internally if the application already handled at least one + # request. + self._got_first_request = False + self._before_request_lock = Lock() + + # register the static folder for the application. Do that even + # if the folder does not exist. First of all it might be created + # while the server is running (usually happens during development) + # but also because google appengine stores static files somewhere + # else when mapped with the .yml file. + if self.has_static_folder: + self.add_url_rule(self.static_url_path + '/', + endpoint='static', + view_func=self.send_static_file) + + #: The click command line context for this application. Commands + #: registered here show up in the :command:`flask` command once the + #: application has been discovered. The default commands are + #: provided by Flask itself and can be overridden. + #: + #: This is an instance of a :class:`click.Group` object. 
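+        #:
+        #: For example, a minimal sketch (command name is illustrative)::
+        #:
+        #:     import click
+        #:
+        #:     @app.cli.command()
+        #:     def hello():
+        #:         click.echo('Hello from the application CLI!')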
+ self.cli = cli.AppGroup(self.name) + + def _get_error_handlers(self): + from warnings import warn + warn(DeprecationWarning('error_handlers is deprecated, use the ' + 'new error_handler_spec attribute instead.'), stacklevel=1) + return self._error_handlers + def _set_error_handlers(self, value): + self._error_handlers = value + self.error_handler_spec[None] = value + error_handlers = property(_get_error_handlers, _set_error_handlers) + del _get_error_handlers, _set_error_handlers + + @locked_cached_property + def name(self): + """The name of the application. This is usually the import name + with the difference that it's guessed from the run file if the + import name is main. This name is used as a display name when + Flask needs the name of the application. It can be set and overridden + to change the value. + + .. versionadded:: 0.8 + """ + if self.import_name == '__main__': + fn = getattr(sys.modules['__main__'], '__file__', None) + if fn is None: + return '__main__' + return os.path.splitext(os.path.basename(fn))[0] + return self.import_name + + @property + def propagate_exceptions(self): + """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration + value in case it's set, otherwise a sensible default is returned. + + .. versionadded:: 0.7 + """ + rv = self.config['PROPAGATE_EXCEPTIONS'] + if rv is not None: + return rv + return self.testing or self.debug + + @property + def preserve_context_on_exception(self): + """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION`` + configuration value in case it's set, otherwise a sensible default + is returned. + + .. versionadded:: 0.7 + """ + rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION'] + if rv is not None: + return rv + return self.debug + + @property + def logger(self): + """A :class:`logging.Logger` object for this application. The + default configuration is to log to stderr if the application is + in debug mode. This logger can be used to (surprise) log messages. + Here some examples:: + + app.logger.debug('A value for debugging') + app.logger.warning('A warning occurred (%d apples)', 42) + app.logger.error('An error occurred') + + .. versionadded:: 0.3 + """ + if self._logger and self._logger.name == self.logger_name: + return self._logger + with _logger_lock: + if self._logger and self._logger.name == self.logger_name: + return self._logger + from flask.logging import create_logger + self._logger = rv = create_logger(self) + return rv + + @locked_cached_property + def jinja_env(self): + """The Jinja2 environment used to load templates.""" + return self.create_jinja_environment() + + @property + def got_first_request(self): + """This attribute is set to ``True`` if the application started + handling the first request. + + .. versionadded:: 0.8 + """ + return self._got_first_request + + def make_config(self, instance_relative=False): + """Used to create the config attribute by the Flask constructor. + The `instance_relative` parameter is passed in from the constructor + of Flask (there named `instance_relative_config`) and indicates if + the config should be relative to the instance path or the root path + of the application. + + .. versionadded:: 0.8 + """ + root_path = self.root_path + if instance_relative: + root_path = self.instance_path + return self.config_class(root_path, self.default_config) + + def auto_find_instance_path(self): + """Tries to locate the instance path if it was not provided to the + constructor of the application class. 
It will basically calculate + the path to a folder named ``instance`` next to your main file or + the package. + + .. versionadded:: 0.8 + """ + prefix, package_path = find_package(self.import_name) + if prefix is None: + return os.path.join(package_path, 'instance') + return os.path.join(prefix, 'var', self.name + '-instance') + + def open_instance_resource(self, resource, mode='rb'): + """Opens a resource from the application's instance folder + (:attr:`instance_path`). Otherwise works like + :meth:`open_resource`. Instance resources can also be opened for + writing. + + :param resource: the name of the resource. To access resources within + subfolders use forward slashes as separator. + :param mode: resource file opening mode, default is 'rb'. + """ + return open(os.path.join(self.instance_path, resource), mode) + + def create_jinja_environment(self): + """Creates the Jinja2 environment based on :attr:`jinja_options` + and :meth:`select_jinja_autoescape`. Since 0.7 this also adds + the Jinja2 globals and filters after initialization. Override + this function to customize the behavior. + + .. versionadded:: 0.5 + .. versionchanged:: 0.11 + ``Environment.auto_reload`` set in accordance with + ``TEMPLATES_AUTO_RELOAD`` configuration option. + """ + options = dict(self.jinja_options) + if 'autoescape' not in options: + options['autoescape'] = self.select_jinja_autoescape + if 'auto_reload' not in options: + if self.config['TEMPLATES_AUTO_RELOAD'] is not None: + options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD'] + else: + options['auto_reload'] = self.debug + rv = self.jinja_environment(self, **options) + rv.globals.update( + url_for=url_for, + get_flashed_messages=get_flashed_messages, + config=self.config, + # request, session and g are normally added with the + # context processor for efficiency reasons but for imported + # templates we also want the proxies in there. + request=request, + session=session, + g=g + ) + rv.filters['tojson'] = json.tojson_filter + return rv + + def create_global_jinja_loader(self): + """Creates the loader for the Jinja2 environment. Can be used to + override just the loader and keeping the rest unchanged. It's + discouraged to override this function. Instead one should override + the :meth:`jinja_loader` function instead. + + The global loader dispatches between the loaders of the application + and the individual blueprints. + + .. versionadded:: 0.7 + """ + return DispatchingJinjaLoader(self) + + def init_jinja_globals(self): + """Deprecated. Used to initialize the Jinja2 globals. + + .. versionadded:: 0.5 + .. versionchanged:: 0.7 + This method is deprecated with 0.7. Override + :meth:`create_jinja_environment` instead. + """ + + def select_jinja_autoescape(self, filename): + """Returns ``True`` if autoescaping should be active for the given + template name. If no template name is given, returns `True`. + + .. versionadded:: 0.5 + """ + if filename is None: + return True + return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) + + def update_template_context(self, context): + """Update the template context with some commonly used variables. + This injects request, session, config and g into the template + context as well as everything template context processors want + to inject. Note that the as of Flask 0.6, the original values + in the context will not be overridden if a context processor + decides to return a value with the same key. + + :param context: the context as a dictionary that is updated in place + to add extra variables. 
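+
+        For example, a minimal sketch (processor and variable names are
+        illustrative)::
+
+            @app.context_processor
+            def inject_defaults():
+                return {'site_name': 'My Site'}
+
+            # a `site_name` value passed explicitly by the view still wins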
+ """ + funcs = self.template_context_processors[None] + reqctx = _request_ctx_stack.top + if reqctx is not None: + bp = reqctx.request.blueprint + if bp is not None and bp in self.template_context_processors: + funcs = chain(funcs, self.template_context_processors[bp]) + orig_ctx = context.copy() + for func in funcs: + context.update(func()) + # make sure the original values win. This makes it possible to + # easier add new variables in context processors without breaking + # existing views. + context.update(orig_ctx) + + def make_shell_context(self): + """Returns the shell context for an interactive shell for this + application. This runs all the registered shell context + processors. + + .. versionadded:: 0.11 + """ + rv = {'app': self, 'g': g} + for processor in self.shell_context_processors: + rv.update(processor()) + return rv + + def run(self, host=None, port=None, debug=None, **options): + """Runs the application on a local development server. + + Do not use ``run()`` in a production setting. It is not intended to + meet security and performance requirements for a production server. + Instead, see :ref:`deployment` for WSGI server recommendations. + + If the :attr:`debug` flag is set the server will automatically reload + for code changes and show a debugger in case an exception happened. + + If you want to run the application in debug mode, but disable the + code execution on the interactive debugger, you can pass + ``use_evalex=False`` as parameter. This will keep the debugger's + traceback screen active, but disable code execution. + + It is not recommended to use this function for development with + automatic reloading as this is badly supported. Instead you should + be using the :command:`flask` command line script's ``run`` support. + + .. admonition:: Keep in Mind + + Flask will suppress any server error with a generic error page + unless it is in debug mode. As such to enable just the + interactive debugger without the code reloading, you have to + invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. + Setting ``use_debugger`` to ``True`` without being in debug mode + won't catch any exceptions because there won't be any to + catch. + + .. versionchanged:: 0.10 + The default port is now picked from the ``SERVER_NAME`` variable. + + :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to + have the server available externally as well. Defaults to + ``'127.0.0.1'``. + :param port: the port of the webserver. Defaults to ``5000`` or the + port defined in the ``SERVER_NAME`` config variable if + present. + :param debug: if given, enable or disable debug mode. + See :attr:`debug`. + :param options: the options to be forwarded to the underlying + Werkzeug server. See + :func:`werkzeug.serving.run_simple` for more + information. + """ + from werkzeug.serving import run_simple + if host is None: + host = '127.0.0.1' + if port is None: + server_name = self.config['SERVER_NAME'] + if server_name and ':' in server_name: + port = int(server_name.rsplit(':', 1)[1]) + else: + port = 5000 + if debug is not None: + self.debug = bool(debug) + options.setdefault('use_reloader', self.debug) + options.setdefault('use_debugger', self.debug) + options.setdefault('passthrough_errors', True) + try: + run_simple(host, port, self, **options) + finally: + # reset the first request information if the development server + # resetted normally. This makes it possible to restart the server + # without reloader and that stuff from an interactive shell. 
+            self._got_first_request = False
+
+    def test_client(self, use_cookies=True, **kwargs):
+        """Creates a test client for this application.  For information
+        about unit testing head over to :ref:`testing`.
+
+        Note that if you are testing for assertions or exceptions in your
+        application code, you must set ``app.testing = True`` in order for the
+        exceptions to propagate to the test client.  Otherwise, the exception
+        will be handled by the application (not visible to the test client) and
+        the only indication of an AssertionError or other exception will be a
+        500 status code response to the test client.  See the :attr:`testing`
+        attribute.  For example::
+
+            app.testing = True
+            client = app.test_client()
+
+        The test client can be used in a ``with`` block to defer the closing down
+        of the context until the end of the ``with`` block.  This is useful if
+        you want to access the context locals for testing::
+
+            with app.test_client() as c:
+                rv = c.get('/?vodka=42')
+                assert request.args['vodka'] == '42'
+
+        Additionally, you may pass optional keyword arguments that will then
+        be passed to the application's :attr:`test_client_class` constructor.
+        For example::
+
+            from flask.testing import FlaskClient
+
+            class CustomClient(FlaskClient):
+                def __init__(self, authentication=None, *args, **kwargs):
+                    FlaskClient.__init__(self, *args, **kwargs)
+                    self._authentication = authentication
+
+            app.test_client_class = CustomClient
+            client = app.test_client(authentication='Basic ....')
+
+        See :class:`~flask.testing.FlaskClient` for more information.
+
+        .. versionchanged:: 0.4
+           added support for ``with`` block usage for the client.
+
+        .. versionadded:: 0.7
+           The `use_cookies` parameter was added as well as the ability
+           to override the client to be used by setting the
+           :attr:`test_client_class` attribute.
+
+        .. versionchanged:: 0.11
+           Added `**kwargs` to support passing additional keyword arguments to
+           the constructor of :attr:`test_client_class`.
+        """
+        cls = self.test_client_class
+        if cls is None:
+            from flask.testing import FlaskClient as cls
+        return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
+
+    def open_session(self, request):
+        """Creates or opens a new session.  Default implementation stores all
+        session data in a signed cookie.  This requires that the
+        :attr:`secret_key` is set.  Instead of overriding this method
+        we recommend replacing the :class:`session_interface`.
+
+        :param request: an instance of :attr:`request_class`.
+        """
+        return self.session_interface.open_session(self, request)
+
+    def save_session(self, session, response):
+        """Saves the session if it needs updates.  For the default
+        implementation, check :meth:`open_session`.  Instead of overriding this
+        method we recommend replacing the :class:`session_interface`.
+
+        :param session: the session to be saved (a
+                        :class:`~werkzeug.contrib.securecookie.SecureCookie`
+                        object)
+        :param response: an instance of :attr:`response_class`
+        """
+        return self.session_interface.save_session(self, session, response)
+
+    def make_null_session(self):
+        """Creates a new instance of a missing session.  Instead of overriding
+        this method we recommend replacing the :class:`session_interface`.
+
+        .. versionadded:: 0.7
+        """
+        return self.session_interface.make_null_session(self)
+
+    @setupmethod
+    def register_blueprint(self, blueprint, **options):
+        """Register a blueprint on the application. For information about
+        blueprints head over to :ref:`blueprints`.
+
+        The blueprint object is passed in as the first argument.
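For illustration, a minimal registration sketch; the ``admin`` blueprint and ``dashboard`` view are hypothetical names, not part of the code above::

    from flask import Blueprint, Flask

    admin = Blueprint('admin', __name__)

    @admin.route('/dashboard')
    def dashboard():
        return 'admin dashboard'

    app = Flask(__name__)
    # every route on the blueprint is now served under /admin
    app.register_blueprint(admin, url_prefix='/admin')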
+ Options are passed as additional keyword arguments and forwarded to + `blueprints` in an "options" dictionary. + + :param subdomain: set a subdomain for the blueprint + :param url_prefix: set the prefix for all URLs defined on the blueprint. + ``(url_prefix='/')`` + :param url_defaults: a dictionary with URL defaults that is added to + each and every URL defined with this blueprint + :param static_folder: add a static folder to urls in this blueprint + :param static_url_path: add a static url path to urls in this blueprint + :param template_folder: set an alternate template folder + :param root_path: set an alternate root path for this blueprint + + .. versionadded:: 0.7 + """ + first_registration = False + if blueprint.name in self.blueprints: + assert self.blueprints[blueprint.name] is blueprint, \ + 'A blueprint\'s name collision occurred between %r and ' \ + '%r. Both share the same name "%s". Blueprints that ' \ + 'are created on the fly need unique names.' % \ + (blueprint, self.blueprints[blueprint.name], blueprint.name) + else: + self.blueprints[blueprint.name] = blueprint + self._blueprint_order.append(blueprint) + first_registration = True + blueprint.register(self, options, first_registration) + + def iter_blueprints(self): + """Iterates over all blueprints by the order they were registered. + + .. versionadded:: 0.11 + """ + return iter(self._blueprint_order) + + @setupmethod + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """Connects a URL rule. Works exactly like the :meth:`route` + decorator. If a view_func is provided it will be registered with the + endpoint. + + Basically this example:: + + @app.route('/') + def index(): + pass + + Is equivalent to the following:: + + def index(): + pass + app.add_url_rule('/', 'index', index) + + If the view_func is not provided you will need to connect the endpoint + to a view function like so:: + + app.view_functions['index'] = index + + Internally :meth:`route` invokes :meth:`add_url_rule` so if you want + to customize the behavior via subclassing you only need to change + this method. + + For more information refer to :ref:`url-route-registrations`. + + .. versionchanged:: 0.2 + `view_func` parameter added. + + .. versionchanged:: 0.6 + ``OPTIONS`` is added automatically as method. + + :param rule: the URL rule as string + :param endpoint: the endpoint for the registered URL rule. Flask + itself assumes the name of the view function as + endpoint + :param view_func: the function to call when serving a request to the + provided endpoint + :param options: the options to be forwarded to the underlying + :class:`~werkzeug.routing.Rule` object. A change + to Werkzeug is handling of method options. methods + is a list of methods this rule should be limited + to (``GET``, ``POST`` etc.). By default a rule + just listens for ``GET`` (and implicitly ``HEAD``). + Starting with Flask 0.6, ``OPTIONS`` is implicitly + added and handled by the standard request handling. + """ + if endpoint is None: + endpoint = _endpoint_from_view_func(view_func) + options['endpoint'] = endpoint + methods = options.pop('methods', None) + + # if the methods are not given and the view_func object knows its + # methods we can use that instead. If neither exists, we go with + # a tuple of only ``GET`` as default. 
+        if methods is None:
+            methods = getattr(view_func, 'methods', None) or ('GET',)
+        if isinstance(methods, string_types):
+            raise TypeError('Allowed methods have to be iterables of strings, '
+                            'for example: @app.route(..., methods=["POST"])')
+        methods = set(item.upper() for item in methods)
+
+        # Methods that should always be added
+        required_methods = set(getattr(view_func, 'required_methods', ()))
+
+        # starting with Flask 0.8 the view_func object can disable and
+        # force-enable the automatic options handling.
+        provide_automatic_options = getattr(view_func,
+                                            'provide_automatic_options', None)
+
+        if provide_automatic_options is None:
+            if 'OPTIONS' not in methods:
+                provide_automatic_options = True
+                required_methods.add('OPTIONS')
+            else:
+                provide_automatic_options = False
+
+        # Add the required methods now.
+        methods |= required_methods
+
+        rule = self.url_rule_class(rule, methods=methods, **options)
+        rule.provide_automatic_options = provide_automatic_options
+
+        self.url_map.add(rule)
+        if view_func is not None:
+            old_func = self.view_functions.get(endpoint)
+            if old_func is not None and old_func != view_func:
+                raise AssertionError('View function mapping is overwriting an '
+                                     'existing endpoint function: %s' % endpoint)
+            self.view_functions[endpoint] = view_func
+
+    def route(self, rule, **options):
+        """A decorator that is used to register a view function for a
+        given URL rule.  This does the same thing as :meth:`add_url_rule`
+        but is intended for decorator usage::
+
+            @app.route('/')
+            def index():
+                return 'Hello World'
+
+        For more information refer to :ref:`url-route-registrations`.
+
+        :param rule: the URL rule as string
+        :param endpoint: the endpoint for the registered URL rule.  Flask
+                         itself assumes the name of the view function as
+                         endpoint
+        :param options: the options to be forwarded to the underlying
+                        :class:`~werkzeug.routing.Rule` object.  A change
+                        to Werkzeug is handling of method options.  methods
+                        is a list of methods this rule should be limited
+                        to (``GET``, ``POST`` etc.).  By default a rule
+                        just listens for ``GET`` (and implicitly ``HEAD``).
+                        Starting with Flask 0.6, ``OPTIONS`` is implicitly
+                        added and handled by the standard request handling.
+        """
+        def decorator(f):
+            endpoint = options.pop('endpoint', None)
+            self.add_url_rule(rule, endpoint, f, **options)
+            return f
+        return decorator
+
+    @setupmethod
+    def endpoint(self, endpoint):
+        """A decorator to register a function as an endpoint.
+        Example::
+
+            @app.endpoint('example.endpoint')
+            def example():
+                return "example"
+
+        :param endpoint: the name of the endpoint
+        """
+        def decorator(f):
+            self.view_functions[endpoint] = f
+            return f
+        return decorator
+
+    @staticmethod
+    def _get_exc_class_and_code(exc_class_or_code):
+        """Ensure that we register only exceptions as handler keys"""
+        if isinstance(exc_class_or_code, integer_types):
+            exc_class = default_exceptions[exc_class_or_code]
+        else:
+            exc_class = exc_class_or_code
+
+        assert issubclass(exc_class, Exception)
+
+        if issubclass(exc_class, HTTPException):
+            return exc_class, exc_class.code
+        else:
+            return exc_class, None
+
+    @setupmethod
+    def errorhandler(self, code_or_exception):
+        """A decorator that is used to register a function for a given
+        error code.
Example:: + + @app.errorhandler(404) + def page_not_found(error): + return 'This page does not exist', 404 + + You can also register handlers for arbitrary exceptions:: + + @app.errorhandler(DatabaseError) + def special_exception_handler(error): + return 'Database connection failed', 500 + + You can also register a function as error handler without using + the :meth:`errorhandler` decorator. The following example is + equivalent to the one above:: + + def page_not_found(error): + return 'This page does not exist', 404 + app.error_handler_spec[None][404] = page_not_found + + Setting error handlers via assignments to :attr:`error_handler_spec` + however is discouraged as it requires fiddling with nested dictionaries + and the special case for arbitrary exception types. + + The first ``None`` refers to the active blueprint. If the error + handler should be application wide ``None`` shall be used. + + .. versionadded:: 0.7 + Use :meth:`register_error_handler` instead of modifying + :attr:`error_handler_spec` directly, for application wide error + handlers. + + .. versionadded:: 0.7 + One can now additionally also register custom exception types + that do not necessarily have to be a subclass of the + :class:`~werkzeug.exceptions.HTTPException` class. + + :param code: the code as integer for the handler + """ + def decorator(f): + self._register_error_handler(None, code_or_exception, f) + return f + return decorator + + def register_error_handler(self, code_or_exception, f): + """Alternative error attach function to the :meth:`errorhandler` + decorator that is more straightforward to use for non decorator + usage. + + .. versionadded:: 0.7 + """ + self._register_error_handler(None, code_or_exception, f) + + @setupmethod + def _register_error_handler(self, key, code_or_exception, f): + """ + :type key: None|str + :type code_or_exception: int|T<=Exception + :type f: callable + """ + if isinstance(code_or_exception, HTTPException): # old broken behavior + raise ValueError( + 'Tried to register a handler for an exception instance {0!r}. ' + 'Handlers can only be registered for exception classes or HTTP error codes.' + .format(code_or_exception)) + + exc_class, code = self._get_exc_class_and_code(code_or_exception) + + handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {}) + handlers[exc_class] = f + + @setupmethod + def template_filter(self, name=None): + """A decorator that is used to register custom template filter. + You can specify a name for the filter, otherwise the function + name will be used. Example:: + + @app.template_filter() + def reverse(s): + return s[::-1] + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + def decorator(f): + self.add_template_filter(f, name=name) + return f + return decorator + + @setupmethod + def add_template_filter(self, f, name=None): + """Register a custom template filter. Works exactly like the + :meth:`template_filter` decorator. + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + self.jinja_env.filters[name or f.__name__] = f + + @setupmethod + def template_test(self, name=None): + """A decorator that is used to register custom template test. + You can specify a name for the test, otherwise the function + name will be used. Example:: + + @app.template_test() + def is_prime(n): + if n == 2: + return True + for i in range(2, int(math.ceil(math.sqrt(n))) + 1): + if n % i == 0: + return False + return True + + .. 
versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_template_test(f, name=name)
+            return f
+        return decorator
+
+    @setupmethod
+    def add_template_test(self, f, name=None):
+        """Register a custom template test.  Works exactly like the
+        :meth:`template_test` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        self.jinja_env.tests[name or f.__name__] = f
+
+    @setupmethod
+    def template_global(self, name=None):
+        """A decorator that is used to register a custom template global function.
+        You can specify a name for the global function, otherwise the function
+        name will be used. Example::
+
+            @app.template_global()
+            def double(n):
+                return 2 * n
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global function, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_template_global(f, name=name)
+            return f
+        return decorator
+
+    @setupmethod
+    def add_template_global(self, f, name=None):
+        """Register a custom template global function. Works exactly like the
+        :meth:`template_global` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global function, otherwise the
+                     function name will be used.
+        """
+        self.jinja_env.globals[name or f.__name__] = f
+
+    @setupmethod
+    def before_request(self, f):
+        """Registers a function to run before each request.
+
+        The function will be called without any arguments.
+        If the function returns a non-None value, it's handled as
+        if it was the return value from the view and further
+        request handling is stopped.
+        """
+        self.before_request_funcs.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def before_first_request(self, f):
+        """Registers a function to be run before the first request to this
+        instance of the application.
+
+        The function will be called without any arguments and its return
+        value is ignored.
+
+        .. versionadded:: 0.8
+        """
+        self.before_first_request_funcs.append(f)
+        return f
+
+    @setupmethod
+    def after_request(self, f):
+        """Register a function to be run after each request.
+
+        Your function must take one parameter, an instance of
+        :attr:`response_class` and return a new response object or the
+        same (see :meth:`process_response`).
+
+        As of Flask 0.7 this function might not be executed at the end of the
+        request in case an unhandled exception occurred.
+        """
+        self.after_request_funcs.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def teardown_request(self, f):
+        """Register a function to be run at the end of each request,
+        regardless of whether there was an exception or not.  These functions
+        are executed when the request context is popped, even if no actual
+        request was performed.
+
+        Example::
+
+            ctx = app.test_request_context()
+            ctx.push()
+            ...
+            ctx.pop()
+
+        When ``ctx.pop()`` is executed in the above example, the teardown
+        functions are called just before the request context moves from the
+        stack of active contexts.  This becomes relevant if you are using
+        such constructs in tests.
+
+        Teardown functions must take every necessary step to avoid failing.
+        If they execute code that might fail, they have to surround that
+        code with try/except statements and log any errors that occur.
+
+        When a teardown function was called because of an exception it will
+        be passed an error object.
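A common teardown sketch; the ``g._database`` handle is a hypothetical resource, not something Flask provides::

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_request
    def close_db(exc):
        # exc is None unless the request raised an exception
        db = getattr(g, '_database', None)
        if db is not None:
            db.close()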
+ + The return values of teardown functions are ignored. + + .. admonition:: Debug Note + + In debug mode Flask will not tear down a request on an exception + immediately. Instead it will keep it alive so that the interactive + debugger can still access it. This behavior can be controlled + by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable. + """ + self.teardown_request_funcs.setdefault(None, []).append(f) + return f + + @setupmethod + def teardown_appcontext(self, f): + """Registers a function to be called when the application context + ends. These functions are typically also called when the request + context is popped. + + Example:: + + ctx = app.app_context() + ctx.push() + ... + ctx.pop() + + When ``ctx.pop()`` is executed in the above example, the teardown + functions are called just before the app context moves from the + stack of active contexts. This becomes relevant if you are using + such constructs in tests. + + Since a request context typically also manages an application + context it would also be called when you pop a request context. + + When a teardown function was called because of an exception it will + be passed an error object. + + The return values of teardown functions are ignored. + + .. versionadded:: 0.9 + """ + self.teardown_appcontext_funcs.append(f) + return f + + @setupmethod + def context_processor(self, f): + """Registers a template context processor function.""" + self.template_context_processors[None].append(f) + return f + + @setupmethod + def shell_context_processor(self, f): + """Registers a shell context processor function. + + .. versionadded:: 0.11 + """ + self.shell_context_processors.append(f) + return f + + @setupmethod + def url_value_preprocessor(self, f): + """Registers a function as URL value preprocessor for all view + functions of the application. It's called before the view functions + are called and can modify the url values provided. + """ + self.url_value_preprocessors.setdefault(None, []).append(f) + return f + + @setupmethod + def url_defaults(self, f): + """Callback function for URL defaults for all view functions of the + application. It's called with the endpoint and values and should + update the values passed in place. + """ + self.url_default_functions.setdefault(None, []).append(f) + return f + + def _find_error_handler(self, e): + """Finds a registered error handler for the request’s blueprint. + Otherwise falls back to the app, returns None if not a suitable + handler is found. + """ + exc_class, code = self._get_exc_class_and_code(type(e)) + + def find_handler(handler_map): + if not handler_map: + return + queue = deque(exc_class.__mro__) + # Protect from geniuses who might create circular references in + # __mro__ + done = set() + + while queue: + cls = queue.popleft() + if cls in done: + continue + done.add(cls) + handler = handler_map.get(cls) + if handler is not None: + # cache for next time exc_class is raised + handler_map[exc_class] = handler + return handler + + queue.extend(cls.__mro__) + + # try blueprint handlers + handler = find_handler(self.error_handler_spec + .get(request.blueprint, {}) + .get(code)) + if handler is not None: + return handler + + # fall back to app handlers + return find_handler(self.error_handler_spec[None].get(code)) + + def handle_http_exception(self, e): + """Handles an HTTP exception. By default this will invoke the + registered error handlers and fall back to returning the + exception as response. + + .. versionadded:: 0.3 + """ + # Proxy exceptions don't have error codes. 
We want to always return
+        # those unchanged as errors
+        if e.code is None:
+            return e
+
+        handler = self._find_error_handler(e)
+        if handler is None:
+            return e
+        return handler(e)
+
+    def trap_http_exception(self, e):
+        """Checks if an HTTP exception should be trapped or not.  By default
+        this will return ``False`` for all exceptions except for a bad request
+        key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``.  It
+        also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
+
+        This is called for all HTTP exceptions raised by a view function.
+        If it returns ``True`` for any exception the error handler for this
+        exception is not called and it shows up as a regular exception in the
+        traceback.  This is helpful for debugging implicitly raised HTTP
+        exceptions.
+
+        .. versionadded:: 0.8
+        """
+        if self.config['TRAP_HTTP_EXCEPTIONS']:
+            return True
+        if self.config['TRAP_BAD_REQUEST_ERRORS']:
+            return isinstance(e, BadRequest)
+        return False
+
+    def handle_user_exception(self, e):
+        """This method is called whenever an exception occurs that should be
+        handled.  Special cases are
+        :class:`~werkzeug.exceptions.HTTPException`\s, which are forwarded by
+        this function to the :meth:`handle_http_exception` method.  This
+        function will either return a response value or reraise the
+        exception with the same traceback.
+
+        .. versionadded:: 0.7
+        """
+        exc_type, exc_value, tb = sys.exc_info()
+        assert exc_value is e
+
+        # ensure not to trash sys.exc_info() at that point in case someone
+        # wants the traceback preserved in handle_http_exception.  Of course
+        # we cannot prevent users from trashing it themselves in a custom
+        # trap_http_exception method so that's their fault then.
+
+        if isinstance(e, HTTPException) and not self.trap_http_exception(e):
+            return self.handle_http_exception(e)
+
+        handler = self._find_error_handler(e)
+
+        if handler is None:
+            reraise(exc_type, exc_value, tb)
+        return handler(e)
+
+    def handle_exception(self, e):
+        """Default exception handling that kicks in when an exception
+        occurs that is not caught.  In debug mode the exception will
+        be re-raised immediately, otherwise it is logged and the handler
+        for a 500 internal server error is used.  If no such handler
+        exists, a default 500 internal server error message is displayed.
+
+        .. versionadded:: 0.3
+        """
+        exc_type, exc_value, tb = sys.exc_info()
+
+        got_request_exception.send(self, exception=e)
+        handler = self._find_error_handler(InternalServerError())
+
+        if self.propagate_exceptions:
+            # if we want to repropagate the exception, we can attempt to
+            # raise it with the whole traceback in case we can do that
+            # (the function was actually called from the except part);
+            # otherwise, we just raise the error again
+            if exc_value is e:
+                reraise(exc_type, exc_value, tb)
+            else:
+                raise e
+
+        self.log_exception((exc_type, exc_value, tb))
+        if handler is None:
+            return InternalServerError()
+        return handler(e)
+
+    def log_exception(self, exc_info):
+        """Logs an exception.  This is called by :meth:`handle_exception`
+        if debugging is disabled and right before the handler is called.
+        The default implementation logs the exception as an error on the
+        :attr:`logger`.
+
+        .. versionadded:: 0.8
+        """
+        self.logger.error('Exception on %s [%s]' % (
+            request.path,
+            request.method
+        ), exc_info=exc_info)
+
+    def raise_routing_exception(self, request):
+        """Exceptions that are recorded during routing are reraised with
+        this method.
During debug we are not reraising redirect requests + for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising + a different error instead to help debug situations. + + :internal: + """ + if not self.debug \ + or not isinstance(request.routing_exception, RequestRedirect) \ + or request.method in ('GET', 'HEAD', 'OPTIONS'): + raise request.routing_exception + + from .debughelpers import FormDataRoutingRedirect + raise FormDataRoutingRedirect(request) + + def dispatch_request(self): + """Does the request dispatching. Matches the URL and returns the + return value of the view or error handler. This does not have to + be a response object. In order to convert the return value to a + proper response object, call :func:`make_response`. + + .. versionchanged:: 0.7 + This no longer does the exception handling, this code was + moved to the new :meth:`full_dispatch_request`. + """ + req = _request_ctx_stack.top.request + if req.routing_exception is not None: + self.raise_routing_exception(req) + rule = req.url_rule + # if we provide automatic options for this URL and the + # request came with the OPTIONS method, reply automatically + if getattr(rule, 'provide_automatic_options', False) \ + and req.method == 'OPTIONS': + return self.make_default_options_response() + # otherwise dispatch to the handler for that endpoint + return self.view_functions[rule.endpoint](**req.view_args) + + def full_dispatch_request(self): + """Dispatches the request and on top of that performs request + pre and postprocessing as well as HTTP exception catching and + error handling. + + .. versionadded:: 0.7 + """ + self.try_trigger_before_first_request_functions() + try: + request_started.send(self) + rv = self.preprocess_request() + if rv is None: + rv = self.dispatch_request() + except Exception as e: + rv = self.handle_user_exception(e) + response = self.make_response(rv) + response = self.process_response(response) + request_finished.send(self, response=response) + return response + + def try_trigger_before_first_request_functions(self): + """Called before each request and will ensure that it triggers + the :attr:`before_first_request_funcs` and only exactly once per + application instance (which means process usually). + + :internal: + """ + if self._got_first_request: + return + with self._before_request_lock: + if self._got_first_request: + return + for func in self.before_first_request_funcs: + func() + self._got_first_request = True + + def make_default_options_response(self): + """This method is called to create the default ``OPTIONS`` response. + This can be changed through subclassing to change the default + behavior of ``OPTIONS`` responses. + + .. versionadded:: 0.7 + """ + adapter = _request_ctx_stack.top.url_adapter + if hasattr(adapter, 'allowed_methods'): + methods = adapter.allowed_methods() + else: + # fallback for Werkzeug < 0.7 + methods = [] + try: + adapter.match(method='--') + except MethodNotAllowed as e: + methods = e.valid_methods + except HTTPException as e: + pass + rv = self.response_class() + rv.allow.update(methods) + return rv + + def should_ignore_error(self, error): + """This is called to figure out if an error should be ignored + or not as far as the teardown system is concerned. If this + function returns ``True`` then the teardown handlers will not be + passed the error. + + .. 
versionadded:: 0.10 + """ + return False + + def make_response(self, rv): + """Converts the return value from a view function to a real + response object that is an instance of :attr:`response_class`. + + The following types are allowed for `rv`: + + .. tabularcolumns:: |p{3.5cm}|p{9.5cm}| + + ======================= =========================================== + :attr:`response_class` the object is returned unchanged + :class:`str` a response object is created with the + string as body + :class:`unicode` a response object is created with the + string encoded to utf-8 as body + a WSGI function the function is called as WSGI application + and buffered as response object + :class:`tuple` A tuple in the form ``(response, status, + headers)`` or ``(response, headers)`` + where `response` is any of the + types defined here, `status` is a string + or an integer and `headers` is a list or + a dictionary with header values. + ======================= =========================================== + + :param rv: the return value from the view function + + .. versionchanged:: 0.9 + Previously a tuple was interpreted as the arguments for the + response object. + """ + status_or_headers = headers = None + if isinstance(rv, tuple): + rv, status_or_headers, headers = rv + (None,) * (3 - len(rv)) + + if rv is None: + raise ValueError('View function did not return a response') + + if isinstance(status_or_headers, (dict, list)): + headers, status_or_headers = status_or_headers, None + + if not isinstance(rv, self.response_class): + # When we create a response object directly, we let the constructor + # set the headers and status. We do this because there can be + # some extra logic involved when creating these objects with + # specific values (like default content type selection). + if isinstance(rv, (text_type, bytes, bytearray)): + rv = self.response_class(rv, headers=headers, + status=status_or_headers) + headers = status_or_headers = None + else: + rv = self.response_class.force_type(rv, request.environ) + + if status_or_headers is not None: + if isinstance(status_or_headers, string_types): + rv.status = status_or_headers + else: + rv.status_code = status_or_headers + if headers: + rv.headers.extend(headers) + + return rv + + def create_url_adapter(self, request): + """Creates a URL adapter for the given request. The URL adapter + is created at a point where the request context is not yet set up + so the request is passed explicitly. + + .. versionadded:: 0.6 + + .. versionchanged:: 0.9 + This can now also be called without a request object when the + URL adapter is created for the application context. + """ + if request is not None: + return self.url_map.bind_to_environ(request.environ, + server_name=self.config['SERVER_NAME']) + # We need at the very least the server name to be set for this + # to work. + if self.config['SERVER_NAME'] is not None: + return self.url_map.bind( + self.config['SERVER_NAME'], + script_name=self.config['APPLICATION_ROOT'] or '/', + url_scheme=self.config['PREFERRED_URL_SCHEME']) + + def inject_url_defaults(self, endpoint, values): + """Injects the URL defaults for the given endpoint directly into + the values dictionary passed. This is used internally and + automatically called on URL building. + + .. versionadded:: 0.7 + """ + funcs = self.url_default_functions.get(None, ()) + if '.' 
in endpoint: + bp = endpoint.rsplit('.', 1)[0] + funcs = chain(funcs, self.url_default_functions.get(bp, ())) + for func in funcs: + func(endpoint, values) + + def handle_url_build_error(self, error, endpoint, values): + """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. + """ + exc_type, exc_value, tb = sys.exc_info() + for handler in self.url_build_error_handlers: + try: + rv = handler(error, endpoint, values) + if rv is not None: + return rv + except BuildError as e: + # make error available outside except block (py3) + error = e + + # At this point we want to reraise the exception. If the error is + # still the same one we can reraise it with the original traceback, + # otherwise we raise it from here. + if error is exc_value: + reraise(exc_type, exc_value, tb) + raise error + + def preprocess_request(self): + """Called before the actual request dispatching and will + call each :meth:`before_request` decorated function, passing no + arguments. + If any of these functions returns a value, it's handled as + if it was the return value from the view and further + request handling is stopped. + + This also triggers the :meth:`url_value_processor` functions before + the actual :meth:`before_request` functions are called. + """ + bp = _request_ctx_stack.top.request.blueprint + + funcs = self.url_value_preprocessors.get(None, ()) + if bp is not None and bp in self.url_value_preprocessors: + funcs = chain(funcs, self.url_value_preprocessors[bp]) + for func in funcs: + func(request.endpoint, request.view_args) + + funcs = self.before_request_funcs.get(None, ()) + if bp is not None and bp in self.before_request_funcs: + funcs = chain(funcs, self.before_request_funcs[bp]) + for func in funcs: + rv = func() + if rv is not None: + return rv + + def process_response(self, response): + """Can be overridden in order to modify the response object + before it's sent to the WSGI server. By default this will + call all the :meth:`after_request` decorated functions. + + .. versionchanged:: 0.5 + As of Flask 0.5 the functions registered for after request + execution are called in reverse order of registration. + + :param response: a :attr:`response_class` object. + :return: a new response object or the same, has to be an + instance of :attr:`response_class`. + """ + ctx = _request_ctx_stack.top + bp = ctx.request.blueprint + funcs = ctx._after_request_functions + if bp is not None and bp in self.after_request_funcs: + funcs = chain(funcs, reversed(self.after_request_funcs[bp])) + if None in self.after_request_funcs: + funcs = chain(funcs, reversed(self.after_request_funcs[None])) + for handler in funcs: + response = handler(response) + if not self.session_interface.is_null_session(ctx.session): + self.save_session(ctx.session, response) + return response + + def do_teardown_request(self, exc=_sentinel): + """Called after the actual request dispatching and will + call every as :meth:`teardown_request` decorated function. This is + not actually called by the :class:`Flask` object itself but is always + triggered when the request context is popped. That way we have a + tighter control over certain resources under testing environments. + + .. versionchanged:: 0.9 + Added the `exc` argument. Previously this was always using the + current exception information. 
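Since :meth:`process_response` drives the ``after_request`` chain, here is a minimal response-hook sketch; the header name is arbitrary and an existing ``app`` object is assumed::

    @app.after_request
    def add_header(response):
        # must return a response object; process_response() passes it on
        response.headers['X-Served-By'] = 'flask'
        return response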
+ """ + if exc is _sentinel: + exc = sys.exc_info()[1] + funcs = reversed(self.teardown_request_funcs.get(None, ())) + bp = _request_ctx_stack.top.request.blueprint + if bp is not None and bp in self.teardown_request_funcs: + funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) + for func in funcs: + func(exc) + request_tearing_down.send(self, exc=exc) + + def do_teardown_appcontext(self, exc=_sentinel): + """Called when an application context is popped. This works pretty + much the same as :meth:`do_teardown_request` but for the application + context. + + .. versionadded:: 0.9 + """ + if exc is _sentinel: + exc = sys.exc_info()[1] + for func in reversed(self.teardown_appcontext_funcs): + func(exc) + appcontext_tearing_down.send(self, exc=exc) + + def app_context(self): + """Binds the application only. For as long as the application is bound + to the current context the :data:`flask.current_app` points to that + application. An application context is automatically created when a + request context is pushed if necessary. + + Example usage:: + + with app.app_context(): + ... + + .. versionadded:: 0.9 + """ + return AppContext(self) + + def request_context(self, environ): + """Creates a :class:`~flask.ctx.RequestContext` from the given + environment and binds it to the current context. This must be used in + combination with the ``with`` statement because the request is only bound + to the current context for the duration of the ``with`` block. + + Example usage:: + + with app.request_context(environ): + do_something_with(request) + + The object returned can also be used without the ``with`` statement + which is useful for working in the shell. The example above is + doing exactly the same as this code:: + + ctx = app.request_context(environ) + ctx.push() + try: + do_something_with(request) + finally: + ctx.pop() + + .. versionchanged:: 0.3 + Added support for non-with statement usage and ``with`` statement + is now passed the ctx object. + + :param environ: a WSGI environment + """ + return RequestContext(self, environ) + + def test_request_context(self, *args, **kwargs): + """Creates a WSGI environment from the given values (see + :class:`werkzeug.test.EnvironBuilder` for more information, this + function accepts the same arguments). + """ + from flask.testing import make_test_environ_builder + builder = make_test_environ_builder(self, *args, **kwargs) + try: + return self.request_context(builder.get_environ()) + finally: + builder.close() + + def wsgi_app(self, environ, start_response): + """The actual WSGI application. This is not implemented in + `__call__` so that middlewares can be applied without losing a + reference to the class. So instead of doing this:: + + app = MyMiddleware(app) + + It's a better idea to do this instead:: + + app.wsgi_app = MyMiddleware(app.wsgi_app) + + Then you still have the original application object around and + can continue to call methods on it. + + .. versionchanged:: 0.7 + The behavior of the before and after request callbacks was changed + under error conditions and a new callback was added that will + always execute at the end of the request, independent on if an + error occurred or not. See :ref:`callbacks-and-errors`. 
+ + :param environ: a WSGI environment + :param start_response: a callable accepting a status code, + a list of headers and an optional + exception context to start the response + """ + ctx = self.request_context(environ) + ctx.push() + error = None + try: + try: + response = self.full_dispatch_request() + except Exception as e: + error = e + response = self.make_response(self.handle_exception(e)) + return response(environ, start_response) + finally: + if self.should_ignore_error(error): + error = None + ctx.auto_pop(error) + + def __call__(self, environ, start_response): + """Shortcut for :attr:`wsgi_app`.""" + return self.wsgi_app(environ, start_response) + + def __repr__(self): + return '<%s %r>' % ( + self.__class__.__name__, + self.name, + ) diff --git a/venv/lib/python2.7/site-packages/flask/blueprints.py b/venv/lib/python2.7/site-packages/flask/blueprints.py new file mode 100644 index 0000000..586a1b0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/blueprints.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" + flask.blueprints + ~~~~~~~~~~~~~~~~ + + Blueprints are the recommended way to implement larger or more + pluggable applications in Flask 0.7 and later. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +from functools import update_wrapper + +from .helpers import _PackageBoundObject, _endpoint_from_view_func + + +class BlueprintSetupState(object): + """Temporary holder object for registering a blueprint with the + application. An instance of this class is created by the + :meth:`~flask.Blueprint.make_setup_state` method and later passed + to all register callback functions. + """ + + def __init__(self, blueprint, app, options, first_registration): + #: a reference to the current application + self.app = app + + #: a reference to the blueprint that created this setup state. + self.blueprint = blueprint + + #: a dictionary with all options that were passed to the + #: :meth:`~flask.Flask.register_blueprint` method. + self.options = options + + #: as blueprints can be registered multiple times with the + #: application and not everything wants to be registered + #: multiple times on it, this attribute can be used to figure + #: out if the blueprint was registered in the past already. + self.first_registration = first_registration + + subdomain = self.options.get('subdomain') + if subdomain is None: + subdomain = self.blueprint.subdomain + + #: The subdomain that the blueprint should be active for, ``None`` + #: otherwise. + self.subdomain = subdomain + + url_prefix = self.options.get('url_prefix') + if url_prefix is None: + url_prefix = self.blueprint.url_prefix + + #: The prefix that should be used for all URLs defined on the + #: blueprint. + self.url_prefix = url_prefix + + #: A dictionary with URL defaults that is added to each and every + #: URL that was defined with the blueprint. + self.url_defaults = dict(self.blueprint.url_values_defaults) + self.url_defaults.update(self.options.get('url_defaults', ())) + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """A helper method to register a rule (and optionally a view function) + to the application. The endpoint is automatically prefixed with the + blueprint's name. 
+ """ + if self.url_prefix: + rule = self.url_prefix + rule + options.setdefault('subdomain', self.subdomain) + if endpoint is None: + endpoint = _endpoint_from_view_func(view_func) + defaults = self.url_defaults + if 'defaults' in options: + defaults = dict(defaults, **options.pop('defaults')) + self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), + view_func, defaults=defaults, **options) + + +class Blueprint(_PackageBoundObject): + """Represents a blueprint. A blueprint is an object that records + functions that will be called with the + :class:`~flask.blueprints.BlueprintSetupState` later to register functions + or other things on the main application. See :ref:`blueprints` for more + information. + + .. versionadded:: 0.7 + """ + + warn_on_modifications = False + _got_registered_once = False + + def __init__(self, name, import_name, static_folder=None, + static_url_path=None, template_folder=None, + url_prefix=None, subdomain=None, url_defaults=None, + root_path=None): + _PackageBoundObject.__init__(self, import_name, template_folder, + root_path=root_path) + self.name = name + self.url_prefix = url_prefix + self.subdomain = subdomain + self.static_folder = static_folder + self.static_url_path = static_url_path + self.deferred_functions = [] + if url_defaults is None: + url_defaults = {} + self.url_values_defaults = url_defaults + + def record(self, func): + """Registers a function that is called when the blueprint is + registered on the application. This function is called with the + state as argument as returned by the :meth:`make_setup_state` + method. + """ + if self._got_registered_once and self.warn_on_modifications: + from warnings import warn + warn(Warning('The blueprint was already registered once ' + 'but is getting modified now. These changes ' + 'will not show up.')) + self.deferred_functions.append(func) + + def record_once(self, func): + """Works like :meth:`record` but wraps the function in another + function that will ensure the function is only called once. If the + blueprint is registered a second time on the application, the + function passed is not called. + """ + def wrapper(state): + if state.first_registration: + func(state) + return self.record(update_wrapper(wrapper, func)) + + def make_setup_state(self, app, options, first_registration=False): + """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` + object that is later passed to the register callback functions. + Subclasses can override this to return a subclass of the setup state. + """ + return BlueprintSetupState(self, app, options, first_registration) + + def register(self, app, options, first_registration=False): + """Called by :meth:`Flask.register_blueprint` to register a blueprint + on the application. This can be overridden to customize the register + behavior. Keyword arguments from + :func:`~flask.Flask.register_blueprint` are directly forwarded to this + method in the `options` dictionary. + """ + self._got_registered_once = True + state = self.make_setup_state(app, options, first_registration) + if self.has_static_folder: + state.add_url_rule(self.static_url_path + '/', + view_func=self.send_static_file, + endpoint='static') + + for deferred in self.deferred_functions: + deferred(state) + + def route(self, rule, **options): + """Like :meth:`Flask.route` but for a blueprint. The endpoint for the + :func:`url_for` function is prefixed with the name of the blueprint. 
+ """ + def decorator(f): + endpoint = options.pop("endpoint", f.__name__) + self.add_url_rule(rule, endpoint, f, **options) + return f + return decorator + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for + the :func:`url_for` function is prefixed with the name of the blueprint. + """ + if endpoint: + assert '.' not in endpoint, "Blueprint endpoints should not contain dots" + self.record(lambda s: + s.add_url_rule(rule, endpoint, view_func, **options)) + + def endpoint(self, endpoint): + """Like :meth:`Flask.endpoint` but for a blueprint. This does not + prefix the endpoint with the blueprint name, this has to be done + explicitly by the user of this method. If the endpoint is prefixed + with a `.` it will be registered to the current blueprint, otherwise + it's an application independent endpoint. + """ + def decorator(f): + def register_endpoint(state): + state.app.view_functions[endpoint] = f + self.record_once(register_endpoint) + return f + return decorator + + def app_template_filter(self, name=None): + """Register a custom template filter, available application wide. Like + :meth:`Flask.template_filter` but for a blueprint. + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_filter(f, name=name) + return f + return decorator + + def add_app_template_filter(self, f, name=None): + """Register a custom template filter, available application wide. Like + :meth:`Flask.add_template_filter` but for a blueprint. Works exactly + like the :meth:`app_template_filter` decorator. + + :param name: the optional name of the filter, otherwise the + function name will be used. + """ + def register_template(state): + state.app.jinja_env.filters[name or f.__name__] = f + self.record_once(register_template) + + def app_template_test(self, name=None): + """Register a custom template test, available application wide. Like + :meth:`Flask.template_test` but for a blueprint. + + .. versionadded:: 0.10 + + :param name: the optional name of the test, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_test(f, name=name) + return f + return decorator + + def add_app_template_test(self, f, name=None): + """Register a custom template test, available application wide. Like + :meth:`Flask.add_template_test` but for a blueprint. Works exactly + like the :meth:`app_template_test` decorator. + + .. versionadded:: 0.10 + + :param name: the optional name of the test, otherwise the + function name will be used. + """ + def register_template(state): + state.app.jinja_env.tests[name or f.__name__] = f + self.record_once(register_template) + + def app_template_global(self, name=None): + """Register a custom template global, available application wide. Like + :meth:`Flask.template_global` but for a blueprint. + + .. versionadded:: 0.10 + + :param name: the optional name of the global, otherwise the + function name will be used. + """ + def decorator(f): + self.add_app_template_global(f, name=name) + return f + return decorator + + def add_app_template_global(self, f, name=None): + """Register a custom template global, available application wide. Like + :meth:`Flask.add_template_global` but for a blueprint. Works exactly + like the :meth:`app_template_global` decorator. + + .. versionadded:: 0.10 + + :param name: the optional name of the global, otherwise the + function name will be used. 
+ """ + def register_template(state): + state.app.jinja_env.globals[name or f.__name__] = f + self.record_once(register_template) + + def before_request(self, f): + """Like :meth:`Flask.before_request` but for a blueprint. This function + is only executed before each request that is handled by a function of + that blueprint. + """ + self.record_once(lambda s: s.app.before_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def before_app_request(self, f): + """Like :meth:`Flask.before_request`. Such a function is executed + before each request, even if outside of a blueprint. + """ + self.record_once(lambda s: s.app.before_request_funcs + .setdefault(None, []).append(f)) + return f + + def before_app_first_request(self, f): + """Like :meth:`Flask.before_first_request`. Such a function is + executed before the first request to the application. + """ + self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) + return f + + def after_request(self, f): + """Like :meth:`Flask.after_request` but for a blueprint. This function + is only executed after each request that is handled by a function of + that blueprint. + """ + self.record_once(lambda s: s.app.after_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def after_app_request(self, f): + """Like :meth:`Flask.after_request` but for a blueprint. Such a function + is executed after each request, even if outside of the blueprint. + """ + self.record_once(lambda s: s.app.after_request_funcs + .setdefault(None, []).append(f)) + return f + + def teardown_request(self, f): + """Like :meth:`Flask.teardown_request` but for a blueprint. This + function is only executed when tearing down requests handled by a + function of that blueprint. Teardown request functions are executed + when the request context is popped, even when no actual request was + performed. + """ + self.record_once(lambda s: s.app.teardown_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def teardown_app_request(self, f): + """Like :meth:`Flask.teardown_request` but for a blueprint. Such a + function is executed when tearing down each request, even if outside of + the blueprint. + """ + self.record_once(lambda s: s.app.teardown_request_funcs + .setdefault(None, []).append(f)) + return f + + def context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a blueprint. This + function is only executed for requests handled by a blueprint. + """ + self.record_once(lambda s: s.app.template_context_processors + .setdefault(self.name, []).append(f)) + return f + + def app_context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a blueprint. Such a + function is executed each request, even if outside of the blueprint. + """ + self.record_once(lambda s: s.app.template_context_processors + .setdefault(None, []).append(f)) + return f + + def app_errorhandler(self, code): + """Like :meth:`Flask.errorhandler` but for a blueprint. This + handler is used for all requests, even if outside of the blueprint. + """ + def decorator(f): + self.record_once(lambda s: s.app.errorhandler(code)(f)) + return f + return decorator + + def url_value_preprocessor(self, f): + """Registers a function as URL value preprocessor for this + blueprint. It's called before the view functions are called and + can modify the url values provided. 
+ """ + self.record_once(lambda s: s.app.url_value_preprocessors + .setdefault(self.name, []).append(f)) + return f + + def url_defaults(self, f): + """Callback function for URL defaults for this blueprint. It's called + with the endpoint and values and should update the values passed + in place. + """ + self.record_once(lambda s: s.app.url_default_functions + .setdefault(self.name, []).append(f)) + return f + + def app_url_value_preprocessor(self, f): + """Same as :meth:`url_value_preprocessor` but application wide. + """ + self.record_once(lambda s: s.app.url_value_preprocessors + .setdefault(None, []).append(f)) + return f + + def app_url_defaults(self, f): + """Same as :meth:`url_defaults` but application wide. + """ + self.record_once(lambda s: s.app.url_default_functions + .setdefault(None, []).append(f)) + return f + + def errorhandler(self, code_or_exception): + """Registers an error handler that becomes active for this blueprint + only. Please be aware that routing does not happen local to a + blueprint so an error handler for 404 usually is not handled by + a blueprint unless it is caused inside a view function. Another + special case is the 500 internal server error which is always looked + up from the application. + + Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator + of the :class:`~flask.Flask` object. + """ + def decorator(f): + self.record_once(lambda s: s.app._register_error_handler( + self.name, code_or_exception, f)) + return f + return decorator + + def register_error_handler(self, code_or_exception, f): + """Non-decorator version of the :meth:`errorhandler` error attach + function, akin to the :meth:`~flask.Flask.register_error_handler` + application-wide function of the :class:`~flask.Flask` object but + for error handlers limited to this blueprint. + + .. versionadded:: 0.11 + """ + self.record_once(lambda s: s.app._register_error_handler( + self.name, code_or_exception, f)) diff --git a/venv/lib/python2.7/site-packages/flask/cli.py b/venv/lib/python2.7/site-packages/flask/cli.py new file mode 100644 index 0000000..d3e74d9 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/cli.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +""" + flask.cli + ~~~~~~~~~ + + A simple command line application to run flask apps. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import sys +from threading import Lock, Thread +from functools import update_wrapper + +import click + +from ._compat import iteritems, reraise +from .helpers import get_debug_flag + + +class NoAppException(click.UsageError): + """Raised if an application cannot be found or loaded.""" + + +def find_best_app(module): + """Given a module instance this tries to find the best possible + application in the module or raises an exception. + """ + from . import Flask + + # Search for the most common names first. + for attr_name in 'app', 'application': + app = getattr(module, attr_name, None) + if app is not None and isinstance(app, Flask): + return app + + # Otherwise find the only object that is a Flask instance. + matches = [v for k, v in iteritems(module.__dict__) + if isinstance(v, Flask)] + + if len(matches) == 1: + return matches[0] + raise NoAppException('Failed to find application in module "%s". Are ' + 'you sure it contains a Flask application? Maybe ' + 'you wrapped it in a WSGI middleware or you are ' + 'using a factory function.' 
+                         % module.__name__)
+
+
+def prepare_exec_for_file(filename):
+    """Given a filename this will try to calculate the python path, add it
+    to the search path and return the actual module name that is expected.
+    """
+    module = []
+
+    # Chop off file extensions or package markers
+    if os.path.split(filename)[1] == '__init__.py':
+        filename = os.path.dirname(filename)
+    elif filename.endswith('.py'):
+        filename = filename[:-3]
+    else:
+        raise NoAppException('The file provided (%s) does exist but is not a '
+                             'valid Python file. This means that it cannot '
+                             'be used as an application. Please change the '
+                             'extension to .py' % filename)
+    filename = os.path.realpath(filename)
+
+    dirpath = filename
+    while 1:
+        dirpath, extra = os.path.split(dirpath)
+        module.append(extra)
+        if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
+            break
+
+    sys.path.insert(0, dirpath)
+    return '.'.join(module[::-1])
+
+
+def locate_app(app_id):
+    """Attempts to locate the application."""
+    __traceback_hide__ = True
+    if ':' in app_id:
+        module, app_obj = app_id.split(':', 1)
+    else:
+        module = app_id
+        app_obj = None
+
+    __import__(module)
+    mod = sys.modules[module]
+    if app_obj is None:
+        app = find_best_app(mod)
+    else:
+        app = getattr(mod, app_obj, None)
+        if app is None:
+            raise RuntimeError('Failed to find application in module "%s"'
+                               % module)
+
+    return app
+
+
+def find_default_import_path():
+    app = os.environ.get('FLASK_APP')
+    if app is None:
+        return
+    if os.path.isfile(app):
+        return prepare_exec_for_file(app)
+    return app
+
+
+class DispatchingApp(object):
+    """Special application that dispatches to a Flask application which
+    is imported by name in a background thread. If an error happens
+    it is recorded and shown as part of the WSGI handling which in case
+    of the Werkzeug debugger means that it shows up in the browser.
+    """
+
+    def __init__(self, loader, use_eager_loading=False):
+        self.loader = loader
+        self._app = None
+        self._lock = Lock()
+        self._bg_loading_exc_info = None
+        if use_eager_loading:
+            self._load_unlocked()
+        else:
+            self._load_in_background()
+
+    def _load_in_background(self):
+        def _load_app():
+            __traceback_hide__ = True
+            with self._lock:
+                try:
+                    self._load_unlocked()
+                except Exception:
+                    self._bg_loading_exc_info = sys.exc_info()
+        t = Thread(target=_load_app, args=())
+        t.start()
+
+    def _flush_bg_loading_exception(self):
+        __traceback_hide__ = True
+        exc_info = self._bg_loading_exc_info
+        if exc_info is not None:
+            self._bg_loading_exc_info = None
+            reraise(*exc_info)
+
+    def _load_unlocked(self):
+        __traceback_hide__ = True
+        self._app = rv = self.loader()
+        self._bg_loading_exc_info = None
+        return rv
+
+    def __call__(self, environ, start_response):
+        __traceback_hide__ = True
+        if self._app is not None:
+            return self._app(environ, start_response)
+        self._flush_bg_loading_exception()
+        with self._lock:
+            if self._app is not None:
+                rv = self._app
+            else:
+                rv = self._load_unlocked()
+            return rv(environ, start_response)
+
+
+class ScriptInfo(object):
+    """Help object to deal with Flask applications. This is usually not
+    necessary to interface with as it's used internally in the dispatching
+    to click. In future versions of Flask this object will most likely play
+    a bigger role. Typically it's created automatically by the
+    :class:`FlaskGroup` but you can also manually create it and pass it
+    onwards as a click object.
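+
+    A rough usage sketch (``make_app`` is a hypothetical factory returning a
+    :class:`Flask` instance; ``create_app`` receives this script info)::
+
+        info = ScriptInfo(create_app=lambda script_info: make_app())
+        app = info.load_app()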
+ """ + + def __init__(self, app_import_path=None, create_app=None): + if create_app is None: + if app_import_path is None: + app_import_path = find_default_import_path() + self.app_import_path = app_import_path + else: + app_import_path = None + + #: Optionally the import path for the Flask application. + self.app_import_path = app_import_path + #: Optionally a function that is passed the script info to create + #: the instance of the application. + self.create_app = create_app + #: A dictionary with arbitrary data that can be associated with + #: this script info. + self.data = {} + self._loaded_app = None + + def load_app(self): + """Loads the Flask app (if not yet loaded) and returns it. Calling + this multiple times will just result in the already loaded app to + be returned. + """ + __traceback_hide__ = True + if self._loaded_app is not None: + return self._loaded_app + if self.create_app is not None: + rv = self.create_app(self) + else: + if not self.app_import_path: + raise NoAppException( + 'Could not locate Flask application. You did not provide ' + 'the FLASK_APP environment variable.\n\nFor more ' + 'information see ' + 'http://flask.pocoo.org/docs/latest/quickstart/') + rv = locate_app(self.app_import_path) + debug = get_debug_flag() + if debug is not None: + rv.debug = debug + self._loaded_app = rv + return rv + + +pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) + + +def with_appcontext(f): + """Wraps a callback so that it's guaranteed to be executed with the + script's application context. If callbacks are registered directly + to the ``app.cli`` object then they are wrapped with this function + by default unless it's disabled. + """ + @click.pass_context + def decorator(__ctx, *args, **kwargs): + with __ctx.ensure_object(ScriptInfo).load_app().app_context(): + return __ctx.invoke(f, *args, **kwargs) + return update_wrapper(decorator, f) + + +class AppGroup(click.Group): + """This works similar to a regular click :class:`~click.Group` but it + changes the behavior of the :meth:`command` decorator so that it + automatically wraps the functions in :func:`with_appcontext`. + + Not to be confused with :class:`FlaskGroup`. + """ + + def command(self, *args, **kwargs): + """This works exactly like the method of the same name on a regular + :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` + unless it's disabled by passing ``with_appcontext=False``. + """ + wrap_for_ctx = kwargs.pop('with_appcontext', True) + def decorator(f): + if wrap_for_ctx: + f = with_appcontext(f) + return click.Group.command(self, *args, **kwargs)(f) + return decorator + + def group(self, *args, **kwargs): + """This works exactly like the method of the same name on a regular + :class:`click.Group` but it defaults the group class to + :class:`AppGroup`. + """ + kwargs.setdefault('cls', AppGroup) + return click.Group.group(self, *args, **kwargs) + + +class FlaskGroup(AppGroup): + """Special subclass of the :class:`AppGroup` group that supports + loading more commands from the configured Flask app. Normally a + developer does not have to interface with this class but there are + some very advanced use cases for which it makes sense to create an + instance of this. + + For information as of why this is useful see :ref:`custom-scripts`. + + :param add_default_commands: if this is True then the default run and + shell commands wil be added. + :param create_app: an optional callback that is passed the script info + and returns the loaded app. 
+ """ + + def __init__(self, add_default_commands=True, create_app=None, **extra): + AppGroup.__init__(self, **extra) + self.create_app = create_app + + if add_default_commands: + self.add_command(run_command) + self.add_command(shell_command) + + self._loaded_plugin_commands = False + + def _load_plugin_commands(self): + if self._loaded_plugin_commands: + return + try: + import pkg_resources + except ImportError: + self._loaded_plugin_commands = True + return + + for ep in pkg_resources.iter_entry_points('flask.commands'): + self.add_command(ep.load(), ep.name) + self._loaded_plugin_commands = True + + def get_command(self, ctx, name): + self._load_plugin_commands() + + # We load built-in commands first as these should always be the + # same no matter what the app does. If the app does want to + # override this it needs to make a custom instance of this group + # and not attach the default commands. + # + # This also means that the script stays functional in case the + # application completely fails. + rv = AppGroup.get_command(self, ctx, name) + if rv is not None: + return rv + + info = ctx.ensure_object(ScriptInfo) + try: + rv = info.load_app().cli.get_command(ctx, name) + if rv is not None: + return rv + except NoAppException: + pass + + def list_commands(self, ctx): + self._load_plugin_commands() + + # The commands available is the list of both the application (if + # available) plus the builtin commands. + rv = set(click.Group.list_commands(self, ctx)) + info = ctx.ensure_object(ScriptInfo) + try: + rv.update(info.load_app().cli.list_commands(ctx)) + except Exception: + # Here we intentionally swallow all exceptions as we don't + # want the help page to break if the app does not exist. + # If someone attempts to use the command we try to create + # the app again and this will give us the error. + pass + return sorted(rv) + + def main(self, *args, **kwargs): + obj = kwargs.get('obj') + if obj is None: + obj = ScriptInfo(create_app=self.create_app) + kwargs['obj'] = obj + kwargs.setdefault('auto_envvar_prefix', 'FLASK') + return AppGroup.main(self, *args, **kwargs) + + +@click.command('run', short_help='Runs a development server.') +@click.option('--host', '-h', default='127.0.0.1', + help='The interface to bind to.') +@click.option('--port', '-p', default=5000, + help='The port to bind to.') +@click.option('--reload/--no-reload', default=None, + help='Enable or disable the reloader. By default the reloader ' + 'is active if debug is enabled.') +@click.option('--debugger/--no-debugger', default=None, + help='Enable or disable the debugger. By default the debugger ' + 'is active if debug is enabled.') +@click.option('--eager-loading/--lazy-loader', default=None, + help='Enable or disable eager loading. By default eager ' + 'loading is enabled if the reloader is disabled.') +@click.option('--with-threads/--without-threads', default=False, + help='Enable or disable multithreading.') +@pass_script_info +def run_command(info, host, port, reload, debugger, eager_loading, + with_threads): + """Runs a local development server for the Flask application. + + This local server is recommended for development purposes only but it + can also be used for simple intranet deployments. By default it will + not support any sort of concurrency at all to simplify debugging. This + can be changed with the --with-threads option which will enable basic + multithreading. + + The reloader and debugger are by default enabled if the debug flag of + Flask is enabled and disabled otherwise. 
+ """ + from werkzeug.serving import run_simple + + debug = get_debug_flag() + if reload is None: + reload = bool(debug) + if debugger is None: + debugger = bool(debug) + if eager_loading is None: + eager_loading = not reload + + app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) + + # Extra startup messages. This depends a but on Werkzeug internals to + # not double execute when the reloader kicks in. + if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': + # If we have an import path we can print it out now which can help + # people understand what's being served. If we do not have an + # import path because the app was loaded through a callback then + # we won't print anything. + if info.app_import_path is not None: + print(' * Serving Flask app "%s"' % info.app_import_path) + if debug is not None: + print(' * Forcing debug mode %s' % (debug and 'on' or 'off')) + + run_simple(host, port, app, use_reloader=reload, + use_debugger=debugger, threaded=with_threads) + + +@click.command('shell', short_help='Runs a shell in the app context.') +@with_appcontext +def shell_command(): + """Runs an interactive Python shell in the context of a given + Flask application. The application will populate the default + namespace of this shell according to it's configuration. + + This is useful for executing small snippets of management code + without having to manually configuring the application. + """ + import code + from flask.globals import _app_ctx_stack + app = _app_ctx_stack.top.app + banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % ( + sys.version, + sys.platform, + app.import_name, + app.debug and ' [debug]' or '', + app.instance_path, + ) + ctx = {} + + # Support the regular Python interpreter startup script if someone + # is using it. + startup = os.environ.get('PYTHONSTARTUP') + if startup and os.path.isfile(startup): + with open(startup, 'r') as f: + eval(compile(f.read(), startup, 'exec'), ctx) + + ctx.update(app.make_shell_context()) + + code.interact(banner=banner, local=ctx) + + +cli = FlaskGroup(help="""\ +This shell command acts as general utility script for Flask applications. + +It loads the application configured (either through the FLASK_APP environment +variable) and then provides commands either provided by the application or +Flask itself. + +The most useful commands are the "run" and "shell" command. + +Example usage: + +\b + %(prefix)s%(cmd)s FLASK_APP=hello + %(prefix)s%(cmd)s FLASK_DEBUG=1 + %(prefix)sflask run +""" % { + 'cmd': os.name == 'posix' and 'export' or 'set', + 'prefix': os.name == 'posix' and '$ ' or '', +}) + + +def main(as_module=False): + this_module = __package__ + '.cli' + args = sys.argv[1:] + + if as_module: + if sys.version_info >= (2, 7): + name = 'python -m ' + this_module.rsplit('.', 1)[0] + else: + name = 'python -m ' + this_module + + # This module is always executed as "python -m flask.run" and as such + # we need to ensure that we restore the actual command line so that + # the reloader can properly operate. + sys.argv = ['-m', this_module] + sys.argv[1:] + else: + name = None + + cli.main(args=args, prog_name=name) + + +if __name__ == '__main__': + main(as_module=True) diff --git a/venv/lib/python2.7/site-packages/flask/config.py b/venv/lib/python2.7/site-packages/flask/config.py new file mode 100644 index 0000000..426a23a --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/config.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- +""" + flask.config + ~~~~~~~~~~~~ + + Implements the configuration related objects. 
+ + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import types +import errno + +from werkzeug.utils import import_string +from ._compat import string_types, iteritems +from . import json + + +class ConfigAttribute(object): + """Makes an attribute forward to the config""" + + def __init__(self, name, get_converter=None): + self.__name__ = name + self.get_converter = get_converter + + def __get__(self, obj, type=None): + if obj is None: + return self + rv = obj.config[self.__name__] + if self.get_converter is not None: + rv = self.get_converter(rv) + return rv + + def __set__(self, obj, value): + obj.config[self.__name__] = value + + +class Config(dict): + """Works exactly like a dict but provides ways to fill it from files + or special dictionaries. There are two common patterns to populate the + config. + + Either you can fill the config from a config file:: + + app.config.from_pyfile('yourconfig.cfg') + + Or alternatively you can define the configuration options in the + module that calls :meth:`from_object` or provide an import path to + a module that should be loaded. It is also possible to tell it to + use the same module and with that provide the configuration values + just before the call:: + + DEBUG = True + SECRET_KEY = 'development key' + app.config.from_object(__name__) + + In both cases (loading from any Python file or loading from modules), + only uppercase keys are added to the config. This makes it possible to use + lowercase values in the config file for temporary values that are not added + to the config or to define the config keys in the same file that implements + the application. + + Probably the most interesting way to load configurations is from an + environment variable pointing to a file:: + + app.config.from_envvar('YOURAPPLICATION_SETTINGS') + + In this case before launching the application you have to set this + environment variable to the file you want to use. On Linux and OS X + use the export statement:: + + export YOURAPPLICATION_SETTINGS='/path/to/config/file' + + On windows use `set` instead. + + :param root_path: path to which files are read relative from. When the + config object is created by the application, this is + the application's :attr:`~flask.Flask.root_path`. + :param defaults: an optional dictionary of default values + """ + + def __init__(self, root_path, defaults=None): + dict.__init__(self, defaults or {}) + self.root_path = root_path + + def from_envvar(self, variable_name, silent=False): + """Loads a configuration from an environment variable pointing to + a configuration file. This is basically just a shortcut with nicer + error messages for this line of code:: + + app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) + + :param variable_name: name of the environment variable + :param silent: set to ``True`` if you want silent failure for missing + files. + :return: bool. ``True`` if able to load config, ``False`` otherwise. + """ + rv = os.environ.get(variable_name) + if not rv: + if silent: + return False + raise RuntimeError('The environment variable %r is not set ' + 'and as such configuration could not be ' + 'loaded. Set this variable and make it ' + 'point to a configuration file' % + variable_name) + return self.from_pyfile(rv, silent=silent) + + def from_pyfile(self, filename, silent=False): + """Updates the values in the config from a Python file. This function + behaves as if the file was imported as module with the + :meth:`from_object` function. 
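+
+        A short sketch (``settings.cfg`` is a hypothetical file containing
+        uppercase keys such as ``DEBUG = True``)::
+
+            app.config.from_pyfile('settings.cfg')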
+ + :param filename: the filename of the config. This can either be an + absolute filename or a filename relative to the + root path. + :param silent: set to ``True`` if you want silent failure for missing + files. + + .. versionadded:: 0.7 + `silent` parameter. + """ + filename = os.path.join(self.root_path, filename) + d = types.ModuleType('config') + d.__file__ = filename + try: + with open(filename) as config_file: + exec(compile(config_file.read(), filename, 'exec'), d.__dict__) + except IOError as e: + if silent and e.errno in (errno.ENOENT, errno.EISDIR): + return False + e.strerror = 'Unable to load configuration file (%s)' % e.strerror + raise + self.from_object(d) + return True + + def from_object(self, obj): + """Updates the values from the given object. An object can be of one + of the following two types: + + - a string: in this case the object with that name will be imported + - an actual object reference: that object is used directly + + Objects are usually either modules or classes. + + Just the uppercase variables in that object are stored in the config. + Example usage:: + + app.config.from_object('yourapplication.default_config') + from yourapplication import default_config + app.config.from_object(default_config) + + You should not use this function to load the actual configuration but + rather configuration defaults. The actual config should be loaded + with :meth:`from_pyfile` and ideally from a location not within the + package because the package might be installed system wide. + + :param obj: an import name or object + """ + if isinstance(obj, string_types): + obj = import_string(obj) + for key in dir(obj): + if key.isupper(): + self[key] = getattr(obj, key) + + def from_json(self, filename, silent=False): + """Updates the values in the config from a JSON file. This function + behaves as if the JSON object was a dictionary and passed to the + :meth:`from_mapping` function. + + :param filename: the filename of the JSON file. This can either be an + absolute filename or a filename relative to the + root path. + :param silent: set to ``True`` if you want silent failure for missing + files. + + .. versionadded:: 0.11 + """ + filename = os.path.join(self.root_path, filename) + + try: + with open(filename) as json_file: + obj = json.loads(json_file.read()) + except IOError as e: + if silent and e.errno in (errno.ENOENT, errno.EISDIR): + return False + e.strerror = 'Unable to load configuration file (%s)' % e.strerror + raise + return self.from_mapping(obj) + + def from_mapping(self, *mapping, **kwargs): + """Updates the config like :meth:`update` ignoring items with non-upper + keys. + + .. versionadded:: 0.11 + """ + mappings = [] + if len(mapping) == 1: + if hasattr(mapping[0], 'items'): + mappings.append(mapping[0].items()) + else: + mappings.append(mapping[0]) + elif len(mapping) > 1: + raise TypeError( + 'expected at most 1 positional argument, got %d' % len(mapping) + ) + mappings.append(kwargs.items()) + for mapping in mappings: + for (key, value) in mapping: + if key.isupper(): + self[key] = value + return True + + def get_namespace(self, namespace, lowercase=True, trim_namespace=True): + """Returns a dictionary containing a subset of configuration options + that match the specified namespace/prefix. 
Example usage::
+
+            app.config['IMAGE_STORE_TYPE'] = 'fs'
+            app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+            app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+            image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+        The resulting dictionary `image_store_config` would look like::
+
+            {
+                'type': 'fs',
+                'path': '/var/app/images',
+                'base_url': 'http://img.website.com'
+            }
+
+        This is often useful when configuration options map directly to
+        keyword arguments in functions or class constructors.
+
+        :param namespace: a configuration namespace
+        :param lowercase: a flag indicating if the keys of the resulting
+            dictionary should be lowercase
+        :param trim_namespace: a flag indicating if the keys of the resulting
+            dictionary should not include the namespace
+
+        .. versionadded:: 0.11
+        """
+        rv = {}
+        for k, v in iteritems(self):
+            if not k.startswith(namespace):
+                continue
+            if trim_namespace:
+                key = k[len(namespace):]
+            else:
+                key = k
+            if lowercase:
+                key = key.lower()
+            rv[key] = v
+        return rv
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
diff --git a/venv/lib/python2.7/site-packages/flask/ctx.py b/venv/lib/python2.7/site-packages/flask/ctx.py
new file mode 100644
index 0000000..480d9c5
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/flask/ctx.py
@@ -0,0 +1,410 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.ctx
+    ~~~~~~~~~
+
+    Implements the objects required to keep the context.
+
+    :copyright: (c) 2015 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import sys
+from functools import update_wrapper
+
+from werkzeug.exceptions import HTTPException
+
+from .globals import _request_ctx_stack, _app_ctx_stack
+from .signals import appcontext_pushed, appcontext_popped
+from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
+
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+class _AppCtxGlobals(object):
+    """A plain object."""
+
+    def get(self, name, default=None):
+        return self.__dict__.get(name, default)
+
+    def pop(self, name, default=_sentinel):
+        if default is _sentinel:
+            return self.__dict__.pop(name)
+        else:
+            return self.__dict__.pop(name, default)
+
+    def setdefault(self, name, default=None):
+        return self.__dict__.setdefault(name, default)
+
+    def __contains__(self, item):
+        return item in self.__dict__
+
+    def __iter__(self):
+        return iter(self.__dict__)
+
+    def __repr__(self):
+        top = _app_ctx_stack.top
+        if top is not None:
+            return '<flask.g of %r>' % top.app.name
+        return object.__repr__(self)
+
+
+def after_this_request(f):
+    """Executes a function after this request. This is useful to modify
+    response objects. The function is passed the response object and has
+    to return the same or a new one.
+
+    Example::
+
+        @app.route('/')
+        def index():
+            @after_this_request
+            def add_header(response):
+                response.headers['X-Foo'] = 'Parachute'
+                return response
+            return 'Hello World!'
+
+    This is more useful if a function other than the view function wants to
+    modify a response. For instance think of a decorator that wants to add
+    some headers without converting the return value into a response object.
+
+    .. versionadded:: 0.9
+    """
+    _request_ctx_stack.top._after_request_functions.append(f)
+    return f
+
+
+def copy_current_request_context(f):
+    """A helper function that decorates a function to retain the current
+    request context. This is useful when working with greenlets.
+    The moment the function is decorated a copy of the request context is
+    created and then pushed when the function is called.
+
+    Example::
+
+        import gevent
+        from flask import copy_current_request_context
+
+        @app.route('/')
+        def index():
+            @copy_current_request_context
+            def do_some_work():
+                # do some work here, it can access flask.request like you
+                # would otherwise in the view function.
+                ...
+            gevent.spawn(do_some_work)
+            return 'Regular response'
+
+    .. versionadded:: 0.10
+    """
+    top = _request_ctx_stack.top
+    if top is None:
+        raise RuntimeError('This decorator can only be used at local scopes '
+            'when a request context is on the stack. For instance within '
+            'view functions.')
+    reqctx = top.copy()
+
+    def wrapper(*args, **kwargs):
+        with reqctx:
+            return f(*args, **kwargs)
+    return update_wrapper(wrapper, f)
+
+
+def has_request_context():
+    """If you have code that wants to test if a request context is there or
+    not this function can be used. For instance, you may want to take
+    advantage of request information if the request object is available,
+    but fail silently if it is unavailable.
+
+    ::
+
+        class User(db.Model):
+
+            def __init__(self, username, remote_addr=None):
+                self.username = username
+                if remote_addr is None and has_request_context():
+                    remote_addr = request.remote_addr
+                self.remote_addr = remote_addr
+
+    Alternatively you can also just test any of the context bound objects
+    (such as :class:`request` or :class:`g` for truthiness)::
+
+        class User(db.Model):
+
+            def __init__(self, username, remote_addr=None):
+                self.username = username
+                if remote_addr is None and request:
+                    remote_addr = request.remote_addr
+                self.remote_addr = remote_addr
+
+    .. versionadded:: 0.7
+    """
+    return _request_ctx_stack.top is not None
+
+
+def has_app_context():
+    """Works like :func:`has_request_context` but for the application
+    context. You can also just do a boolean check on the
+    :data:`current_app` object instead.
+
+    .. versionadded:: 0.9
+    """
+    return _app_ctx_stack.top is not None
+
+
+class AppContext(object):
+    """The application context binds an application object implicitly
+    to the current thread or greenlet, similar to how the
+    :class:`RequestContext` binds request information. The application
+    context is also implicitly created if a request context is created
+    but the application is not on top of the individual application
+    context.
+    """
+
+    def __init__(self, app):
+        self.app = app
+        self.url_adapter = app.create_url_adapter(None)
+        self.g = app.app_ctx_globals_class()
+
+        # Like request contexts, app contexts can be pushed multiple times,
+        # but for them a basic "refcount" is enough to track them.
+        self._refcnt = 0
+
+    def push(self):
+        """Binds the app context to the current context."""
+        self._refcnt += 1
+        if hasattr(sys, 'exc_clear'):
+            sys.exc_clear()
+        _app_ctx_stack.push(self)
+        appcontext_pushed.send(self.app)
+
+    def pop(self, exc=_sentinel):
+        """Pops the app context."""
+        try:
+            self._refcnt -= 1
+            if self._refcnt <= 0:
+                if exc is _sentinel:
+                    exc = sys.exc_info()[1]
+                self.app.do_teardown_appcontext(exc)
+        finally:
+            rv = _app_ctx_stack.pop()
+            assert rv is self, 'Popped wrong app context. 
(%r instead of %r)' \ + % (rv, self) + appcontext_popped.send(self.app) + + def __enter__(self): + self.push() + return self + + def __exit__(self, exc_type, exc_value, tb): + self.pop(exc_value) + + if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: + reraise(exc_type, exc_value, tb) + + +class RequestContext(object): + """The request context contains all request relevant information. It is + created at the beginning of the request and pushed to the + `_request_ctx_stack` and removed at the end of it. It will create the + URL adapter and request object for the WSGI environment provided. + + Do not attempt to use this class directly, instead use + :meth:`~flask.Flask.test_request_context` and + :meth:`~flask.Flask.request_context` to create this object. + + When the request context is popped, it will evaluate all the + functions registered on the application for teardown execution + (:meth:`~flask.Flask.teardown_request`). + + The request context is automatically popped at the end of the request + for you. In debug mode the request context is kept around if + exceptions happen so that interactive debuggers have a chance to + introspect the data. With 0.4 this can also be forced for requests + that did not fail and outside of ``DEBUG`` mode. By setting + ``'flask._preserve_context'`` to ``True`` on the WSGI environment the + context will not pop itself at the end of the request. This is used by + the :meth:`~flask.Flask.test_client` for example to implement the + deferred cleanup functionality. + + You might find this helpful for unittests where you need the + information from the context local around for a little longer. Make + sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in + that situation, otherwise your unittests will leak memory. + """ + + def __init__(self, app, environ, request=None): + self.app = app + if request is None: + request = app.request_class(environ) + self.request = request + self.url_adapter = app.create_url_adapter(self.request) + self.flashes = None + self.session = None + + # Request contexts can be pushed multiple times and interleaved with + # other request contexts. Now only if the last level is popped we + # get rid of them. Additionally if an application context is missing + # one is created implicitly so for each level we add this information + self._implicit_app_ctx_stack = [] + + # indicator if the context was preserved. Next time another context + # is pushed the preserved context is popped. + self.preserved = False + + # remembers the exception for pop if there is one in case the context + # preservation kicks in. + self._preserved_exc = None + + # Functions that should be executed after the request on the response + # object. These will be called before the regular "after_request" + # functions. + self._after_request_functions = [] + + self.match_request() + + def _get_g(self): + return _app_ctx_stack.top.g + def _set_g(self, value): + _app_ctx_stack.top.g = value + g = property(_get_g, _set_g) + del _get_g, _set_g + + def copy(self): + """Creates a copy of this request context with the same request object. + This can be used to move a request context to a different greenlet. + Because the actual request object is the same this cannot be used to + move a request context to a different thread unless access to the + request object is locked. + + .. 
versionadded:: 0.10 + """ + return self.__class__(self.app, + environ=self.request.environ, + request=self.request + ) + + def match_request(self): + """Can be overridden by a subclass to hook into the matching + of the request. + """ + try: + url_rule, self.request.view_args = \ + self.url_adapter.match(return_rule=True) + self.request.url_rule = url_rule + except HTTPException as e: + self.request.routing_exception = e + + def push(self): + """Binds the request context to the current context.""" + # If an exception occurs in debug mode or if context preservation is + # activated under exception situations exactly one context stays + # on the stack. The rationale is that you want to access that + # information under debug situations. However if someone forgets to + # pop that context again we want to make sure that on the next push + # it's invalidated, otherwise we run at risk that something leaks + # memory. This is usually only a problem in test suite since this + # functionality is not active in production environments. + top = _request_ctx_stack.top + if top is not None and top.preserved: + top.pop(top._preserved_exc) + + # Before we push the request context we have to ensure that there + # is an application context. + app_ctx = _app_ctx_stack.top + if app_ctx is None or app_ctx.app != self.app: + app_ctx = self.app.app_context() + app_ctx.push() + self._implicit_app_ctx_stack.append(app_ctx) + else: + self._implicit_app_ctx_stack.append(None) + + if hasattr(sys, 'exc_clear'): + sys.exc_clear() + + _request_ctx_stack.push(self) + + # Open the session at the moment that the request context is + # available. This allows a custom open_session method to use the + # request context (e.g. code that access database information + # stored on `g` instead of the appcontext). + self.session = self.app.open_session(self.request) + if self.session is None: + self.session = self.app.make_null_session() + + def pop(self, exc=_sentinel): + """Pops the request context and unbinds it by doing that. This will + also trigger the execution of functions registered by the + :meth:`~flask.Flask.teardown_request` decorator. + + .. versionchanged:: 0.9 + Added the `exc` argument. + """ + app_ctx = self._implicit_app_ctx_stack.pop() + + try: + clear_request = False + if not self._implicit_app_ctx_stack: + self.preserved = False + self._preserved_exc = None + if exc is _sentinel: + exc = sys.exc_info()[1] + self.app.do_teardown_request(exc) + + # If this interpreter supports clearing the exception information + # we do that now. This will only go into effect on Python 2.x, + # on 3.x it disappears automatically at the end of the exception + # stack. + if hasattr(sys, 'exc_clear'): + sys.exc_clear() + + request_close = getattr(self.request, 'close', None) + if request_close is not None: + request_close() + clear_request = True + finally: + rv = _request_ctx_stack.pop() + + # get rid of circular dependencies at the end of the request + # so that we don't require the GC to be active. + if clear_request: + rv.request.environ['werkzeug.request'] = None + + # Get rid of the app as well if necessary. + if app_ctx is not None: + app_ctx.pop(exc) + + assert rv is self, 'Popped wrong request context. 
' \ + '(%r instead of %r)' % (rv, self) + + def auto_pop(self, exc): + if self.request.environ.get('flask._preserve_context') or \ + (exc is not None and self.app.preserve_context_on_exception): + self.preserved = True + self._preserved_exc = exc + else: + self.pop(exc) + + def __enter__(self): + self.push() + return self + + def __exit__(self, exc_type, exc_value, tb): + # do not pop the request stack if we are in debug mode and an + # exception happened. This will allow the debugger to still + # access the request object in the interactive shell. Furthermore + # the context can be force kept alive for the test client. + # See flask.testing for how this works. + self.auto_pop(exc_value) + + if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: + reraise(exc_type, exc_value, tb) + + def __repr__(self): + return '<%s \'%s\' [%s] of %s>' % ( + self.__class__.__name__, + self.request.url, + self.request.method, + self.app.name, + ) diff --git a/venv/lib/python2.7/site-packages/flask/debughelpers.py b/venv/lib/python2.7/site-packages/flask/debughelpers.py new file mode 100644 index 0000000..90710dd --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/debughelpers.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +""" + flask.debughelpers + ~~~~~~~~~~~~~~~~~~ + + Various helpers to make the development experience better. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +from ._compat import implements_to_string, text_type +from .app import Flask +from .blueprints import Blueprint +from .globals import _request_ctx_stack + + +class UnexpectedUnicodeError(AssertionError, UnicodeError): + """Raised in places where we want some better error reporting for + unexpected unicode or binary data. + """ + + +@implements_to_string +class DebugFilesKeyError(KeyError, AssertionError): + """Raised from request.files during debugging. The idea is that it can + provide a better error message than just a generic KeyError/BadRequest. + """ + + def __init__(self, request, key): + form_matches = request.form.getlist(key) + buf = ['You tried to access the file "%s" in the request.files ' + 'dictionary but it does not exist. The mimetype for the request ' + 'is "%s" instead of "multipart/form-data" which means that no ' + 'file contents were transmitted. To fix this error you should ' + 'provide enctype="multipart/form-data" in your form.' % + (key, request.mimetype)] + if form_matches: + buf.append('\n\nThe browser instead transmitted some file names. ' + 'This was submitted: %s' % ', '.join('"%s"' % x + for x in form_matches)) + self.msg = ''.join(buf) + + def __str__(self): + return self.msg + + +class FormDataRoutingRedirect(AssertionError): + """This exception is raised by Flask in debug mode if it detects a + redirect caused by the routing system when the request method is not + GET, HEAD or OPTIONS. Reasoning: form data will be dropped. + """ + + def __init__(self, request): + exc = request.routing_exception + buf = ['A request was sent to this URL (%s) but a redirect was ' + 'issued automatically by the routing system to "%s".' 
+ % (request.url, exc.new_url)] + + # In case just a slash was appended we can be extra helpful + if request.base_url + '/' == exc.new_url.split('?')[0]: + buf.append(' The URL was defined with a trailing slash so ' + 'Flask will automatically redirect to the URL ' + 'with the trailing slash if it was accessed ' + 'without one.') + + buf.append(' Make sure to directly send your %s-request to this URL ' + 'since we can\'t make browsers or HTTP clients redirect ' + 'with form data reliably or without user interaction.' % + request.method) + buf.append('\n\nNote: this exception is only raised in debug mode') + AssertionError.__init__(self, ''.join(buf).encode('utf-8')) + + +def attach_enctype_error_multidict(request): + """Since Flask 0.8 we're monkeypatching the files object in case a + request is detected that does not use multipart form data but the files + object is accessed. + """ + oldcls = request.files.__class__ + class newcls(oldcls): + def __getitem__(self, key): + try: + return oldcls.__getitem__(self, key) + except KeyError: + if key not in request.form: + raise + raise DebugFilesKeyError(request, key) + newcls.__name__ = oldcls.__name__ + newcls.__module__ = oldcls.__module__ + request.files.__class__ = newcls + + +def _dump_loader_info(loader): + yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__) + for key, value in sorted(loader.__dict__.items()): + if key.startswith('_'): + continue + if isinstance(value, (tuple, list)): + if not all(isinstance(x, (str, text_type)) for x in value): + continue + yield '%s:' % key + for item in value: + yield ' - %s' % item + continue + elif not isinstance(value, (str, text_type, int, float, bool)): + continue + yield '%s: %r' % (key, value) + + +def explain_template_loading_attempts(app, template, attempts): + """This should help developers understand what failed""" + info = ['Locating template "%s":' % template] + total_found = 0 + blueprint = None + reqctx = _request_ctx_stack.top + if reqctx is not None and reqctx.request.blueprint is not None: + blueprint = reqctx.request.blueprint + + for idx, (loader, srcobj, triple) in enumerate(attempts): + if isinstance(srcobj, Flask): + src_info = 'application "%s"' % srcobj.import_name + elif isinstance(srcobj, Blueprint): + src_info = 'blueprint "%s" (%s)' % (srcobj.name, + srcobj.import_name) + else: + src_info = repr(srcobj) + + info.append('% 5d: trying loader of %s' % ( + idx + 1, src_info)) + + for line in _dump_loader_info(loader): + info.append(' %s' % line) + + if triple is None: + detail = 'no match' + else: + detail = 'found (%r)' % (triple[1] or '') + total_found += 1 + info.append(' -> %s' % detail) + + seems_fishy = False + if total_found == 0: + info.append('Error: the template could not be found.') + seems_fishy = True + elif total_found > 1: + info.append('Warning: multiple loaders returned a match for the template.') + seems_fishy = True + + if blueprint is not None and seems_fishy: + info.append(' The template was looked up from an endpoint that ' + 'belongs to the blueprint "%s".' 
% blueprint) + info.append(' Maybe you did not place a template in the right folder?') + info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') + + app.logger.info('\n'.join(info)) diff --git a/venv/lib/python2.7/site-packages/flask/ext/__init__.py b/venv/lib/python2.7/site-packages/flask/ext/__init__.py new file mode 100644 index 0000000..051f44a --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/ext/__init__.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +""" + flask.ext + ~~~~~~~~~ + + Redirect imports for extensions. This module basically makes it possible + for us to transition from flaskext.foo to flask_foo without having to + force all extensions to upgrade at the same time. + + When a user does ``from flask.ext.foo import bar`` it will attempt to + import ``from flask_foo import bar`` first and when that fails it will + try to import ``from flaskext.foo import bar``. + + We're switching from namespace packages because it was just too painful for + everybody involved. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + + +def setup(): + from ..exthook import ExtensionImporter + importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__) + importer.install() + + +setup() +del setup diff --git a/venv/lib/python2.7/site-packages/flask/exthook.py b/venv/lib/python2.7/site-packages/flask/exthook.py new file mode 100644 index 0000000..6522e06 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/exthook.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +""" + flask.exthook + ~~~~~~~~~~~~~ + + Redirect imports for extensions. This module basically makes it possible + for us to transition from flaskext.foo to flask_foo without having to + force all extensions to upgrade at the same time. + + When a user does ``from flask.ext.foo import bar`` it will attempt to + import ``from flask_foo import bar`` first and when that fails it will + try to import ``from flaskext.foo import bar``. + + We're switching from namespace packages because it was just too painful for + everybody involved. + + This is used by `flask.ext`. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import sys +import os +import warnings +from ._compat import reraise + + +class ExtDeprecationWarning(DeprecationWarning): + pass + +warnings.simplefilter('always', ExtDeprecationWarning) + + +class ExtensionImporter(object): + """This importer redirects imports from this submodule to other locations. + This makes it possible to transition from the old flaskext.name to the + newer flask_name without people having a hard time. + """ + + def __init__(self, module_choices, wrapper_module): + self.module_choices = module_choices + self.wrapper_module = wrapper_module + self.prefix = wrapper_module + '.' 
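+        # E.g. for the wrapper module 'flask.ext' the prefix is 'flask.ext.'
+        # and the cutoff below is the number of dotted segments to strip
+        # when mapping 'flask.ext.foo' to the real module name 'foo'.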
+        self.prefix_cutoff = wrapper_module.count('.') + 1
+
+    def __eq__(self, other):
+        return self.__class__.__module__ == other.__class__.__module__ and \
+               self.__class__.__name__ == other.__class__.__name__ and \
+               self.wrapper_module == other.wrapper_module and \
+               self.module_choices == other.module_choices
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def install(self):
+        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
+
+    def find_module(self, fullname, path=None):
+        if fullname.startswith(self.prefix) and \
+           fullname != 'flask.ext.ExtDeprecationWarning':
+            return self
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            return sys.modules[fullname]
+
+        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
+
+        warnings.warn(
+            "Importing flask.ext.{x} is deprecated, use flask_{x} instead."
+            .format(x=modname), ExtDeprecationWarning
+        )
+
+        for path in self.module_choices:
+            realname = path % modname
+            try:
+                __import__(realname)
+            except ImportError:
+                exc_type, exc_value, tb = sys.exc_info()
+                # Since we only establish the entry in sys.modules at the
+                # very end this seems to be redundant, but if recursive
+                # imports happen we will call into the same import a second
+                # time. On the second invocation we still don't have an
+                # entry for fullname in sys.modules, but we will end up with
+                # the same fake module name and that import will succeed
+                # since this one already has a temporary entry in the
+                # modules dict. Since that one "succeeded" temporarily the
+                # second invocation now will have created a fullname entry
+                # in sys.modules which we have to kill.
+                sys.modules.pop(fullname, None)
+
+                # If it's an important traceback we reraise it, otherwise
+                # we swallow it and try the next choice. The skipped frame
+                # is the one from __import__ above which we don't care about.
+                if self.is_important_traceback(realname, tb):
+                    reraise(exc_type, exc_value, tb.tb_next)
+                continue
+            module = sys.modules[fullname] = sys.modules[realname]
+            if '.' not in modname:
+                setattr(sys.modules[self.wrapper_module], modname, module)
+
+            if realname.startswith('flaskext.'):
+                warnings.warn(
+                    "Detected extension named flaskext.{x}, please rename it "
+                    "to flask_{x}. The old form is deprecated."
+                    .format(x=modname), ExtDeprecationWarning
+                )
+
+            return module
+        raise ImportError('No module named %s' % fullname)
+
+    def is_important_traceback(self, important_module, tb):
+        """Walks a traceback's frames and checks if any of the frames
+        originated in the given important module. If that is the case then we
+        were able to import the module itself but apparently something went
+        wrong when the module was imported. (E.g. a nested import failed.)
+        """
+        while tb is not None:
+            if self.is_important_frame(important_module, tb):
+                return True
+            tb = tb.tb_next
+        return False
+
+    def is_important_frame(self, important_module, tb):
+        """Checks a single frame if it's important."""
+        g = tb.tb_frame.f_globals
+        if '__name__' not in g:
+            return False
+
+        module_name = g['__name__']
+
+        # Python 2.7 behavior: modules are cleaned up late so the
+        # name shows up properly here. Success!
+        if module_name == important_module:
+            return True
+
+        # Some Python versions will clean up modules so early that the
+        # module name at that point is no longer set. Try guessing from
+        # the filename then.
+ filename = os.path.abspath(tb.tb_frame.f_code.co_filename) + test_string = os.path.sep + important_module.replace('.', os.path.sep) + return test_string + '.py' in filename or \ + test_string + os.path.sep + '__init__.py' in filename diff --git a/venv/lib/python2.7/site-packages/flask/globals.py b/venv/lib/python2.7/site-packages/flask/globals.py new file mode 100644 index 0000000..0b70a3e --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/globals.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +""" + flask.globals + ~~~~~~~~~~~~~ + + Defines all the global objects that are proxies to the current + active context. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +from functools import partial +from werkzeug.local import LocalStack, LocalProxy + + +_request_ctx_err_msg = '''\ +Working outside of request context. + +This typically means that you attempted to use functionality that needed +an active HTTP request. Consult the documentation on testing for +information about how to avoid this problem.\ +''' +_app_ctx_err_msg = '''\ +Working outside of application context. + +This typically means that you attempted to use functionality that needed +to interface with the current application object in a way. To solve +this set up an application context with app.app_context(). See the +documentation for more information.\ +''' + + +def _lookup_req_object(name): + top = _request_ctx_stack.top + if top is None: + raise RuntimeError(_request_ctx_err_msg) + return getattr(top, name) + + +def _lookup_app_object(name): + top = _app_ctx_stack.top + if top is None: + raise RuntimeError(_app_ctx_err_msg) + return getattr(top, name) + + +def _find_app(): + top = _app_ctx_stack.top + if top is None: + raise RuntimeError(_app_ctx_err_msg) + return top.app + + +# context locals +_request_ctx_stack = LocalStack() +_app_ctx_stack = LocalStack() +current_app = LocalProxy(_find_app) +request = LocalProxy(partial(_lookup_req_object, 'request')) +session = LocalProxy(partial(_lookup_req_object, 'session')) +g = LocalProxy(partial(_lookup_app_object, 'g')) diff --git a/venv/lib/python2.7/site-packages/flask/helpers.py b/venv/lib/python2.7/site-packages/flask/helpers.py new file mode 100644 index 0000000..c744bb8 --- /dev/null +++ b/venv/lib/python2.7/site-packages/flask/helpers.py @@ -0,0 +1,930 @@ +# -*- coding: utf-8 -*- +""" + flask.helpers + ~~~~~~~~~~~~~ + + Implements various helpers. + + :copyright: (c) 2015 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +import os +import sys +import pkgutil +import posixpath +import mimetypes +from time import time +from zlib import adler32 +from threading import RLock +from werkzeug.routing import BuildError +from functools import update_wrapper + +try: + from werkzeug.urls import url_quote +except ImportError: + from urlparse import quote as url_quote + +from werkzeug.datastructures import Headers +from werkzeug.exceptions import BadRequest, NotFound + +# this was moved in 0.7 +try: + from werkzeug.wsgi import wrap_file +except ImportError: + from werkzeug.utils import wrap_file + +from jinja2 import FileSystemLoader + +from .signals import message_flashed +from .globals import session, _request_ctx_stack, _app_ctx_stack, \ + current_app, request +from ._compat import string_types, text_type + + +# sentinel +_missing = object() + + +# what separators does this operating system provide that are not a slash? 
+# this is used by the send_from_directory function to ensure that nobody is +# able to access files from outside the filesystem. +_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] + if sep not in (None, '/')) + + +def get_debug_flag(default=None): + val = os.environ.get('FLASK_DEBUG') + if not val: + return default + return val not in ('0', 'false', 'no') + + +def _endpoint_from_view_func(view_func): + """Internal helper that returns the default endpoint for a given + function. This always is the function name. + """ + assert view_func is not None, 'expected view func if endpoint ' \ + 'is not provided.' + return view_func.__name__ + + +def stream_with_context(generator_or_function): + """Request contexts disappear when the response is started on the server. + This is done for efficiency reasons and to make it less likely to encounter + memory leaks with badly written WSGI middlewares. The downside is that if + you are using streamed responses, the generator cannot access request bound + information any more. + + This function however can help you keep the context around for longer:: + + from flask import stream_with_context, request, Response + + @app.route('/stream') + def streamed_response(): + @stream_with_context + def generate(): + yield 'Hello ' + yield request.args['name'] + yield '!' + return Response(generate()) + + Alternatively it can also be used around a specific generator:: + + from flask import stream_with_context, request, Response + + @app.route('/stream') + def streamed_response(): + def generate(): + yield 'Hello ' + yield request.args['name'] + yield '!' + return Response(stream_with_context(generate())) + + .. versionadded:: 0.9 + """ + try: + gen = iter(generator_or_function) + except TypeError: + def decorator(*args, **kwargs): + gen = generator_or_function(*args, **kwargs) + return stream_with_context(gen) + return update_wrapper(decorator, generator_or_function) + + def generator(): + ctx = _request_ctx_stack.top + if ctx is None: + raise RuntimeError('Attempted to stream with context but ' + 'there was no context in the first place to keep around.') + with ctx: + # Dummy sentinel. Has to be inside the context block or we're + # not actually keeping the context around. + yield None + + # The try/finally is here so that if someone passes a WSGI level + # iterator in we're still running the cleanup logic. Generators + # don't need that because they are closed on their destruction + # automatically. + try: + for item in gen: + yield item + finally: + if hasattr(gen, 'close'): + gen.close() + + # The trick is to start the generator. Then the code execution runs until + # the first dummy None is yielded at which point the context was already + # pushed. This item is discarded. Then when the iteration continues the + # real generator is executed. + wrapped_g = generator() + next(wrapped_g) + return wrapped_g + + +def make_response(*args): + """Sometimes it is necessary to set additional headers in a view. Because + views do not have to return response objects but can return a value that + is converted into a response object by Flask itself, it becomes tricky to + add headers to it. This function can be called instead of using a return + and you will get a response object which you can use to attach headers. 
+
+    If the view looked like this and you want to add a new header::
+
+        def index():
+            return render_template('index.html', foo=42)
+
+    You can now do something like this::
+
+        def index():
+            response = make_response(render_template('index.html', foo=42))
+            response.headers['X-Parachutes'] = 'parachutes are cool'
+            return response
+
+    This function accepts the very same arguments you can return from a
+    view function. This for example creates a response with a 404 error
+    code::
+
+        response = make_response(render_template('not_found.html'), 404)
+
+    The other use case of this function is to force the return value of a
+    view function into a response which is helpful with view
+    decorators::
+
+        response = make_response(view_function())
+        response.headers['X-Parachutes'] = 'parachutes are cool'
+
+    Internally this function does the following things:
+
+    -   if no arguments are passed, it creates a new empty response object.
+    -   if one argument is passed, :meth:`flask.Flask.make_response`
+        is invoked with it.
+    -   if more than one argument is passed, the arguments are passed
+        to the :meth:`flask.Flask.make_response` function as tuple.
+
+    .. versionadded:: 0.6
+    """
+    if not args:
+        return current_app.response_class()
+    if len(args) == 1:
+        args = args[0]
+    return current_app.make_response(args)
+
+
+def url_for(endpoint, **values):
+    """Generates a URL to the given endpoint with the method provided.
+
+    Variable arguments that are unknown to the target endpoint are appended
+    to the generated URL as query arguments. If the value of a query argument
+    is ``None``, the whole pair is skipped. In case blueprints are active
+    you can shortcut references to the same blueprint by prefixing the
+    local endpoint with a dot (``.``).
+
+    This will reference the index function local to the current blueprint::
+
+        url_for('.index')
+
+    For more information, head over to the :ref:`Quickstart <url-building>`.
+
+    To integrate applications, :class:`Flask` has a hook to intercept URL build
+    errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
+    function results in a :exc:`~werkzeug.routing.BuildError` when the current
+    app does not have a URL for the given endpoint and values. When it does, the
+    :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
+    it is not ``None``, which can return a string to use as the result of
+    `url_for` (instead of `url_for`'s default to raise the
+    :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
+    An example::
+
+        def external_url_handler(error, endpoint, values):
+            "Looks up an external URL when `url_for` cannot build a URL."
+            # This is an example of hooking the build_error_handler.
+            # Here, lookup_url is some utility function you've built
+            # which looks up the endpoint in some external URL registry.
+            url = lookup_url(endpoint, **values)
+            if url is None:
+                # External lookup did not have a URL.
+                # Re-raise the BuildError, in context of original traceback.
+                exc_type, exc_value, tb = sys.exc_info()
+                if exc_value is error:
+                    raise exc_type, exc_value, tb
+                else:
+                    raise error
+            # url_for will use this result, instead of raising BuildError.
+            return url
+
+        app.url_build_error_handlers.append(external_url_handler)
+
+    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
+    `endpoint` and `values` are the arguments passed into `url_for`. Note
+    that this is for building URLs outside the current application, and not for
+    handling 404 NotFound errors.
+
+    .. versionadded:: 0.10
+       The `_scheme` parameter was added.
+
+    .. versionadded:: 0.9
+       The `_anchor` and `_method` parameters were added.
+
+    .. versionadded:: 0.9
+       Calls :meth:`Flask.handle_build_error` on
+       :exc:`~werkzeug.routing.BuildError`.
+
+    :param endpoint: the endpoint of the URL (name of the function)
+    :param values: the variable arguments of the URL rule
+    :param _external: if set to ``True``, an absolute URL is generated. Server
+        address can be changed via the ``SERVER_NAME`` configuration variable
+        which defaults to `localhost`.
+    :param _scheme: a string specifying the desired URL scheme. The `_external`
+        parameter must be set to ``True`` or a :exc:`ValueError` is raised. The
+        default behavior uses the same scheme as the current request, or
+        ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
+        request context is available. As of Werkzeug 0.10, this also can be set
+        to an empty string to build protocol-relative URLs.
+    :param _anchor: if provided this is added as an anchor to the URL.
+    :param _method: if provided this explicitly specifies an HTTP method.
+    """
+    appctx = _app_ctx_stack.top
+    reqctx = _request_ctx_stack.top
+    if appctx is None:
+        raise RuntimeError('Attempted to generate a URL without the '
+                           'application context being pushed. This has to be '
+                           'executed when application context is available.')
+
+    # If request specific information is available we have some extra
+    # features that support "relative" URLs.
+    if reqctx is not None:
+        url_adapter = reqctx.url_adapter
+        blueprint_name = request.blueprint
+        if not reqctx.request._is_old_module:
+            if endpoint[:1] == '.':
+                if blueprint_name is not None:
+                    endpoint = blueprint_name + endpoint
+                else:
+                    endpoint = endpoint[1:]
+        else:
+            # TODO: get rid of this deprecated functionality in 1.0
+            if '.' not in endpoint:
+                if blueprint_name is not None:
+                    endpoint = blueprint_name + '.' + endpoint
+            elif endpoint.startswith('.'):
+                endpoint = endpoint[1:]
+        external = values.pop('_external', False)
+
+    # Otherwise go with the url adapter from the appctx and make
+    # the URLs external by default.
+    else:
+        url_adapter = appctx.url_adapter
+        if url_adapter is None:
+            raise RuntimeError('Application was not able to create a URL '
+                               'adapter for request independent URL generation. '
+                               'You might be able to fix this by setting '
+                               'the SERVER_NAME config variable.')
+        external = values.pop('_external', True)
+
+    anchor = values.pop('_anchor', None)
+    method = values.pop('_method', None)
+    scheme = values.pop('_scheme', None)
+    appctx.app.inject_url_defaults(endpoint, values)
+
+    # This is not the best way to deal with this but currently the
+    # underlying Werkzeug router does not support overriding the scheme on
+    # a per build call basis.
+    old_scheme = None
+    if scheme is not None:
+        if not external:
+            raise ValueError('When specifying _scheme, _external must be True')
+        old_scheme = url_adapter.url_scheme
+        url_adapter.url_scheme = scheme
+
+    try:
+        try:
+            rv = url_adapter.build(endpoint, values, method=method,
+                                   force_external=external)
+        finally:
+            if old_scheme is not None:
+                url_adapter.url_scheme = old_scheme
+    except BuildError as error:
+        # We need to inject the values again so that the app callback can
+        # deal with that sort of stuff.
+        values['_external'] = external
+        values['_anchor'] = anchor
+        values['_method'] = method
+        return appctx.app.handle_url_build_error(error, endpoint, values)
+
+    if anchor is not None:
+        rv += '#' + url_quote(anchor)
+    return rv
+
+
+def get_template_attribute(template_name, attribute):
+    """Loads a macro (or variable) that a template exports.  This can be
+    used to invoke a macro from within Python code.  If you for example have
+    a template named :file:`_cider.html` with the following contents:
+
+    .. sourcecode:: html+jinja
+
+        {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+    You can access this from Python code like this::
+
+        hello = get_template_attribute('_cider.html', 'hello')
+        return hello('World')
+
+    .. versionadded:: 0.2
+
+    :param template_name: the name of the template
+    :param attribute: the name of the variable or macro to access
+    """
+    return getattr(current_app.jinja_env.get_template(template_name).module,
+                   attribute)
+
+
+def flash(message, category='message'):
+    """Flashes a message to the next request.  In order to remove the
+    flashed message from the session and to display it to the user,
+    the template has to call :func:`get_flashed_messages`.
+
+    .. versionchanged:: 0.3
+       `category` parameter added.
+
+    :param message: the message to be flashed.
+    :param category: the category for the message.  The following values
+                     are recommended: ``'message'`` for any kind of message,
+                     ``'error'`` for errors, ``'info'`` for information
+                     messages and ``'warning'`` for warnings.  However any
+                     kind of string can be used as category.
+    """
+    # Original implementation:
+    #
+    #     session.setdefault('_flashes', []).append((category, message))
+    #
+    # This assumed that changes made to mutable structures in the session
+    # are always in sync with the session object, which is not true for
+    # session implementations that use external storage for keeping their
+    # keys/values.
+    flashes = session.get('_flashes', [])
+    flashes.append((category, message))
+    session['_flashes'] = flashes
+    message_flashed.send(current_app._get_current_object(),
+                         message=message, category=category)
+
+
+def get_flashed_messages(with_categories=False, category_filter=[]):
+    """Pulls all flashed messages from the session and returns them.
+    Further calls in the same request to the function will return
+    the same messages.  By default just the messages are returned,
+    but when `with_categories` is set to ``True``, the return value will
+    be a list of tuples in the form ``(category, message)`` instead.
+
+    Filter the flashed messages to one or more categories by providing those
+    categories in `category_filter`.  This allows rendering categories in
+    separate HTML blocks.  The `with_categories` and `category_filter`
+    arguments are distinct:
+
+    * `with_categories` controls whether categories are returned with
+      message text (``True`` gives a tuple, where ``False`` gives just the
+      message text).
+    * `category_filter` filters the messages down to only those matching
+      the provided categories.
+
+    See :ref:`message-flashing-pattern` for examples.
+
+    .. versionchanged:: 0.3
+       `with_categories` parameter added.
+
+    .. versionchanged:: 0.9
+       `category_filter` parameter added.
+
+    :param with_categories: set to ``True`` to also receive categories.
+    :param category_filter: whitelist of categories to limit return values
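+
+    A minimal sketch of the flow (the message text and category are
+    illustrative)::
+
+        flash('Profile updated.', 'info')
+        # ...and during the next request:
+        get_flashed_messages(with_categories=True)
+        # -> [('info', 'Profile updated.')]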
+    """
+    flashes = _request_ctx_stack.top.flashes
+    if flashes is None:
+        _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
+            if '_flashes' in session else []
+    if category_filter:
+        flashes = list(filter(lambda f: f[0] in category_filter, flashes))
+    if not with_categories:
+        return [x[1] for x in flashes]
+    return flashes
+
+
+def send_file(filename_or_fp, mimetype=None, as_attachment=False,
+              attachment_filename=None, add_etags=True,
+              cache_timeout=None, conditional=False):
+    """Sends the contents of a file to the client.  This will use the
+    most efficient method available and configured.  By default it will
+    try to use the WSGI server's file_wrapper support.  Alternatively
+    you can set the application's :attr:`~Flask.use_x_sendfile` attribute
+    to ``True`` to directly emit an ``X-Sendfile`` header.  This, however,
+    requires support for ``X-Sendfile`` in the underlying webserver.
+
+    By default it will try to guess the mimetype for you, but you can
+    also explicitly provide one.  For extra security you probably want
+    to send certain files as attachment (HTML for instance).  The mimetype
+    guessing requires a `filename` or an `attachment_filename` to be
+    provided.
+
+    Please never pass filenames to this function from user sources;
+    you should use :func:`send_from_directory` instead.
+
+    .. versionadded:: 0.2
+
+    .. versionadded:: 0.5
+       The `add_etags`, `cache_timeout` and `conditional` parameters were
+       added.  The default behavior is now to attach etags.
+
+    .. versionchanged:: 0.7
+       mimetype guessing and etag support for file objects was
+       deprecated because it was unreliable.  Pass a filename if you are
+       able to, otherwise attach an etag yourself.  This functionality
+       will be removed in Flask 1.0.
+
+    .. versionchanged:: 0.9
+       cache_timeout pulls its default from application config, when
+       ``None``.
+
+    :param filename_or_fp: the filename of the file to send in `latin-1`.
+                           This is relative to the :attr:`~Flask.root_path`
+                           if a relative path is specified.
+                           Alternatively a file object might be provided in
+                           which case ``X-Sendfile`` might not work and fall
+                           back to the traditional method.  Make sure that
+                           the file pointer is positioned at the start of
+                           data to send before calling :func:`send_file`.
+    :param mimetype: the mimetype of the file if provided, otherwise
+                     auto detection happens.
+    :param as_attachment: set to ``True`` if you want to send this file with
+                          a ``Content-Disposition: attachment`` header.
+    :param attachment_filename: the filename for the attachment if it
+                                differs from the file's filename.
+    :param add_etags: set to ``False`` to disable attaching of etags.
+    :param conditional: set to ``True`` to enable conditional responses.
+
+    :param cache_timeout: the timeout in seconds for the headers.  When
+                          ``None`` (default), this value is set by
+                          :meth:`~Flask.get_send_file_max_age` of
+                          :data:`~flask.current_app`.
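+
+    A minimal usage sketch (the route and file path are illustrative)::
+
+        @app.route('/report')
+        def report():
+            return send_file('reports/latest.pdf', as_attachment=True)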
+    """
+    mtime = None
+    if isinstance(filename_or_fp, string_types):
+        filename = filename_or_fp
+        file = None
+    else:
+        from warnings import warn
+        file = filename_or_fp
+        filename = getattr(file, 'name', None)
+
+        # XXX: this behavior is now deprecated because it was unreliable.
+        # removed in Flask 1.0
+        if not attachment_filename and not mimetype \
+           and isinstance(filename, string_types):
+            warn(DeprecationWarning('The filename support for file objects '
+                'passed to send_file is now deprecated.  Pass an '
+                'attachment_filename if you want mimetypes to be guessed.'),
+                stacklevel=2)
+        if add_etags:
+            warn(DeprecationWarning('In future flask releases etags will no '
+                'longer be generated for file objects passed to the send_file '
+                'function because this behavior was unreliable.  Pass '
+                'filenames instead if possible, otherwise attach an etag '
+                'yourself based on another value'), stacklevel=2)
+
+    if filename is not None:
+        if not os.path.isabs(filename):
+            filename = os.path.join(current_app.root_path, filename)
+    if mimetype is None and (filename or attachment_filename):
+        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
+    if mimetype is None:
+        mimetype = 'application/octet-stream'
+
+    headers = Headers()
+    if as_attachment:
+        if attachment_filename is None:
+            if filename is None:
+                raise TypeError('filename unavailable, required for '
+                                'sending as attachment')
+            attachment_filename = os.path.basename(filename)
+        headers.add('Content-Disposition', 'attachment',
+                    filename=attachment_filename)
+
+    if current_app.use_x_sendfile and filename:
+        if file is not None:
+            file.close()
+        headers['X-Sendfile'] = filename
+        headers['Content-Length'] = os.path.getsize(filename)
+        data = None
+    else:
+        if file is None:
+            file = open(filename, 'rb')
+            mtime = os.path.getmtime(filename)
+        headers['Content-Length'] = os.path.getsize(filename)
+        data = wrap_file(request.environ, file)
+
+    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
+                                    direct_passthrough=True)
+
+    # if we know the file modification date, we can store it as
+    # the time of the last modification.
+    if mtime is not None:
+        rv.last_modified = int(mtime)
+
+    rv.cache_control.public = True
+    if cache_timeout is None:
+        cache_timeout = current_app.get_send_file_max_age(filename)
+    if cache_timeout is not None:
+        rv.cache_control.max_age = cache_timeout
+        rv.expires = int(time() + cache_timeout)
+
+    if add_etags and filename is not None:
+        try:
+            rv.set_etag('%s-%s-%s' % (
+                os.path.getmtime(filename),
+                os.path.getsize(filename),
+                adler32(
+                    filename.encode('utf-8') if isinstance(filename, text_type)
+                    else filename
+                ) & 0xffffffff
+            ))
+        except OSError:
+            warn('Access to %s failed, maybe it does not exist, so ignoring '
+                 'etags in headers' % filename, stacklevel=2)
+
+    if conditional:
+        rv = rv.make_conditional(request)
+        # make sure we don't send x-sendfile for servers that
+        # ignore the 304 status code for x-sendfile.
+        if rv.status_code == 304:
+            rv.headers.pop('x-sendfile', None)
+    return rv
+
+
+def safe_join(directory, filename):
+    """Safely join `directory` and `filename`.
+
+    Example usage::
+
+        @app.route('/wiki/<path:filename>')
+        def wiki_page(filename):
+            filename = safe_join(app.config['WIKI_FOLDER'], filename)
+            with open(filename, 'rb') as fd:
+                content = fd.read()  # Read and process the file content...
+
+    :param directory: the base directory.
+    :param filename: the untrusted filename relative to that directory.
+    :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
+             would fall out of `directory`.
+    """
+    filename = posixpath.normpath(filename)
+    for sep in _os_alt_seps:
+        if sep in filename:
+            raise NotFound()
+    if os.path.isabs(filename) or \
+       filename == '..' or \
+       filename.startswith('../'):
+        raise NotFound()
+    return os.path.join(directory, filename)
+
+
+def send_from_directory(directory, filename, **options):
+    """Send a file from a given directory with :func:`send_file`.  This
+    is a secure way to quickly expose static files from an upload folder
+    or something similar.
+
+    Example usage::
+
+        @app.route('/uploads/<path:filename>')
+        def download_file(filename):
+            return send_from_directory(app.config['UPLOAD_FOLDER'],
+                                       filename, as_attachment=True)
+
+    .. admonition:: Sending files and Performance
+
+       For improved performance it is strongly recommended either to
+       activate ``X-Sendfile`` support in your webserver or (if no
+       authentication happens) to tell the webserver to serve files for the
+       given path on its own, without calling into the web application.
+
+    .. versionadded:: 0.5
+
+    :param directory: the directory where all the files are stored.
+    :param filename: the filename relative to that directory to
+                     download.
+    :param options: optional keyword arguments that are directly
+                    forwarded to :func:`send_file`.
+    """
+    filename = safe_join(directory, filename)
+    if not os.path.isabs(filename):
+        filename = os.path.join(current_app.root_path, filename)
+    try:
+        if not os.path.isfile(filename):
+            raise NotFound()
+    except (TypeError, ValueError):
+        raise BadRequest()
+    options.setdefault('conditional', True)
+    return send_file(filename, **options)
+
+
+def get_root_path(import_name):
+    """Returns the path to a package, or the current working directory if
+    the package cannot be found.  This returns the path of a package or the
+    folder that contains a module.
+
+    Not to be confused with the package path returned by :func:`find_package`.
+    """
+    # Module already imported and has a file attribute.  Use that first.
+    mod = sys.modules.get(import_name)
+    if mod is not None and hasattr(mod, '__file__'):
+        return os.path.dirname(os.path.abspath(mod.__file__))
+
+    # Next attempt: check the loader.
+    loader = pkgutil.get_loader(import_name)
+
+    # Loader does not exist or we're referring to an unloaded main module
+    # or a main module without path (interactive sessions), go with the
+    # current working directory.
+    if loader is None or import_name == '__main__':
+        return os.getcwd()
+
+    # For .egg, zipimporter does not have get_filename until Python 2.7.
+    # Some other loaders might exhibit the same behavior.
+    if hasattr(loader, 'get_filename'):
+        filepath = loader.get_filename(import_name)
+    else:
+        # Fall back to imports.
+        __import__(import_name)
+        mod = sys.modules[import_name]
+        filepath = getattr(mod, '__file__', None)
+
+    # If we don't have a filepath it might be because we are a
+    # namespace package.  In this case we pick the root path from the
+    # first module that is contained in our package.
+    if filepath is None:
+        raise RuntimeError('No root path can be found for the provided '
+                           'module "%s".  This can happen because the '
+                           'module came from an import hook that does '
+                           'not provide file name information or because '
+                           'it\'s a namespace package.  In this case '
+                           'the root path needs to be explicitly '
+                           'provided.' % import_name)
+
+    # filepath is import_name.py for a module, or __init__.py for a package.
+    return os.path.dirname(os.path.abspath(filepath))
+
+
+def _matching_loader_thinks_module_is_package(loader, mod_name):
+    """Given the loader that loaded a module and the module's name, this
+    function attempts to figure out whether the given module is actually a
+    package.
+    """
+    # If the loader can tell us if something is a package, we can
+    # directly ask the loader.
+    if hasattr(loader, 'is_package'):
+        return loader.is_package(mod_name)
+    # importlib's namespace loaders do not have this functionality but
+    # all the modules they load are packages, so we can take advantage of
+    # this information.
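+    # (Added note: importlib is frozen into the interpreter at build time,
+    # so its loader classes report '_frozen_importlib' rather than
+    # 'importlib._bootstrap' as their module; the check below relies on
+    # that name.)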
+    elif (loader.__class__.__module__ == '_frozen_importlib' and
+          loader.__class__.__name__ == 'NamespaceLoader'):
+        return True
+    # Otherwise we need to fail with an error that explains what went
+    # wrong.
+    raise AttributeError(
+        ('%s.is_package() method is missing but is required by Flask for '
+         'PEP 302 import hooks.  If you do not use import hooks and '
+         'you encounter this error please file a bug against Flask.') %
+        loader.__class__.__name__)
+
+
+def find_package(import_name):
+    """Finds a package and returns the prefix (or None if the package is
+    not installed) as well as the folder that contains the package or
+    module as a tuple.  The package path returned is the folder that would
+    have to be added to the pythonpath in order to make it possible to
+    import the module.  The prefix is the path below which a UNIX like
+    folder structure exists (lib, share etc.).
+    """
+    root_mod_name = import_name.split('.')[0]
+    loader = pkgutil.get_loader(root_mod_name)
+    if loader is None or import_name == '__main__':
+        # import name is not found, or interactive/main module
+        package_path = os.getcwd()
+    else:
+        # For .egg, zipimporter does not have get_filename until Python 2.7.
+        if hasattr(loader, 'get_filename'):
+            filename = loader.get_filename(root_mod_name)
+        elif hasattr(loader, 'archive'):
+            # zipimporter's loader.archive points to the .egg or .zip file;
+            # the archive filename is dropped in the call to dirname below.
+            filename = loader.archive
+        else:
+            # At least one loader is missing both get_filename and archive:
+            # Google App Engine's HardenedModulesHook
+            #
+            # Fall back to imports.
+            __import__(import_name)
+            filename = sys.modules[import_name].__file__
+        package_path = os.path.abspath(os.path.dirname(filename))
+
+        # In case the root module is a package we need to chop off the
+        # rightmost part.  This needs to go through a helper function
+        # because of python 3.3 namespace packages.
+        if _matching_loader_thinks_module_is_package(
+                loader, root_mod_name):
+            package_path = os.path.dirname(package_path)
+
+    site_parent, site_folder = os.path.split(package_path)
+    py_prefix = os.path.abspath(sys.prefix)
+    if package_path.startswith(py_prefix):
+        return py_prefix, package_path
+    elif site_folder.lower() == 'site-packages':
+        parent, folder = os.path.split(site_parent)
+        # Windows like installations
+        if folder.lower() == 'lib':
+            base_dir = parent
+        # UNIX like installations
+        elif os.path.basename(parent).lower() == 'lib':
+            base_dir = os.path.dirname(parent)
+        else:
+            base_dir = site_parent
+        return base_dir, package_path
+    return None, package_path
+
+
+class locked_cached_property(object):
+    """A decorator that converts a function into a lazy property.  The
+    function wrapped is called the first time to retrieve the result
+    and then that calculated result is used the next time you access
+    the value.  Works like the one in Werkzeug but has a lock for
+    thread safety.
+    """
+
+    def __init__(self, func, name=None, doc=None):
+        self.__name__ = name or func.__name__
+        self.__module__ = func.__module__
+        self.__doc__ = doc or func.__doc__
+        self.func = func
+        self.lock = RLock()
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        with self.lock:
+            value = obj.__dict__.get(self.__name__, _missing)
+            if value is _missing:
+                value = self.func(obj)
+                obj.__dict__[self.__name__] = value
+            return value
+
+
+class _PackageBoundObject(object):
+
+    def __init__(self, import_name, template_folder=None, root_path=None):
+        #: The name of the package or module. 
Do not change this once + #: it was set by the constructor. + self.import_name = import_name + + #: location of the templates. ``None`` if templates should not be + #: exposed. + self.template_folder = template_folder + + if root_path is None: + root_path = get_root_path(self.import_name) + + #: Where is the app root located? + self.root_path = root_path + + self._static_folder = None + self._static_url_path = None + + def _get_static_folder(self): + if self._static_folder is not None: + return os.path.join(self.root_path, self._static_folder) + def _set_static_folder(self, value): + self._static_folder = value + static_folder = property(_get_static_folder, _set_static_folder, doc=''' + The absolute path to the configured static folder. + ''') + del _get_static_folder, _set_static_folder + + def _get_static_url_path(self): + if self._static_url_path is not None: + return self._static_url_path + if self.static_folder is not None: + return '/' + os.path.basename(self.static_folder) + def _set_static_url_path(self, value): + self._static_url_path = value + static_url_path = property(_get_static_url_path, _set_static_url_path) + del _get_static_url_path, _set_static_url_path + + @property + def has_static_folder(self): + """This is ``True`` if the package bound object's container has a + folder for static files. + + .. versionadded:: 0.5 + """ + return self.static_folder is not None + + @locked_cached_property + def jinja_loader(self): + """The Jinja loader for this package bound object. + + .. versionadded:: 0.5 + """ + if self.template_folder is not None: + return FileSystemLoader(os.path.join(self.root_path, + self.template_folder)) + + def get_send_file_max_age(self, filename): + """Provides default cache_timeout for the :func:`send_file` functions. + + By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from + the configuration of :data:`~flask.current_app`. + + Static file functions such as :func:`send_from_directory` use this + function, and :func:`send_file` calls this function on + :data:`~flask.current_app` when the given cache_timeout is ``None``. If a + cache_timeout is given in :func:`send_file`, that timeout is used; + otherwise, this method is called. + + This allows subclasses to change the behavior when sending files based + on the filename. For example, to set the cache timeout for .js files + to 60 seconds:: + + class MyFlask(flask.Flask): + def get_send_file_max_age(self, name): + if name.lower().endswith('.js'): + return 60 + return flask.Flask.get_send_file_max_age(self, name) + + .. versionadded:: 0.9 + """ + return total_seconds(current_app.send_file_max_age_default) + + def send_static_file(self, filename): + """Function used internally to send static files from the static + folder to the browser. + + .. versionadded:: 0.5 + """ + if not self.has_static_folder: + raise RuntimeError('No static folder for this object') + # Ensure get_send_file_max_age is called in all cases. + # Here, we ensure get_send_file_max_age is called for Blueprints. + cache_timeout = self.get_send_file_max_age(filename) + return send_from_directory(self.static_folder, filename, + cache_timeout=cache_timeout) + + def open_resource(self, resource, mode='rb'): + """Opens a resource from the application's resource folder. 
To see
+        how this works, consider the following folder structure::
+
+            /myapplication.py
+            /schema.sql
+            /static
+                /style.css
+            /templates
+                /layout.html
+                /index.html
+
+        If you want to open the :file:`schema.sql` file you would do the
+        following::
+
+            with app.open_resource('schema.sql') as f:
+                contents = f.read()
+                do_something_with(contents)
+
+        :param resource: the name of the resource.  To access resources
+                         within subfolders use forward slashes as separator.
+        :param mode: resource file opening mode, default is 'rb'.
+        """
+        if mode not in ('r', 'rb'):
+            raise ValueError('Resources can only be opened for reading')
+        return open(os.path.join(self.root_path, resource), mode)
+
+
+def total_seconds(td):
+    """Returns the total seconds from a timedelta object.
+
+    :param timedelta td: the timedelta to be converted to seconds
+
+    :returns: number of seconds
+    :rtype: int
+    """
+    return td.days * 60 * 60 * 24 + td.seconds
diff --git a/venv/lib/python2.7/site-packages/flask/json.py b/venv/lib/python2.7/site-packages/flask/json.py
new file mode 100644
index 0000000..b9ce4a0
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/flask/json.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.json
+    ~~~~~~~~~~
+
+    Implementation helpers for the JSON support in Flask.
+
+    :copyright: (c) 2015 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import io
+import uuid
+from datetime import date
+from .globals import current_app, request
+from ._compat import text_type, PY2
+
+from werkzeug.http import http_date
+from jinja2 import Markup
+
+# Use the same json implementation as itsdangerous on which we
+# depend anyways.
+try:
+    from itsdangerous import simplejson as _json
+except ImportError:
+    from itsdangerous import json as _json
+
+
+# Figure out if simplejson escapes slashes.  This behavior was changed
+# from one version to another without reason.
+_slash_escape = '\\/' not in _json.dumps('/')
+
+
+__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
+           'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
+           'jsonify']
+
+
+def _wrap_reader_for_text(fp, encoding):
+    if isinstance(fp.read(0), bytes):
+        fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
+    return fp
+
+
+def _wrap_writer_for_text(fp, encoding):
+    try:
+        fp.write('')
+    except TypeError:
+        fp = io.TextIOWrapper(fp, encoding)
+    return fp
+
+
+class JSONEncoder(_json.JSONEncoder):
+    """The default Flask JSON encoder.  This one extends the default
+    simplejson encoder by also supporting ``datetime`` objects (serialized
+    as RFC 822 datetime strings, the same as the HTTP date format), ``UUID``
+    objects (serialized as strings) and objects with an ``__html__`` method,
+    such as ``Markup`` objects.  In order to support more data types,
+    override the :meth:`default` method.
+    """
+
+    def default(self, o):
+        """Implement this method in a subclass such that it returns a
+        serializable object for ``o``, or calls the base implementation (to
+        raise a :exc:`TypeError`).
+
+        For example, to support arbitrary iterators, you could implement
+        default like this::
+
+            def default(self, o):
+                try:
+                    iterable = iter(o)
+                except TypeError:
+                    pass
+                else:
+                    return list(iterable)
+                return JSONEncoder.default(self, o)
+        """
+        if isinstance(o, date):
+            return http_date(o.timetuple())
+        if isinstance(o, uuid.UUID):
+            return str(o)
+        if hasattr(o, '__html__'):
+            return text_type(o.__html__())
+        return _json.JSONEncoder.default(self, o)
+
+
+class JSONDecoder(_json.JSONDecoder):
+    """The default JSON decoder.  This one does not change the behavior from
+    the default simplejson decoder.  Consult the :mod:`json` documentation
+    for more information.  This decoder is not only used for the load
+    functions of this module but also for :attr:`~flask.Request`.
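+
+    A minimal customization sketch (the subclass name and the ``Decimal``
+    hook are illustrative, not part of Flask); assign the class to
+    :attr:`flask.Flask.json_decoder` to activate it::
+
+        from decimal import Decimal
+
+        class DecimalJSONDecoder(JSONDecoder):
+            def __init__(self, *args, **kwargs):
+                kwargs.setdefault('parse_float', Decimal)
+                JSONDecoder.__init__(self, *args, **kwargs)
+
+        app.json_decoder = DecimalJSONDecoder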
+    """
+
+
+def _dump_arg_defaults(kwargs):
+    """Inject default arguments for dump functions."""
+    if current_app:
+        kwargs.setdefault('cls', current_app.json_encoder)
+        if not current_app.config['JSON_AS_ASCII']:
+            kwargs.setdefault('ensure_ascii', False)
+        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
+    else:
+        kwargs.setdefault('sort_keys', True)
+        kwargs.setdefault('cls', JSONEncoder)
+
+
+def _load_arg_defaults(kwargs):
+    """Inject default arguments for load functions."""
+    if current_app:
+        kwargs.setdefault('cls', current_app.json_decoder)
+    else:
+        kwargs.setdefault('cls', JSONDecoder)
+
+
+def dumps(obj, **kwargs):
+    """Serialize ``obj`` to a JSON formatted ``str`` by using the
+    application's configured encoder (:attr:`~flask.Flask.json_encoder`)
+    if there is an application on the stack.
+
+    By default this function returns ascii-only bytestrings (which coerce
+    into ``unicode`` strings automatically); set the ``JSON_AS_ASCII``
+    configuration variable to ``False`` to get ``unicode`` strings
+    directly.  This can also be overridden per call with the simplejson
+    ``ensure_ascii`` parameter.
+    """
+    _dump_arg_defaults(kwargs)
+    encoding = kwargs.pop('encoding', None)
+    rv = _json.dumps(obj, **kwargs)
+    if encoding is not None and isinstance(rv, text_type):
+        rv = rv.encode(encoding)
+    return rv
+
+
+def dump(obj, fp, **kwargs):
+    """Like :func:`dumps` but writes into a file object."""
+    _dump_arg_defaults(kwargs)
+    encoding = kwargs.pop('encoding', None)
+    if encoding is not None:
+        fp = _wrap_writer_for_text(fp, encoding)
+    _json.dump(obj, fp, **kwargs)
+
+
+def loads(s, **kwargs):
+    """Deserialize a JSON object from a string ``s`` by using the
+    application's configured decoder (:attr:`~flask.Flask.json_decoder`)
+    if there is an application on the stack.
+    """
+    _load_arg_defaults(kwargs)
+    if isinstance(s, bytes):
+        s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
+    return _json.loads(s, **kwargs)
+
+
+def load(fp, **kwargs):
+    """Like :func:`loads` but reads from a file object.
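+
+    A minimal usage sketch (the filename is illustrative)::
+
+        with open('manifest.json') as fp:
+            data = load(fp)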
+ """ + _load_arg_defaults(kwargs) + if not PY2: + fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8') + return _json.load(fp, **kwargs) + + +def htmlsafe_dumps(obj, **kwargs): + """Works exactly like :func:`dumps` but is safe for use in ``' + safe = Markup('username') + assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe) + + def test_string_interpolation(self): + # string interpolations are safe to use too + assert Markup('%s') % '' == \ + '<bad user>' + assert Markup('%(username)s') % { + 'username': '' + } == '<bad user>' + + assert Markup('%i') % 3.14 == '3' + assert Markup('%.2f') % 3.14 == '3.14' + + def test_type_behavior(self): + # an escaped object is markup too + assert type(Markup('foo') + 'bar') is Markup + + # and it implements __html__ by returning itself + x = Markup("foo") + assert x.__html__() is x + + def test_html_interop(self): + # it also knows how to treat __html__ objects + class Foo(object): + def __html__(self): + return 'awesome' + def __unicode__(self): + return 'awesome' + __str__ = __unicode__ + assert Markup(Foo()) == 'awesome' + assert Markup('%s') % Foo() == \ + 'awesome' + + def test_tuple_interpol(self): + self.assertEqual(Markup('%s:%s') % ( + '', + '', + ), Markup(u'<foo>:<bar>')) + + def test_dict_interpol(self): + self.assertEqual(Markup('%(foo)s') % { + 'foo': '', + }, Markup(u'<foo>')) + self.assertEqual(Markup('%(foo)s:%(bar)s') % { + 'foo': '', + 'bar': '', + }, Markup(u'<foo>:<bar>')) + + def test_escaping(self): + # escaping and unescaping + assert escape('"<>&\'') == '"<>&'' + assert Markup("Foo & Bar").striptags() == "Foo & Bar" + assert Markup("<test>").unescape() == "" + + def test_formatting(self): + for actual, expected in ( + (Markup('%i') % 3.14, '3'), + (Markup('%.2f') % 3.14159, '3.14'), + (Markup('%s %s %s') % ('<', 123, '>'), '< 123 >'), + (Markup('{awesome}').format(awesome=''), + '<awesome>'), + (Markup('{0[1][bar]}').format([0, {'bar': ''}]), + '<bar/>'), + (Markup('{0[1][bar]}').format([0, {'bar': Markup('')}]), + '')): + assert actual == expected, "%r should be %r!" % (actual, expected) + + # This is new in 2.7 + if sys.version_info >= (2, 7): + def test_formatting_empty(self): + formatted = Markup('{}').format(0) + assert formatted == Markup('0') + + def test_custom_formatting(self): + class HasHTMLOnly(object): + def __html__(self): + return Markup('') + + class HasHTMLAndFormat(object): + def __html__(self): + return Markup('') + def __html_format__(self, spec): + return Markup('') + + assert Markup('{0}').format(HasHTMLOnly()) == Markup('') + assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('') + + def test_complex_custom_formatting(self): + class User(object): + def __init__(self, id, username): + self.id = id + self.username = username + def __html_format__(self, format_spec): + if format_spec == 'link': + return Markup('{1}').format( + self.id, + self.__html__(), + ) + elif format_spec: + raise ValueError('Invalid format spec') + return self.__html__() + def __html__(self): + return Markup('{0}').format(self.username) + + user = User(1, 'foo') + assert Markup('

User: {0:link}').format(user) == \ + Markup('

User: foo') + + def test_all_set(self): + import markupsafe as markup + for item in markup.__all__: + getattr(markup, item) + + def test_escape_silent(self): + assert escape_silent(None) == Markup() + assert escape(None) == Markup(None) + assert escape_silent('') == Markup(u'<foo>') + + def test_splitting(self): + self.assertEqual(Markup('a b').split(), [ + Markup('a'), + Markup('b') + ]) + self.assertEqual(Markup('a b').rsplit(), [ + Markup('a'), + Markup('b') + ]) + self.assertEqual(Markup('a\nb').splitlines(), [ + Markup('a'), + Markup('b') + ]) + + def test_mul(self): + self.assertEqual(Markup('a') * 3, Markup('aaa')) + + +class MarkupLeakTestCase(unittest.TestCase): + + def test_markup_leaks(self): + counts = set() + for count in range(20): + for item in range(1000): + escape("foo") + escape("") + escape(u"foo") + escape(u"") + counts.add(len(gc.get_objects())) + assert len(counts) == 1, 'ouch, c extension seems to leak objects' + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(MarkupTestCase)) + + # this test only tests the c extension + if not hasattr(escape, 'func_code'): + suite.addTest(unittest.makeSuite(MarkupLeakTestCase)) + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') + +# vim:sts=4:sw=4:et: diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/DESCRIPTION.rst b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..39586d2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/DESCRIPTION.rst @@ -0,0 +1,36 @@ +pip +=== + +The `PyPA recommended +`_ +tool for installing Python packages. + +* `Installation `_ +* `Documentation `_ +* `Changelog `_ +* `Github Page `_ +* `Issue Tracking `_ +* `User mailing list `_ +* `Dev mailing list `_ +* User IRC: #pypa on Freenode. +* Dev IRC: #pypa-dev on Freenode. + + +.. image:: https://img.shields.io/pypi/v/pip.svg + :target: https://pypi.python.org/pypi/pip + +.. image:: https://img.shields.io/travis/pypa/pip/develop.svg + :target: http://travis-ci.org/pypa/pip + +.. image:: https://readthedocs.org/projects/pip/badge/?version=stable + :target: https://pip.pypa.io/en/stable + +Code of Conduct +--------------- + +Everyone interacting in the pip project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/INSTALLER b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/METADATA b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/METADATA new file mode 100644 index 0000000..83d3c03 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/METADATA @@ -0,0 +1,65 @@ +Metadata-Version: 2.0 +Name: pip +Version: 8.1.2 +Summary: The PyPA recommended tool for installing Python packages. 
+Home-page: https://pip.pypa.io/ +Author: The pip developers +Author-email: python-virtualenv@groups.google.com +License: MIT +Keywords: easy_install distutils setuptools egg virtualenv +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Topic :: Software Development :: Build Tools +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: Implementation :: PyPy +Provides-Extra: testing +Requires-Dist: mock; extra == 'testing' +Requires-Dist: pretend; extra == 'testing' +Requires-Dist: pytest; extra == 'testing' +Requires-Dist: scripttest (>=1.3); extra == 'testing' +Requires-Dist: virtualenv (>=1.10); extra == 'testing' + +pip +=== + +The `PyPA recommended +`_ +tool for installing Python packages. + +* `Installation `_ +* `Documentation `_ +* `Changelog `_ +* `Github Page `_ +* `Issue Tracking `_ +* `User mailing list `_ +* `Dev mailing list `_ +* User IRC: #pypa on Freenode. +* Dev IRC: #pypa-dev on Freenode. + + +.. image:: https://img.shields.io/pypi/v/pip.svg + :target: https://pypi.python.org/pypi/pip + +.. image:: https://img.shields.io/travis/pypa/pip/develop.svg + :target: http://travis-ci.org/pypa/pip + +.. image:: https://readthedocs.org/projects/pip/badge/?version=stable + :target: https://pip.pypa.io/en/stable + +Code of Conduct +--------------- + +Everyone interacting in the pip project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/RECORD b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/RECORD new file mode 100644 index 0000000..c59b682 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/RECORD @@ -0,0 +1,483 @@ +pip/__init__.py,sha256=OQB9eSOv7HjK2W5VesX26tKN8uvMhvPaXoM4HuoiKn0,10604 +pip/__main__.py,sha256=V6Kh-IEDEFpt1cahRE6MajUF_14qJR_Qsvn4MjWZXzE,584 +pip/basecommand.py,sha256=4WPEhwtMgVGe4ahlr4Y2JyXJ8chjBmYlU8RCa0sQ5bo,11648 +pip/baseparser.py,sha256=Nlc7Un9gat27xtB24SnKL_3pZZOoh62gNNRdS6tDRZY,10465 +pip/cmdoptions.py,sha256=pf24iszA39rhcJ5DjFA4oD_z5vTI0NG98qUahHs3qPM,15878 +pip/download.py,sha256=oJ3sZ8I6ct9X3eoXQ9xm_Ne0e6N85G_rWaERmMCVF2k,31722 +pip/exceptions.py,sha256=GdDhHOROBj-kW2rgerLJYXsxN8ENy1BX5RUb_Vs9TXM,7980 +pip/index.py,sha256=DsxoKRxoL4oEdaqQOctou0HN1rciulGp0EBcHtnyBR4,37235 +pip/locations.py,sha256=MqUzS8YI2wDa7oFzTQw4zM4s0Hci05yubxfU_kTXXlU,5632 +pip/pep425tags.py,sha256=elib4cgVtzTw8CUJ8IxoX1mkaeLxS-2XHHqkvS5ryZM,11853 +pip/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 +pip/wheel.py,sha256=wO99S_aDXkjyActzGLOw0cV73YFLXjz740oD96wXyXo,32216 +pip/_vendor/__init__.py,sha256=yzwJqv89TDqeqqWRgBlZZmiAc5sTNOrdIMgF0MT8TWI,4647 +pip/_vendor/ipaddress.py,sha256=FngG_V6As8JnDJVR1g1i0D6wVa5ycSKJYOh7IrI4fq0,79904 +pip/_vendor/pyparsing.py,sha256=cBtJbRYQNhvtw0IqoLvDUzW-ZdgfRFVm3QD2O3-jlz0,158096 +pip/_vendor/re-vendor.py,sha256=PcdZ40d0ohMsdJmA4t0AeAWbPXi1tFsvAwA5KE5FGeY,773 +pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972 +pip/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +pip/_vendor/cachecontrol/__init__.py,sha256=uz7Oxs9hQmUxtxod1WyVoUj6Jab7NfiLECPHFuc-22U,302 +pip/_vendor/cachecontrol/_cmd.py,sha256=MPxZfZd2LKDzVrs55X3wA1rsI2YuP8evLZSwQj0dIk0,1320 +pip/_vendor/cachecontrol/adapter.py,sha256=eizWWJwOnG5TToxL-XiEywPEf2k20--e-5C6u6wAEts,4196 +pip/_vendor/cachecontrol/cache.py,sha256=xtl-V-pr9KSt9VvFDRCB9yrHPEvqvbk-5M1vAInZb5k,790 +pip/_vendor/cachecontrol/compat.py,sha256=uyovOpd1ehI3J1XeBqJvcsIp6fvkjBpoQmu_0J2st8c,416 +pip/_vendor/cachecontrol/controller.py,sha256=Aky7U9UZ1VqutfvilljlWi1uk40yis6lmaz8c_wQLw8,12996 +pip/_vendor/cachecontrol/filewrapper.py,sha256=jkC0GOorbWIpy9CzP3PwxSHx5J2CXBAxNbGH68HmP1M,2168 +pip/_vendor/cachecontrol/heuristics.py,sha256=WtJrVsyWjpP9WoUiDVdTZZRNBCz5ZVptaQpYnqofDQU,4141 +pip/_vendor/cachecontrol/serialize.py,sha256=vZQ-Rsn4618ATipqqZJ-Hife2qVzpIepNjxTCUM-eAw,6305 +pip/_vendor/cachecontrol/wrapper.py,sha256=Kqyu_3TW_54XDudha4-HF21vyEOAJ4ZnRXFysTiLmXA,498 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=uWnUtyMvHY_LULaL_4_IR1F_xPgK5zHfJyRnBq4DnPE,369 +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=FsDug3bwUAQ3okjjfGzxlDaBf2fwVSn1iBKMTL6SyGU,3532 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=XywqxkS9MkCaflTOY_wjrE02neKdywB9YwlOBbP7Ywc,973 +pip/_vendor/colorama/__init__.py,sha256=9xByrTvk9upkL5NGV5It2Eje4-kzNLwa_1lGPWpXoNU,240 +pip/_vendor/colorama/ansi.py,sha256=Fi0un-QLqRm-v7o_nKiOqyC8PapBJK7DLV_q9LKtTO0,2524 +pip/_vendor/colorama/ansitowin32.py,sha256=gJZB35Lbdjatykd2zrUUnokMzkvcFgscyn_tNxxMFHA,9668 +pip/_vendor/colorama/initialise.py,sha256=cHqVJtb82OG7HUCxvQ2joG7N_CoxbIKbI_fgryZkj20,1917 +pip/_vendor/colorama/win32.py,sha256=_SCEoTK_GA2tU1nhbayKKac-v9Jn98lCPIFOeFMGCHQ,5365 +pip/_vendor/colorama/winterm.py,sha256=V7U7ojwG1q4n6PKripjEvW_htYQi5ueXSM3LUUoqqDY,6290 
+pip/_vendor/distlib/__init__.py,sha256=7B6hxGONsbhxuy9NCkPGnXHuuV4EVLwRGl3qwkXrEn8,581 +pip/_vendor/distlib/compat.py,sha256=13653e8hB0lLj87tSxctbi4I9zC3VtCjJxjwQ3yxwAI,40660 +pip/_vendor/distlib/database.py,sha256=rZHwz4NcZZ7HtXH4SNca6ITyYBSvalvPXZmIj1BjNP8,49671 +pip/_vendor/distlib/index.py,sha256=qU38JCRI5F_1Z-QmydiocDE58d4KvHzKjS_T0dBsPlg,20976 +pip/_vendor/distlib/locators.py,sha256=LhEPbiagqlvOA6PWGuN-AGVDeakS3ykStoe_OYBkAUE,50493 +pip/_vendor/distlib/manifest.py,sha256=JF5EstaCOPnsW2tUdXCdjIFn-Zkf48Dqw0TcxKjuni0,13598 +pip/_vendor/distlib/markers.py,sha256=iRrVWwpyVwjkKJSX8NEQ92_MRMwpROcfNGKCD-Ch1QM,6282 +pip/_vendor/distlib/metadata.py,sha256=UJdzZIbC6Bkp1H9BtiOLXzph6HVujHKcUW1yDgO9oJ0,38702 +pip/_vendor/distlib/resources.py,sha256=7hQ2OgeRRwMrU81hztflAy3b0no-JOtUGCAIC8GImtk,10620 +pip/_vendor/distlib/scripts.py,sha256=ZVGGXYJwW06rIT5gOhO0fvc5CO2Q89LVVNYNF2XVcIY,15223 +pip/_vendor/distlib/t32.exe,sha256=rOJD6eDYk88TJ5lJtyt58El-nYNip4UvnYIDJ2y6QNs,89088 +pip/_vendor/distlib/t64.exe,sha256=qDBQu9uPHCVdBFM6ANg-Xp9nc5Wz_iFnSmsCTvdEQec,97792 +pip/_vendor/distlib/util.py,sha256=DXYa1CiOqPEJE77L6bx6mwNiQE1ayU8LCr2ux8mLBMo,52032 +pip/_vendor/distlib/version.py,sha256=_XjbARzhJfjH7MyFyBr3X5NWzSt7pY73la5KCWbD4Sc,23711 +pip/_vendor/distlib/w32.exe,sha256=LrnXXqK-Yb1tzS0lxymvQPiMlkQZWAB0eHM5jnL0mAk,85504 +pip/_vendor/distlib/w64.exe,sha256=GbKq4oBmzHZXdcpaLupKLenmQD7_DXsYX8PDPWo_U3M,94208 +pip/_vendor/distlib/wheel.py,sha256=UP53cKxOM5r7bHSS-n5prF6hwJEVsMW9ZNJutOuC26c,39115 +pip/_vendor/distlib/_backport/__init__.py,sha256=bqS_dTOH6uW9iGgd0uzfpPjo6vZ4xpPZ7kyfZJ2vNaw,274 +pip/_vendor/distlib/_backport/misc.py,sha256=KWecINdbFNOxSOP1fGF680CJnaC6S4fBRgEtaYTw0ig,971 +pip/_vendor/distlib/_backport/shutil.py,sha256=AUi8718iRoJ9K26mRi-rywtt8Gx7ykvrvbUbZszjfYE,25650 +pip/_vendor/distlib/_backport/sysconfig.cfg,sha256=swZKxq9RY5e9r3PXCrlvQPMsvOdiWZBTHLEbqS8LJLU,2617 +pip/_vendor/distlib/_backport/sysconfig.py,sha256=7WdYP0wbw8izH1eAEGNA-HXUyJrhzIAGK_LniUs4UNI,26958 +pip/_vendor/distlib/_backport/tarfile.py,sha256=bjyTNONZb-YEXrHFLExOSuagtSOoPaONP2UUoxwkAqE,92627 +pip/_vendor/html5lib/__init__.py,sha256=emLYTm9_7OPnOInA9dPc4IIVF7GWA5tpl2H_bLqVoWk,779 +pip/_vendor/html5lib/constants.py,sha256=B5LN2DMP-6lEp9wpON4ecX3Kx01n_cbMjuGd6AteixE,86873 +pip/_vendor/html5lib/html5parser.py,sha256=o9FOrhdLGYOtUhsueAsuXwduydagASvwxJ0lUpGYrYg,117347 +pip/_vendor/html5lib/ihatexml.py,sha256=MT12cVXAKaW-ALUkUeN175HpUP73xK8wAIpPzQ8cgfI,16581 +pip/_vendor/html5lib/inputstream.py,sha256=ss3wjtlObOVoVGWFsBztYdpnUqRaezyJ0sTXfdb4Ly4,31665 +pip/_vendor/html5lib/sanitizer.py,sha256=sbyGySzFzCD_v0JYYSr6sLYVLpO6bpVmRiDMKbFRcCw,17804 +pip/_vendor/html5lib/tokenizer.py,sha256=6Uf8sDUkvNn661bcBSBYUCTfXzSs9EyCTiPcj5PAjYI,76929 +pip/_vendor/html5lib/utils.py,sha256=PSVv1ig9oAZa-DU16DT4cbbggnG7K3qQIkPm6uJKxFg,3267 +pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/filters/_base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=fpRLbz6TCe5yXEkGmyMlJ80FekWsTR-sHk3Ano0U9LQ,624 +pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=xllv1I7unxhcyZTf3LTsv30wh2mAkT7wmTZx7zIhpuY,2746 +pip/_vendor/html5lib/filters/lint.py,sha256=8eJo0SXDcY40OhsNd0Cft36kUXCZ5t-30mNFSUf4LnE,4208 +pip/_vendor/html5lib/filters/optionaltags.py,sha256=4ozLwBgMRaxe7iqxefLQpDhp3irK7YHo9LgSGsvZYMw,10500 +pip/_vendor/html5lib/filters/sanitizer.py,sha256=MvGUs_v2taWPgGhjxswRSUiHfxrqMUhsNPz-eSeUYUQ,352 
+pip/_vendor/html5lib/filters/whitespace.py,sha256=LbOUcC0zQ9z703KNZrArOr0kVBO7OMXjKjucDW32LU4,1142 +pip/_vendor/html5lib/serializer/__init__.py,sha256=xFXFP-inaTNlbnau5c5DGrH_O8yPm-C6HWbJxpiSqFE,490 +pip/_vendor/html5lib/serializer/htmlserializer.py,sha256=G-aVHmlR7uMR011jO0ev7sZvkVHpLr3OrLSYMZ7liVs,12855 +pip/_vendor/html5lib/treeadapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/treeadapters/sax.py,sha256=3of4vvaUYIAic7pngebwJV24hpOS7Zg9ggJa_WQegy4,1661 +pip/_vendor/html5lib/treebuilders/__init__.py,sha256=Xz4X6B5DA1R-5GyRa44j0sJwfl6dUNyb0NBu9-7sK3U,3405 +pip/_vendor/html5lib/treebuilders/_base.py,sha256=Xf0FZVcVwIQS6tEseJdj5wKbYucbNCnbAsnsG4lONis,13711 +pip/_vendor/html5lib/treebuilders/dom.py,sha256=jvmtvnERtpxXpHvBgiq1FpzAUYAAzoolOTx_DoXwGEI,8469 +pip/_vendor/html5lib/treebuilders/etree.py,sha256=etbO6yQlyV46rWlj9mSyVqQOWrgoHgyJ01Tut4lWZkk,12621 +pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=z3Bnfm2MstEEb_lbaAeicl5l-ab6MSQa5Q1ZZreK7Pc,14031 +pip/_vendor/html5lib/treewalkers/__init__.py,sha256=m2-4a5P4dMNlQb26MNIhgj69p6ms1i-JD2HPDr7iTfw,5766 +pip/_vendor/html5lib/treewalkers/_base.py,sha256=9nXtXtgubdWKFlKxhVzWarE0Hiv3T4VC7_Wt9ulVzB0,7014 +pip/_vendor/html5lib/treewalkers/dom.py,sha256=Lb63Nuz8HtgvkuuvSmU5LOyUkEtstH5saPPAg5xN4r8,1421 +pip/_vendor/html5lib/treewalkers/etree.py,sha256=966h5cOYPwQIcHjxJmVP5cDOnWKiyqpyt9QKQo9W-uo,4597 +pip/_vendor/html5lib/treewalkers/genshistream.py,sha256=IbBFrlgi-59-K7P1zm0d7ZFIknBN4c5E57PHJDkx39s,2278 +pip/_vendor/html5lib/treewalkers/lxmletree.py,sha256=am6t_JHh_Fpm10CaW-zDaGGsDwTPK3Pas7TRBezFs4w,5992 +pip/_vendor/html5lib/treewalkers/pulldom.py,sha256=9W6i8yWtUzayV6EwX-okVacttHaqpQZwdBCc2S3XeQ4,2302 +pip/_vendor/html5lib/trie/__init__.py,sha256=mec5zyJ5wIKRM8819gIcIsYQwncg91rEmPwGH1dG3Ho,212 +pip/_vendor/html5lib/trie/_base.py,sha256=WGY8SGptFmx4O0aKLJ54zrIQOoyuvhS0ngA36vAcIcc,927 +pip/_vendor/html5lib/trie/datrie.py,sha256=EQpqSfkZRuTbE-DuhW7xMdVDxdZNZ0CfmnYfHA_3zxM,1178 +pip/_vendor/html5lib/trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775 +pip/_vendor/lockfile/__init__.py,sha256=Tqpz90DwKYfhPsfzVOJl84TL87pdFE5ePNHdXAxs4Tk,9371 +pip/_vendor/lockfile/linklockfile.py,sha256=C7OH3H4GdK68u4FQgp8fkP2kO4fyUTSyj3X6blgfobc,2652 +pip/_vendor/lockfile/mkdirlockfile.py,sha256=e3qgIL-etZMLsS-3ft19iW_8IQ360HNkGOqE3yBKsUw,3096 +pip/_vendor/lockfile/pidlockfile.py,sha256=ukH9uk6NFuxyVmG5QiWw4iKq3fT7MjqUguX95avYPIY,6090 +pip/_vendor/lockfile/sqlitelockfile.py,sha256=o2TMkMRY0iwn-iL1XMRRIFStMUkS4i3ajceeYNntKFg,5506 +pip/_vendor/lockfile/symlinklockfile.py,sha256=ABwXXmvTHvCl5viPblShL3PG-gGsLiT1roAMfDRwhi8,2616 +pip/_vendor/packaging/__about__.py,sha256=E9KR5UJ_8U9K-R1mScu6FmkXtbnlFEEFjEXJp-7LxNU,720 +pip/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 +pip/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 +pip/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 +pip/_vendor/packaging/markers.py,sha256=GAjjUG2SkQzc3fkG19g_oK4s5QhR9ySIq25I3ZWJWn8,7921 +pip/_vendor/packaging/requirements.py,sha256=SD7dVJGjdPUqtoHb47qwK6wWJTQd-ZXWjxpJg83UcBA,4327 +pip/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 +pip/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 +pip/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 
+pip/_vendor/pkg_resources/__init__.py,sha256=UaN-6iF8-E8TVvI4tJ8TWeeE34lzATAfYVoc3REb_yg,101295 +pip/_vendor/progress/__init__.py,sha256=Wn1074LUDZovd4zfoVYojnPBgOc6ctHbQX7rp_p8lRA,3023 +pip/_vendor/progress/bar.py,sha256=YNPJeRrwYVKFO2nyaEwsQjYByamMWTgJMvQO1NpD-AY,2685 +pip/_vendor/progress/counter.py,sha256=kEqA8jWEdwrc6P_9VaRx7bjOHwk9gxl-Q9oVbQ08v5c,1502 +pip/_vendor/progress/helpers.py,sha256=FehfwZTv-5cCfsbcMlvlUkm3xZ0cRhsev6XVpmeTF4c,2854 +pip/_vendor/progress/spinner.py,sha256=iCVtUQbaJUFHTjn1ZLPQLPYeao4lC9aXAa_HxIeUK6k,1314 +pip/_vendor/requests/__init__.py,sha256=ZaQ9ZpESi8Zhi-SHxMJwAqs_7En4GlvjEa-uJOQaQxc,2215 +pip/_vendor/requests/adapters.py,sha256=eKKxnQoL8yFVb3Hh607POK0dBm2gu50fgQTtSX-lJy8,18680 +pip/_vendor/requests/api.py,sha256=MCiVT9TG56jThCzrfy1pLQVOCXxaTOzNHj8HNKDu1P4,5795 +pip/_vendor/requests/auth.py,sha256=YBFhR0KH8og250hDRTYgC9KBqMVxh9yMjpZh0-kXefI,8075 +pip/_vendor/requests/cacert.pem,sha256=5xzWFRrSP0ZsXiW6emg8UQ_w497lT4qWCv32OO8R1ME,344712 +pip/_vendor/requests/certs.py,sha256=RX5H1cSiB52Hbjh_qv3eMW8hqHEF_r4Qiv_4AwfziuU,613 +pip/_vendor/requests/compat.py,sha256=hq7CKHoykNs8yzKPAJiOkHQJPoNp9A89MufTdhlCniY,1469 +pip/_vendor/requests/cookies.py,sha256=ShZ9lzb0qhTV03nhafCMQXrnugBGr32M30FEo5u6rkQ,17564 +pip/_vendor/requests/exceptions.py,sha256=lyzK5I-zkNCN9zfYGJgkDMvtt3akjw0QUq4q8pYI4wA,2776 +pip/_vendor/requests/hooks.py,sha256=jSdmZjB5oRJ6xmKM3VtqkYkq8oFTwx6gqG9AaUWpAlw,767 +pip/_vendor/requests/models.py,sha256=17T7cPYBk4JggKQxaeGMWWDB5KqLG8u6QiBXaH7IFYY,29530 +pip/_vendor/requests/sessions.py,sha256=K6tPEgZ4giLZ5sQg3QymC26lcsmFvwtZ044g2hrY3WE,24897 +pip/_vendor/requests/status_codes.py,sha256=QCHM7hbByBJCcPxnN63tCBYkJLjE04umd5m5DSZk-FE,3315 +pip/_vendor/requests/structures.py,sha256=TJ-VyA0GJwzMtLw2A9AqiHWsjvnzjf0s1AHGYc7n-pM,3017 +pip/_vendor/requests/utils.py,sha256=KQwDsJdE5DMGmSdRmBVHsdIVDak4rmK41gKOYR9WhfU,22228 +pip/_vendor/requests/packages/__init__.py,sha256=CVheqNRcXIkAi5037RhxeqbAqd0QhrK1o9R9kS2xvuI,1384 +pip/_vendor/requests/packages/chardet/__init__.py,sha256=XuTKCYOR7JwsoHxqZTYH86LVyMDbDI3s1s0W_qoGEBM,1295 +pip/_vendor/requests/packages/chardet/big5freq.py,sha256=D8oTdz-GM7Jg8TsaWJDm65vM_OLHC3xub6qUJ3rOgsQ,82594 +pip/_vendor/requests/packages/chardet/big5prober.py,sha256=XX96C--6WKYW36mL-z7pJSAtc169Z8ZImByCP4pEN9A,1684 +pip/_vendor/requests/packages/chardet/chardetect.py,sha256=f4299UZG6uWd3i3r_N0OdrFj2sA9JFI54PAmDLAFmWA,2504 +pip/_vendor/requests/packages/chardet/chardistribution.py,sha256=cUARQFr1oTLXeJCDQrDRkUP778AvSMzhSCnG8VLCV58,9226 +pip/_vendor/requests/packages/chardet/charsetgroupprober.py,sha256=0lKk7VE516fgMw119tNefFqLOxKfIE9WfdkpIT69OKU,3791 +pip/_vendor/requests/packages/chardet/charsetprober.py,sha256=Z48o2KiOj23FNqYH8FqzhH5m1qdm3rI8DcTm2Yqtklg,1902 +pip/_vendor/requests/packages/chardet/codingstatemachine.py,sha256=E85rYhHVMw9xDEJVgiQhp0OnLGr6i2r8_7QOWMKTH08,2318 +pip/_vendor/requests/packages/chardet/compat.py,sha256=5mm6yrHwef1JEG5OxkPJlSq5lkjLVpEGh3iPgFBkpkM,1157 +pip/_vendor/requests/packages/chardet/constants.py,sha256=-UnY8U7EP7z9fTyd09yq35BEkSFEAUAiv9ohd1DW1s4,1335 +pip/_vendor/requests/packages/chardet/cp949prober.py,sha256=FMvdLyB7fejPXRsTbca7LK1P3RUvvssmjUNyaEfz8zY,1782 +pip/_vendor/requests/packages/chardet/escprober.py,sha256=q5TcQKeVq31WxrW7Sv8yjpZkjEoaHO8S92EJZ9hodys,3187 +pip/_vendor/requests/packages/chardet/escsm.py,sha256=7iljEKN8lXTh8JFXPUSwlibMno6R6ksq4evLxbkzfro,7839 +pip/_vendor/requests/packages/chardet/eucjpprober.py,sha256=5IpfSEjAb7h3hcGMd6dkU80O900C2N6xku28rdYFKuc,3678 
+pip/_vendor/requests/packages/chardet/euckrfreq.py,sha256=T5saK5mImySG5ygQPtsp6o2uKulouCwYm2ElOyFkJqU,45978 +pip/_vendor/requests/packages/chardet/euckrprober.py,sha256=Wo7dnZ5Erw_nB4H-m5alMiOxOuJUmGHlwCSaGqExDZA,1675 +pip/_vendor/requests/packages/chardet/euctwfreq.py,sha256=G_I0BW9i1w0ONeeUwIYqV7_U09buIHdqh-wNHVaql7I,34872 +pip/_vendor/requests/packages/chardet/euctwprober.py,sha256=upS2P6GuT5ujOxXYw-RJLcT7A4PTuo27KGUKU4UZpIQ,1676 +pip/_vendor/requests/packages/chardet/gb2312freq.py,sha256=M2gFdo_qQ_BslStEchrPW5CrPEZEacC0uyDLw4ok-kY,36011 +pip/_vendor/requests/packages/chardet/gb2312prober.py,sha256=VWnjoRa83Y6V6oczMaxyUr0uy48iCnC2nzk9zfEIRHc,1681 +pip/_vendor/requests/packages/chardet/hebrewprober.py,sha256=8pdoUfsVXf_L4BnJde_BewS6H2yInV5688eu0nFhLHY,13359 +pip/_vendor/requests/packages/chardet/jisfreq.py,sha256=ZcL4R5ekHHbP2KCYGakVMBsiKqZZZAABzhwi-uRkOps,47315 +pip/_vendor/requests/packages/chardet/jpcntx.py,sha256=yftmp0QaF6RJO5SJs8I7LU5AF4rwP23ebeCQL4BM1OY,19348 +pip/_vendor/requests/packages/chardet/langbulgarianmodel.py,sha256=ZyPsA796MSVhYdfWhMCgKWckupAKAnKqWcE3Cl3ej6o,12784 +pip/_vendor/requests/packages/chardet/langcyrillicmodel.py,sha256=fkcd5OvogUp-GrNDWAZPgkYsSRCD2omotAEvqjlmLKE,17725 +pip/_vendor/requests/packages/chardet/langgreekmodel.py,sha256=QHMy31CH_ot67UCtmurCEKqKx2WwoaKrw2YCYYBK2Lw,12628 +pip/_vendor/requests/packages/chardet/langhebrewmodel.py,sha256=4ASl5vzKJPng4H278VHKtRYC03TpQpenlHTcsmZH1rE,11318 +pip/_vendor/requests/packages/chardet/langhungarianmodel.py,sha256=SXwuUzh49_cBeMXhshRHdrhlkz0T8_pZWV_pdqBKNFk,12536 +pip/_vendor/requests/packages/chardet/langthaimodel.py,sha256=-k7djh3dGKngAGnt3WfuoJN7acDcWcmHAPojhaUd7q4,11275 +pip/_vendor/requests/packages/chardet/latin1prober.py,sha256=238JHOxH8aRudJY2NmeSv5s7i0Qe3GuklIU3HlYybvg,5232 +pip/_vendor/requests/packages/chardet/mbcharsetprober.py,sha256=9rOCjDVsmSMp6e7q2syqak22j7lrbUZhJhMee2gbVL0,3268 +pip/_vendor/requests/packages/chardet/mbcsgroupprober.py,sha256=SHRzNPLpDXfMJLA8phCHVU0WgqbgDCNxDQMolGX_7yk,1967 +pip/_vendor/requests/packages/chardet/mbcssm.py,sha256=IKwJXyxu34n6NojmxVxC60MLFtJKm-hIfxaFEnb3uBA,19590 +pip/_vendor/requests/packages/chardet/sbcharsetprober.py,sha256=Xq0lODqJnDgxglBiQI4BqTFiPbn63-0a5XNA5-hVu7U,4793 +pip/_vendor/requests/packages/chardet/sbcsgroupprober.py,sha256=8hLyH8RAG-aohBo7o_KciWVgRo42ZE_zEtuNG1JMRYI,3291 +pip/_vendor/requests/packages/chardet/sjisprober.py,sha256=UYOmiMDzttYIkSDoOB08UEagivJpUXz4tuWiWzTiOr8,3764 +pip/_vendor/requests/packages/chardet/universaldetector.py,sha256=h-E2x6XSCzlNjycYWG0Fe4Cf1SGdaIzUNu2HCphpMZA,6840 +pip/_vendor/requests/packages/chardet/utf8prober.py,sha256=7tdNZGrJY7jZUBD483GGMkiP0Tx8Fp-cGvWHoAsilHg,2652 +pip/_vendor/requests/packages/urllib3/__init__.py,sha256=dgnv71-LhuNA7j-ov1A7TL7h4Ajq5JT1fONyGO8ivCg,2854 +pip/_vendor/requests/packages/urllib3/_collections.py,sha256=RP-cHyTx4AgYwvoETK8q1IVRbWFJnE0VV692ZHSbU68,10553 +pip/_vendor/requests/packages/urllib3/connection.py,sha256=QCmkelYgtbc06DfJtgs22na78kRTLCTbLb-OSWLbt-A,11617 +pip/_vendor/requests/packages/urllib3/connectionpool.py,sha256=ws48c_kVg09RUxuqpusWg_oOXIRTf25y3SLVN6yuCcQ,32713 +pip/_vendor/requests/packages/urllib3/exceptions.py,sha256=zGjhZCR1wefEnCN5b7WouQ3UhXesJ2bRKYIeWusaFJs,5599 +pip/_vendor/requests/packages/urllib3/fields.py,sha256=WUMvCLvnw7XemBq6AmCgNPJwyIJL_vWaMHaA2FLlscM,5931 +pip/_vendor/requests/packages/urllib3/filepost.py,sha256=NvLlFsdt8ih_Q4S2ekQF3CJG0nOXs32YI-G04_AdT2g,2320 
+pip/_vendor/requests/packages/urllib3/poolmanager.py,sha256=W09uewCGoKSzezei0DwaTXT7kuvsF2elO2wUXWfiAco,9614 +pip/_vendor/requests/packages/urllib3/request.py,sha256=jET7OvA3FSjxABBRGhCyMdPvM9XuJA6df9gRhkJiJiY,5988 +pip/_vendor/requests/packages/urllib3/response.py,sha256=Xx9-heNZOrcx5suX-I1PYT_uEbhVtqOHqkui309g9XY,18542 +pip/_vendor/requests/packages/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/requests/packages/urllib3/contrib/appengine.py,sha256=VP10uoVbNpH0kYVbOFd7dN5dtDcVfEytMoriKsDBBuI,7938 +pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py,sha256=r-vMDMXAGbix9a7-IhbKVTATmAst-5g4hKYOLf8Kd5M,4531 +pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py,sha256=JsdAh0gL4XvQzhOEBRoFtJN91qLf1LFIDEFZs95445I,11778 +pip/_vendor/requests/packages/urllib3/contrib/socks.py,sha256=mAHrChjtbElhGBoHOGq5CqRjtmdFiS_YjnTr2HlUNYU,5669 +pip/_vendor/requests/packages/urllib3/packages/__init__.py,sha256=nlChrGzkjCkmhCX9HrF_qHPUgosfsPQkVIJxiiLhk9g,109 +pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py,sha256=VQaPONfhVMsb8B63Xg7ZOydJqIE_jzeMhVN3Pec6ogw,8935 +pip/_vendor/requests/packages/urllib3/packages/six.py,sha256=U-rO-WBrFS8PxHeamSl6okKCjqPF18NhiZb0qPZ67XM,11628 +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py,sha256=cOWMIn1orgJoA35p6pSzO_-Dc6iOX9Dhl6D2sL9b_2o,460 +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=fK28k37hL7-D79v9iM2fHgNK9Q1Pw0M7qVRL4rkfFjQ,3778 +pip/_vendor/requests/packages/urllib3/util/__init__.py,sha256=n2QE9_0Bb6u8tf7LUc4qKe8V-Hz9G8lEOc9j_30Q8d0,892 +pip/_vendor/requests/packages/urllib3/util/connection.py,sha256=6PvDBlK_6QDLHzEDT-uEMhqKcDoSuRO43Vtb4IXfkzQ,3380 +pip/_vendor/requests/packages/urllib3/util/request.py,sha256=ZMDewRK-mjlK72szGIIjzYnLIn-zPP0WgJUMjKeZ6Tg,2128 +pip/_vendor/requests/packages/urllib3/util/response.py,sha256=1UFd5TIp9MyBp4xgnZoyQZscZVPPr0tWRaXNR5w_vds,2165 +pip/_vendor/requests/packages/urllib3/util/retry.py,sha256=WjO8iuwSR-TFJUPW1OhuuhvNReZHJ8v_QEPThdUzOLE,10350 +pip/_vendor/requests/packages/urllib3/util/ssl_.py,sha256=eVPo9L7d8x8g1wnBi2swWM49AXu5I2pKXKAIHs2ar_k,11624 +pip/_vendor/requests/packages/urllib3/util/timeout.py,sha256=ioAIYptFyBG7eU_r8_ZmO45hpj1dJE6WCvrGR9dNFjs,9596 +pip/_vendor/requests/packages/urllib3/util/url.py,sha256=EcX4ZfmgKWcqM4sY9FlC-yN4y_snuURPV0TpUPHNjnc,5879 +pip/commands/__init__.py,sha256=naZ1iIWRutNznOVpLj8qyn1GPE0B5rhCWCrSUOZSt4M,2145 +pip/commands/completion.py,sha256=2BEUY3jowgemiIGgUP3rpk6A9My4Eu8rTPosFxlESOE,1967 +pip/commands/download.py,sha256=dMRtH0JMBhNGlJWr1qC29vOeiBzG2K0OjOAfzdxSVgA,4804 +pip/commands/freeze.py,sha256=KmQoLf-HruqBDzc-F2-ganGVn2lboNQqppfyrMsx3SU,2774 +pip/commands/hash.py,sha256=MCt4jEFyfoce0lVeNEz1x49uaTY-VDkKiBvvxrVcHkw,1597 +pip/commands/help.py,sha256=84HWkEdnGP_AEBHnn8gJP2Te0XTXRKFoXqXopbOZTNo,982 +pip/commands/install.py,sha256=8MOsH3IlL3ovZhTQtZwHhJb19pnkr8eKNE_9klVJ3PU,14971 +pip/commands/list.py,sha256=u76U5TLODQ2g53sSUA4q6WhYus7usbuWuITQJsCnP3E,7412 +pip/commands/search.py,sha256=qOfCTw9wEN6_lVBSCLjOHiPrTGBMx4n8_ZCIWgIKL0M,4816 +pip/commands/show.py,sha256=dytBbI9XV-ChpV51tsuBygZJJO-QaO2Gtz5kbLkBCZE,5815 +pip/commands/uninstall.py,sha256=tz8cXz4WdpUdnt3RvpdQwH6_SNMB50egBIZWa1dwfcc,2884 +pip/commands/wheel.py,sha256=iT92Uo8qpVILl_Yk8L7AtkFVYGmY0ep5oDeyQSpwkLs,7528 +pip/compat/__init__.py,sha256=7WN0B0XMYIldfminnT679VoEJLxNQPi9MFwCIt1_llU,4669 +pip/compat/dictconfig.py,sha256=dRrelPDWrceDSzFT51RTEVY2GuM7UDyc5Igh_tn4Fvk,23096 
+pip/compat/ordereddict.py,sha256=6RQCd4PyTE4tvLUoAnsygvrreOSTV4BRDbc_4gCSkTs,4110 +pip/models/__init__.py,sha256=0Rs7_RA4DxeOkWT5Cq4CQzDrSEhvYcN3TH2cazr72PE,71 +pip/models/index.py,sha256=pUfbO__v3mD9j-2n_ClwPS8pVyx4l2wIwyvWt8GMCRA,487 +pip/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/operations/freeze.py,sha256=H6xpxe1XgoNm5f3UXK47kNy0OQfM5jzo4UUwQu7G-Lo,4048 +pip/req/__init__.py,sha256=vFwZY8_Vc1WU1zFAespg1My_r_AT3n7cN0W9eX0EFqk,276 +pip/req/req_file.py,sha256=3eaVnPMUAjikLdC5i8hZUAf8aAOby2UxmAVFf94FOXY,11928 +pip/req/req_install.py,sha256=ArdEHUTrB3Xvyy_lWop2V0c_8ArTgpZdpR8shXrJ8eU,45583 +pip/req/req_set.py,sha256=5gLPqd4vDbpl7mz9wOYPgpqIoB-EsjI0hcFuSzC8dw8,32764 +pip/req/req_uninstall.py,sha256=fdH2VgCjEC8NRYDS7fRu3ZJaBBUEy-N5muwxDX5MBNM,6897 +pip/utils/__init__.py,sha256=WdAVVUl5j0OQ3u3eR3Qtu3Um3OmeVflR7W1BmMmvmDU,27531 +pip/utils/appdirs.py,sha256=KTpZANfjYw5K2tZ0_jNNdP_kMxQAns79qZWelwaJo0c,7896 +pip/utils/build.py,sha256=4smLRrfSCmXmjEnVnMFh2tBEpNcSLRe6J0ejZJ-wWJE,1312 +pip/utils/deprecation.py,sha256=DR3cKqzovYu9Pif7c9bT2KmwekfW95N3BsI45_5u38I,2239 +pip/utils/encoding.py,sha256=NQxGiFS5GbeAveLZTnx92t5r0PYqvt0iRnP2u9SGG1w,971 +pip/utils/filesystem.py,sha256=ZEVBuYM3fqr2_lgOESh4Y7fPFszGD474zVm_M3Mb5Tk,899 +pip/utils/hashes.py,sha256=oMk7cd3PbJgzpSQyXq1MytMud5f6H5Oa2YY5hYuCq6I,2866 +pip/utils/logging.py,sha256=7yWu4gZw-Qclj7X80QVdpGWkdTWGKT4LiUVKcE04pro,3327 +pip/utils/outdated.py,sha256=fNwOCL5r2EftPGhgCYGMKu032HC8cV-JAr9lp0HmToM,5455 +pip/utils/setuptools_build.py,sha256=8IGop-SZ6lxUl5HMOjLRaDlORPugIH_b_b2Y67x4jQc,240 +pip/utils/ui.py,sha256=pbDkSAeumZ6jdZcOJ2yAbx8iBgeP2zfpqNnLJK1gskQ,11597 +pip/vcs/__init__.py,sha256=lnea41zMq9HqB1Qo7hxy2IjUzk5WtBvnoloCCMR6Vk4,12349 +pip/vcs/bazaar.py,sha256=tYTwc4b4off8mr0O2o8SiGejqBDJxcbDBMSMd9-ISYc,3803 +pip/vcs/git.py,sha256=u16VCiNW_a9AaYqLri2b8-f4lOZlOYwsGpHHV3uv_dQ,10218 +pip/vcs/mercurial.py,sha256=xG6rDiwHCRytJEs23SIHBXl_SwQo2jkkdD_6rVVP5h4,3472 +pip/vcs/subversion.py,sha256=mGT7sAzuVc1u-9MPoXJNyShnRzhdJpDdGNuhhzUPv6w,8687 +pip-8.1.2.dist-info/DESCRIPTION.rst,sha256=jSvW1qOjwzndvm_p_DexGCVJfwgg3rWPMJWzf6Rmsfc,1167 +pip-8.1.2.dist-info/METADATA,sha256=P3mID1oPoIKxUDsH1D5yk4h6P4N1T5t0sosQul1OMx4,2362 +pip-8.1.2.dist-info/RECORD,, +pip-8.1.2.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +pip-8.1.2.dist-info/entry_points.txt,sha256=GWc-Wb9WUKZ1EuVWNz-G0l3BeIpbNJLx0OJbZ61AAV0,68 +pip-8.1.2.dist-info/metadata.json,sha256=pGIpFivM6SXoww8RYGdf33RD9yK7EFA2eAgvEP7SlLg,1513 +pip-8.1.2.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +../../../bin/pip,sha256=2LpQhjQy1UtjEfjAoFcnLkwS7dWpYH2wN2tpGsU6FBc,221 +../../../bin/pip2,sha256=2LpQhjQy1UtjEfjAoFcnLkwS7dWpYH2wN2tpGsU6FBc,221 +../../../bin/pip2.7,sha256=2LpQhjQy1UtjEfjAoFcnLkwS7dWpYH2wN2tpGsU6FBc,221 +pip-8.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip/_vendor/requests/compat.pyc,, +pip/_vendor/cachecontrol/cache.pyc,, +pip/_vendor/requests/certs.pyc,, +pip/_vendor/requests/adapters.pyc,, +pip/_vendor/progress/spinner.pyc,, +pip/__init__.pyc,, +pip/_vendor/html5lib/treewalkers/genshistream.pyc,, +pip/_vendor/requests/packages/chardet/latin1prober.pyc,, +pip/_vendor/distlib/_backport/__init__.pyc,, +pip/vcs/bazaar.pyc,, +pip/_vendor/distlib/metadata.pyc,, +pip/compat/__init__.pyc,, +pip/_vendor/html5lib/treebuilders/__init__.pyc,, +pip/_vendor/html5lib/filters/alphabeticalattributes.pyc,, +pip/_vendor/requests/packages/urllib3/request.pyc,, 
+pip/_vendor/requests/packages/chardet/escprober.pyc,, +pip/_vendor/distlib/util.pyc,, +pip/_vendor/requests/packages/chardet/langbulgarianmodel.pyc,, +pip/commands/completion.pyc,, +pip/_vendor/html5lib/html5parser.pyc,, +pip/_vendor/html5lib/filters/lint.pyc,, +pip/pep425tags.pyc,, +pip/_vendor/packaging/__init__.pyc,, +pip/_vendor/lockfile/symlinklockfile.pyc,, +pip/_vendor/requests/packages/chardet/__init__.pyc,, +pip/_vendor/packaging/utils.pyc,, +pip/_vendor/html5lib/filters/whitespace.pyc,, +pip/_vendor/requests/packages/chardet/euckrprober.pyc,, +pip/_vendor/html5lib/treebuilders/etree_lxml.pyc,, +pip/_vendor/distlib/database.pyc,, +pip/_vendor/html5lib/treeadapters/sax.pyc,, +pip/commands/wheel.pyc,, +pip/_vendor/requests/auth.pyc,, +pip/_vendor/html5lib/treewalkers/pulldom.pyc,, +pip/_vendor/packaging/__about__.pyc,, +pip/_vendor/progress/__init__.pyc,, +pip/_vendor/cachecontrol/_cmd.pyc,, +pip/_vendor/html5lib/ihatexml.pyc,, +pip/_vendor/html5lib/treewalkers/_base.pyc,, +pip/commands/list.pyc,, +pip/_vendor/distlib/scripts.pyc,, +pip/_vendor/html5lib/filters/sanitizer.pyc,, +pip/vcs/git.pyc,, +pip/cmdoptions.pyc,, +pip/_vendor/lockfile/linklockfile.pyc,, +pip/commands/hash.pyc,, +pip/_vendor/requests/packages/urllib3/util/ssl_.pyc,, +pip/_vendor/requests/packages/urllib3/poolmanager.pyc,, +pip/req/req_set.pyc,, +pip/_vendor/html5lib/inputstream.pyc,, +pip/req/__init__.pyc,, +pip/_vendor/html5lib/sanitizer.pyc,, +pip/_vendor/colorama/win32.pyc,, +pip/_vendor/distlib/resources.pyc,, +pip/_vendor/requests/packages/chardet/hebrewprober.pyc,, +pip/_vendor/requests/cookies.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.pyc,, +pip/_vendor/lockfile/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/packages/six.pyc,, +pip/_vendor/progress/helpers.pyc,, +pip/_vendor/ipaddress.pyc,, +pip/_vendor/html5lib/trie/__init__.pyc,, +pip/operations/__init__.pyc,, +pip/_vendor/pyparsing.pyc,, +pip/_vendor/html5lib/__init__.pyc,, +pip/_vendor/requests/packages/chardet/charsetgroupprober.pyc,, +pip/_vendor/cachecontrol/serialize.pyc,, +pip/_vendor/requests/packages/urllib3/packages/__init__.pyc,, +pip/_vendor/distlib/_backport/misc.pyc,, +pip/_vendor/requests/packages/chardet/gb2312freq.pyc,, +pip/_vendor/requests/packages/chardet/codingstatemachine.pyc,, +pip/_vendor/cachecontrol/compat.pyc,, +pip/_vendor/requests/packages/urllib3/util/url.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.pyc,, +pip/_vendor/cachecontrol/controller.pyc,, +pip/utils/deprecation.pyc,, +pip/_vendor/requests/packages/chardet/langgreekmodel.pyc,, +pip/_vendor/requests/packages/__init__.pyc,, +pip/commands/show.pyc,, +pip/_vendor/requests/packages/chardet/jpcntx.pyc,, +pip/_vendor/colorama/initialise.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/appengine.pyc,, +pip/_vendor/html5lib/treebuilders/_base.pyc,, +pip/commands/help.pyc,, +pip/_vendor/requests/utils.pyc,, +pip/_vendor/colorama/__init__.pyc,, +pip/_vendor/packaging/_compat.pyc,, +pip/_vendor/distlib/version.pyc,, +pip/utils/ui.pyc,, +pip/commands/uninstall.pyc,, +pip/_vendor/distlib/index.pyc,, +pip/_vendor/cachecontrol/heuristics.pyc,, +pip/commands/freeze.pyc,, +pip/_vendor/requests/sessions.pyc,, +pip/_vendor/requests/packages/chardet/big5prober.pyc,, +pip/_vendor/requests/packages/chardet/langthaimodel.pyc,, +pip/_vendor/distlib/_backport/tarfile.pyc,, +pip/_vendor/requests/packages/urllib3/util/response.pyc,, +pip/_vendor/requests/packages/chardet/mbcsgroupprober.pyc,, 
+pip/_vendor/html5lib/filters/__init__.pyc,, +pip/baseparser.pyc,, +pip/status_codes.pyc,, +pip/_vendor/cachecontrol/caches/redis_cache.pyc,, +pip/_vendor/distlib/__init__.pyc,, +pip/_vendor/pkg_resources/__init__.pyc,, +pip/_vendor/html5lib/treewalkers/__init__.pyc,, +pip/operations/freeze.pyc,, +pip/commands/search.pyc,, +pip/_vendor/requests/packages/chardet/langhungarianmodel.pyc,, +pip/_vendor/requests/models.pyc,, +pip/_vendor/html5lib/utils.pyc,, +pip/compat/dictconfig.pyc,, +pip/_vendor/html5lib/trie/datrie.pyc,, +pip/_vendor/requests/structures.pyc,, +pip/_vendor/packaging/version.pyc,, +pip/_vendor/requests/packages/chardet/euckrfreq.pyc,, +pip/_vendor/cachecontrol/adapter.pyc,, +pip/_vendor/requests/packages/urllib3/fields.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.pyc,, +pip/vcs/__init__.pyc,, +pip/req/req_file.pyc,, +pip/models/__init__.pyc,, +pip/_vendor/html5lib/trie/py.pyc,, +pip/_vendor/packaging/specifiers.pyc,, +pip/download.pyc,, +pip/commands/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/__init__.pyc,, +pip/_vendor/distlib/compat.pyc,, +pip/models/index.pyc,, +pip/_vendor/lockfile/mkdirlockfile.pyc,, +pip/_vendor/distlib/wheel.pyc,, +pip/_vendor/requests/packages/chardet/euctwprober.pyc,, +pip/utils/build.pyc,, +pip/_vendor/requests/packages/chardet/escsm.pyc,, +pip/_vendor/requests/status_codes.pyc,, +pip/_vendor/requests/exceptions.pyc,, +pip/_vendor/distlib/markers.pyc,, +pip/index.pyc,, +pip/utils/logging.pyc,, +pip/_vendor/packaging/requirements.pyc,, +pip/_vendor/requests/packages/chardet/cp949prober.pyc,, +pip/utils/outdated.pyc,, +pip/_vendor/requests/api.pyc,, +pip/_vendor/requests/packages/urllib3/filepost.pyc,, +pip/_vendor/requests/packages/chardet/big5freq.pyc,, +pip/_vendor/html5lib/treebuilders/etree.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.pyc,, +pip/_vendor/html5lib/treebuilders/dom.pyc,, +pip/_vendor/requests/packages/chardet/mbcssm.pyc,, +pip/_vendor/distlib/_backport/shutil.pyc,, +pip/_vendor/requests/packages/chardet/sbcsgroupprober.pyc,, +pip/utils/encoding.pyc,, +pip/__main__.pyc,, +pip/_vendor/html5lib/tokenizer.pyc,, +pip/_vendor/lockfile/sqlitelockfile.pyc,, +pip/_vendor/requests/packages/chardet/utf8prober.pyc,, +pip/_vendor/requests/packages/chardet/langhebrewmodel.pyc,, +pip/_vendor/requests/packages/chardet/compat.pyc,, +pip/_vendor/progress/counter.pyc,, +pip/utils/filesystem.pyc,, +pip/_vendor/six.pyc,, +pip/_vendor/requests/packages/chardet/langcyrillicmodel.pyc,, +pip/_vendor/requests/packages/chardet/constants.pyc,, +pip/_vendor/requests/packages/urllib3/packages/ordered_dict.pyc,, +pip/_vendor/requests/packages/chardet/eucjpprober.pyc,, +pip/_vendor/retrying.pyc,, +pip/utils/hashes.pyc,, +pip/_vendor/html5lib/treewalkers/etree.pyc,, +pip/_vendor/cachecontrol/wrapper.pyc,, +pip/_vendor/requests/hooks.pyc,, +pip/_vendor/packaging/_structures.pyc,, +pip/_vendor/requests/packages/urllib3/connectionpool.pyc,, +pip/_vendor/requests/packages/chardet/mbcharsetprober.pyc,, +pip/locations.pyc,, +pip/_vendor/requests/packages/chardet/jisfreq.pyc,, +pip/utils/setuptools_build.pyc,, +pip/vcs/subversion.pyc,, +pip/_vendor/requests/packages/urllib3/util/connection.pyc,, +pip/exceptions.pyc,, +pip/basecommand.pyc,, +pip/_vendor/distlib/locators.pyc,, +pip/_vendor/html5lib/filters/_base.pyc,, +pip/_vendor/re-vendor.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/__init__.pyc,, +pip/_vendor/requests/packages/chardet/euctwfreq.pyc,, +pip/commands/download.pyc,, 
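Editor's note: an illustrative aside, not part of the recorded wheel metadata. The RECORD rows above and below use pip's "path,sha256=<digest>,size" format, where the digest is an unpadded urlsafe base64 SHA-256; the ".pyc" rows leave the hash and size fields empty because byte-compiled files vary per interpreter. A minimal verification sketch (helper names are ours, not pip's):

import base64
import csv
import hashlib

def record_digest(path):
    # pip stores digests as unpadded urlsafe base64 of the file's sha256
    with open(path, 'rb') as fh:
        raw = hashlib.sha256(fh.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')

def verify_record(record_path):
    # paths in RECORD are relative to the site-packages directory
    with open(record_path) as fh:
        for path, expected, _size in csv.reader(fh):
            if expected:  # skip rows (like .pyc files) with empty hashes
                assert record_digest(path) == expected, path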
+pip/_vendor/requests/packages/chardet/chardistribution.pyc,, +pip/_vendor/cachecontrol/caches/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/socks.pyc,, +pip/_vendor/distlib/_backport/sysconfig.pyc,, +pip/_vendor/requests/__init__.pyc,, +pip/req/req_uninstall.pyc,, +pip/_vendor/requests/packages/chardet/sbcharsetprober.pyc,, +pip/_vendor/colorama/ansitowin32.pyc,, +pip/_vendor/cachecontrol/filewrapper.pyc,, +pip/_vendor/requests/packages/chardet/sjisprober.pyc,, +pip/_vendor/requests/packages/urllib3/util/timeout.pyc,, +pip/_vendor/html5lib/treewalkers/dom.pyc,, +pip/utils/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/_collections.pyc,, +pip/_vendor/requests/packages/urllib3/util/request.pyc,, +pip/_vendor/requests/packages/urllib3/util/__init__.pyc,, +pip/_vendor/distlib/manifest.pyc,, +pip/_vendor/html5lib/serializer/htmlserializer.pyc,, +pip/_vendor/html5lib/serializer/__init__.pyc,, +pip/_vendor/html5lib/trie/_base.pyc,, +pip/_vendor/requests/packages/urllib3/response.pyc,, +pip/req/req_install.pyc,, +pip/_vendor/html5lib/treeadapters/__init__.pyc,, +pip/_vendor/html5lib/filters/inject_meta_charset.pyc,, +pip/_vendor/requests/packages/chardet/charsetprober.pyc,, +pip/compat/ordereddict.pyc,, +pip/_vendor/requests/packages/urllib3/util/retry.pyc,, +pip/vcs/mercurial.pyc,, +pip/_vendor/cachecontrol/__init__.pyc,, +pip/_vendor/requests/packages/urllib3/exceptions.pyc,, +pip/_vendor/__init__.pyc,, +pip/_vendor/requests/packages/chardet/universaldetector.pyc,, +pip/_vendor/html5lib/constants.pyc,, +pip/_vendor/html5lib/treewalkers/lxmletree.pyc,, +pip/_vendor/colorama/ansi.pyc,, +pip/_vendor/packaging/markers.pyc,, +pip/commands/install.pyc,, +pip/_vendor/requests/packages/chardet/gb2312prober.pyc,, +pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.pyc,, +pip/_vendor/progress/bar.pyc,, +pip/wheel.pyc,, +pip/_vendor/lockfile/pidlockfile.pyc,, +pip/_vendor/requests/packages/chardet/chardetect.pyc,, +pip/_vendor/html5lib/filters/optionaltags.pyc,, +pip/_vendor/requests/packages/urllib3/connection.pyc,, +pip/utils/appdirs.pyc,, +pip/_vendor/colorama/winterm.pyc,, diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/WHEEL b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/entry_points.txt b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/entry_points.txt new file mode 100644 index 0000000..c02a8d5 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pip = pip:main +pip3 = pip:main +pip3.5 = pip:main + diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/metadata.json b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/metadata.json new file mode 100644 index 0000000..17ac7e0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Build Tools", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming 
Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: PyPy"], "extensions": {"python.commands": {"wrap_console": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}, "python.details": {"contacts": [{"email": "python-virtualenv@groups.google.com", "name": "The pip developers", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://pip.pypa.io/"}}, "python.exports": {"console_scripts": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}}, "extras": ["testing"], "generator": "bdist_wheel (0.29.0)", "keywords": ["easy_install", "distutils", "setuptools", "egg", "virtualenv"], "license": "MIT", "metadata_version": "2.0", "name": "pip", "run_requires": [{"extra": "testing", "requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "summary": "The PyPA recommended tool for installing Python packages.", "test_requires": [{"requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "version": "8.1.2"} \ No newline at end of file diff --git a/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/top_level.txt b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/top_level.txt new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip-8.1.2.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python2.7/site-packages/pip/__init__.py b/venv/lib/python2.7/site-packages/pip/__init__.py new file mode 100644 index 0000000..588c881 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/__init__.py @@ -0,0 +1,319 @@ +#!/usr/bin/env python +from __future__ import absolute_import + +import locale +import logging +import os +import optparse +import warnings + +import sys +import re + +from pip.exceptions import InstallationError, CommandError, PipError +from pip.utils import get_installed_distributions, get_prog +from pip.utils import deprecation, dist_is_editable +from pip.vcs import git, mercurial, subversion, bazaar # noqa +from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter +from pip.commands import get_summaries, get_similar_commands +from pip.commands import commands_dict +from pip._vendor.requests.packages.urllib3.exceptions import ( + InsecureRequestWarning, +) + + +# assignment for flake8 to be happy + +# This fixes a peculiarity when importing via __import__ - as we are +# initialising the pip module, "from pip import cmdoptions" is recursive +# and appears not to work properly in that situation. +import pip.cmdoptions +cmdoptions = pip.cmdoptions + +# The version as used in the setup.py and the docs conf.py +__version__ = "8.1.2" + + +logger = logging.getLogger(__name__) + +# Hide the InsecureRequestWArning from urllib3 +warnings.filterwarnings("ignore", category=InsecureRequestWarning) + + +def autocomplete(): + """Command and option completion for the main option parser (and options) + and its subcommands (and options). + + Enable by sourcing one of the completion shell scripts (bash or zsh). + """ + # Don't complete if user hasn't sourced bash_completion file. 
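Editor's note: a hedged sketch of the completion protocol that autocomplete() below implements; pip's shell completion hook exports these environment variables and reads candidates from stdout. The subprocess invocation is illustrative only:

import os
import subprocess

def shell_complete(words, cword):
    env = dict(os.environ,
               PIP_AUTO_COMPLETE='1',        # gates the early return below
               COMP_WORDS=' '.join(words),   # the command line, e.g. 'pip ins'
               COMP_CWORD=str(cword))        # index of the word being completed
    # autocomplete() prints matches and exits with status 1, so no check call
    proc = subprocess.Popen(['pip'], env=env, stdout=subprocess.PIPE)
    return proc.communicate()[0].split()

# shell_complete(['pip', 'ins'], 1) returns [b'install'] on a stock install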
+ if 'PIP_AUTO_COMPLETE' not in os.environ: + return + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + try: + current = cwords[cword - 1] + except IndexError: + current = '' + + subcommands = [cmd for cmd, summary in get_summaries()] + options = [] + # subcommand + try: + subcommand_name = [w for w in cwords if w in subcommands][0] + except IndexError: + subcommand_name = None + + parser = create_main_parser() + # subcommand options + if subcommand_name: + # special case: 'help' subcommand has no options + if subcommand_name == 'help': + sys.exit(1) + # special case: list locally installed dists for uninstall command + if subcommand_name == 'uninstall' and not current.startswith('-'): + installed = [] + lc = current.lower() + for dist in get_installed_distributions(local_only=True): + if dist.key.startswith(lc) and dist.key not in cwords[1:]: + installed.append(dist.key) + # if there are no dists installed, fall back to option completion + if installed: + for dist in installed: + print(dist) + sys.exit(1) + + subcommand = commands_dict[subcommand_name]() + options += [(opt.get_opt_string(), opt.nargs) + for opt in subcommand.parser.option_list_all + if opt.help != optparse.SUPPRESS_HELP] + + # filter out previously specified options from available options + prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] + options = [(x, v) for (x, v) in options if x not in prev_opts] + # filter options by current input + options = [(k, v) for k, v in options if k.startswith(current)] + for option in options: + opt_label = option[0] + # append '=' to options which require args + if option[1]: + opt_label += '=' + print(opt_label) + else: + # show main parser options only when necessary + if current.startswith('-') or current.startswith('--'): + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + opts = (o for it in opts for o in it) + + subcommands += [i.get_opt_string() for i in opts + if i.help != optparse.SUPPRESS_HELP] + + print(' '.join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def create_main_parser(): + parser_kw = { + 'usage': '\n%prog [options]', + 'add_help_option': False, + 'formatter': UpdatingDefaultsHelpFormatter(), + 'name': 'global', + 'prog': get_prog(), + } + + parser = ConfigOptionParser(**parser_kw) + parser.disable_interspersed_args() + + pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + parser.version = 'pip %s from %s (python %s)' % ( + __version__, pip_pkg_dir, sys.version[:3]) + + # add the general options + gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) + parser.add_option_group(gen_opts) + + parser.main = True # so the help formatter knows + + # create command listing for description + command_summaries = get_summaries() + description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + parser.description = '\n'.join(description) + + return parser + + +def parseopts(args): + parser = create_main_parser() + + # Note: parser calls disable_interspersed_args(), so the result of this + # call is to split the initial args into the general options before the + # subcommand and everything else. 
+ # For example: + # args: ['--timeout=5', 'install', '--user', 'INITools'] + # general_options: ['--timeout==5'] + # args_else: ['install', '--user', 'INITools'] + general_options, args_else = parser.parse_args(args) + + # --version + if general_options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help -> print_help() + if not args_else or (args_else[0] == 'help' and len(args_else) == 1): + parser.print_help() + sys.exit() + + # the subcommand name + cmd_name = args_else[0] + + if cmd_name not in commands_dict: + guess = get_similar_commands(cmd_name) + + msg = ['unknown command "%s"' % cmd_name] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + # all the args without the subcommand + cmd_args = args[:] + cmd_args.remove(cmd_name) + + return cmd_name, cmd_args + + +def check_isolated(args): + isolated = False + + if "--isolated" in args: + isolated = True + + return isolated + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # Configure our deprecation warnings to be sent through loggers + deprecation.install_warning_logger() + + autocomplete() + + try: + cmd_name, cmd_args = parseopts(args) + except PipError as exc: + sys.stderr.write("ERROR: %s" % exc) + sys.stderr.write(os.linesep) + sys.exit(1) + + # Needed for locale.getpreferredencoding(False) to work + # in pip.utils.encoding.auto_decode + try: + locale.setlocale(locale.LC_ALL, '') + except locale.Error as e: + # setlocale can apparently crash if locale are uninitialized + logger.debug("Ignoring error %s when setting locale", e) + command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) + return command.main(cmd_args) + + +# ########################################################### +# # Writing freeze files + +class FrozenRequirement(object): + + def __init__(self, name, req, editable, comments=()): + self.name = name + self.req = req + self.editable = editable + self.comments = comments + + _rev_re = re.compile(r'-r(\d+)$') + _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') + + @classmethod + def from_dist(cls, dist, dependency_links): + location = os.path.normcase(os.path.abspath(dist.location)) + comments = [] + from pip.vcs import vcs, get_src_requirement + if dist_is_editable(dist) and vcs.get_backend_name(location): + editable = True + try: + req = get_src_requirement(dist, location) + except InstallationError as exc: + logger.warning( + "Error when trying to get requirement for VCS system %s, " + "falling back to uneditable format", exc + ) + req = None + if req is None: + logger.warning( + 'Could not determine repository location of %s', location + ) + comments.append( + '## !! 
Could not determine repository location' + ) + req = dist.as_requirement() + editable = False + else: + editable = False + req = dist.as_requirement() + specs = req.specs + assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ + 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ + (specs, dist) + version = specs[0][1] + ver_match = cls._rev_re.search(version) + date_match = cls._date_re.search(version) + if ver_match or date_match: + svn_backend = vcs.get_backend('svn') + if svn_backend: + svn_location = svn_backend().get_location( + dist, + dependency_links, + ) + if not svn_location: + logger.warning( + 'Warning: cannot find svn location for %s', req) + comments.append( + '## FIXME: could not find svn URL in dependency_links ' + 'for this package:' + ) + else: + comments.append( + '# Installing as editable to satisfy requirement %s:' % + req + ) + if ver_match: + rev = ver_match.group(1) + else: + rev = '{%s}' % date_match.group(1) + editable = True + req = '%s@%s#egg=%s' % ( + svn_location, + rev, + cls.egg_name(dist) + ) + return cls(dist.project_name, req, editable, comments) + + @staticmethod + def egg_name(dist): + name = dist.egg_name() + match = re.search(r'-py\d\.\d$', name) + if match: + name = name[:match.start()] + return name + + def __str__(self): + req = self.req + if self.editable: + req = '-e %s' % req + return '\n'.join(list(self.comments) + [str(req)]) + '\n' + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/venv/lib/python2.7/site-packages/pip/__main__.py b/venv/lib/python2.7/site-packages/pip/__main__.py new file mode 100644 index 0000000..5556539 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/__main__.py @@ -0,0 +1,19 @@ +from __future__ import absolute_import + +import os +import sys + +# If we are running from a wheel, add the wheel to sys.path +# This allows the usage python pip-*.whl/pip install pip-*.whl +if __package__ == '': + # __file__ is pip-*.whl/pip/__main__.py + # first dirname call strips of '/__main__.py', second strips off '/pip' + # Resulting path is the name of the wheel itself + # Add that to sys.path so we can import pip + path = os.path.dirname(os.path.dirname(__file__)) + sys.path.insert(0, path) + +import pip # noqa + +if __name__ == '__main__': + sys.exit(pip.main()) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py new file mode 100644 index 0000000..d0e7b34 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/__init__.py @@ -0,0 +1,106 @@ +""" +pip._vendor is for vendoring dependencies of pip to prevent needing pip to +depend on something external. + +Files inside of pip._vendor should be considered immutable and should only be +updated to versions from upstream. +""" +from __future__ import absolute_import + +import glob +import os.path +import sys + +# Downstream redistributors which have debundled our dependencies should also +# patch this value to be true. This will trigger the additional patching +# to cause things like "six" to be available as pip. +DEBUNDLED = False + +# By default, look in this directory for a bunch of .whl files which we will +# add to the beginning of sys.path before attempting to import anything. This +# is done to support downstream re-distributors like Debian and Fedora who +# wish to create their own Wheels for our dependencies to aid in debundling. 
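Editor's note: a minimal sketch of what the vendored() helper defined below amounts to for one module, when DEBUNDLED is true and only the system copy exists (module names are from this file; the alias() helper is ours):

import importlib
import sys

def alias(vendored_name, real_name):
    module = importlib.import_module(real_name)
    sys.modules[vendored_name] = module          # future imports resolve here
    base, _, head = vendored_name.rpartition('.')
    setattr(sys.modules[base], head, module)     # attribute access works too

# alias('pip._vendor.six', 'six') makes `from pip._vendor import six`
# hand back the system-wide six.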
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) + + +# Define a small helper function to alias our vendored modules to the real ones +# if the vendored ones do not exist. This idea of this was taken from +# https://github.com/kennethreitz/requests/pull/2567. +def vendored(modulename): + vendored_name = "{0}.{1}".format(__name__, modulename) + + try: + __import__(vendored_name, globals(), locals(), level=0) + except ImportError: + try: + __import__(modulename, globals(), locals(), level=0) + except ImportError: + # We can just silently allow import failures to pass here. If we + # got to this point it means that ``import pip._vendor.whatever`` + # failed and so did ``import whatever``. Since we're importing this + # upfront in an attempt to alias imports, not erroring here will + # just mean we get a regular import error whenever pip *actually* + # tries to import one of these modules to use it, which actually + # gives us a better error message than we would have otherwise + # gotten. + pass + else: + sys.modules[vendored_name] = sys.modules[modulename] + base, head = vendored_name.rsplit(".", 1) + setattr(sys.modules[base], head, sys.modules[modulename]) + + +# If we're operating in a debundled setup, then we want to go ahead and trigger +# the aliasing of our vendored libraries as well as looking for wheels to add +# to our sys.path. This will cause all of this code to be a no-op typically +# however downstream redistributors can enable it in a consistent way across +# all platforms. +if DEBUNDLED: + # Actually look inside of WHEEL_DIR to find .whl files and add them to the + # front of our sys.path. + sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path + + # Actually alias all of our vendored dependencies. + vendored("cachecontrol") + vendored("colorama") + vendored("distlib") + vendored("html5lib") + vendored("lockfile") + vendored("six") + vendored("six.moves") + vendored("six.moves.urllib") + vendored("packaging") + vendored("packaging.version") + vendored("packaging.specifiers") + vendored("pkg_resources") + vendored("progress") + vendored("retrying") + vendored("requests") + vendored("requests.packages") + vendored("requests.packages.urllib3") + vendored("requests.packages.urllib3._collections") + vendored("requests.packages.urllib3.connection") + vendored("requests.packages.urllib3.connectionpool") + vendored("requests.packages.urllib3.contrib") + vendored("requests.packages.urllib3.contrib.ntlmpool") + vendored("requests.packages.urllib3.contrib.pyopenssl") + vendored("requests.packages.urllib3.exceptions") + vendored("requests.packages.urllib3.fields") + vendored("requests.packages.urllib3.filepost") + vendored("requests.packages.urllib3.packages") + vendored("requests.packages.urllib3.packages.ordered_dict") + vendored("requests.packages.urllib3.packages.six") + vendored("requests.packages.urllib3.packages.ssl_match_hostname") + vendored("requests.packages.urllib3.packages.ssl_match_hostname." 
+ "_implementation") + vendored("requests.packages.urllib3.poolmanager") + vendored("requests.packages.urllib3.request") + vendored("requests.packages.urllib3.response") + vendored("requests.packages.urllib3.util") + vendored("requests.packages.urllib3.util.connection") + vendored("requests.packages.urllib3.util.request") + vendored("requests.packages.urllib3.util.response") + vendored("requests.packages.urllib3.util.retry") + vendored("requests.packages.urllib3.util.ssl_") + vendored("requests.packages.urllib3.util.timeout") + vendored("requests.packages.urllib3.util.url") diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py new file mode 100644 index 0000000..724e220 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py @@ -0,0 +1,11 @@ +"""CacheControl import Interface. + +Make it easy to import from cachecontrol without long namespaces. +""" +__author__ = 'Eric Larson' +__email__ = 'eric@ionrock.org' +__version__ = '0.11.6' + +from .wrapper import CacheControl +from .adapter import CacheControlAdapter +from .controller import CacheController diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py new file mode 100644 index 0000000..afdcc88 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py @@ -0,0 +1,60 @@ +import logging + +from pip._vendor import requests + +from pip._vendor.cachecontrol.adapter import CacheControlAdapter +from pip._vendor.cachecontrol.cache import DictCache +from pip._vendor.cachecontrol.controller import logger + +from argparse import ArgumentParser + + +def setup_logging(): + logger.setLevel(logging.DEBUG) + handler = logging.StreamHandler() + logger.addHandler(handler) + + +def get_session(): + adapter = CacheControlAdapter( + DictCache(), + cache_etags=True, + serializer=None, + heuristic=None, + ) + sess = requests.Session() + sess.mount('http://', adapter) + sess.mount('https://', adapter) + + sess.cache_controller = adapter.controller + return sess + + +def get_args(): + parser = ArgumentParser() + parser.add_argument('url', help='The URL to try and cache') + return parser.parse_args() + + +def main(args=None): + args = get_args() + sess = get_session() + + # Make a request to get a response + resp = sess.get(args.url) + + # Turn on logging + setup_logging() + + # try setting the cache + sess.cache_controller.cache_response(resp.request, resp.raw) + + # Now try to get it + if sess.cache_controller.cached_request(resp.request): + print('Cached!') + else: + print('Not cached :(') + + +if __name__ == '__main__': + main() diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py new file mode 100644 index 0000000..74589e0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py @@ -0,0 +1,117 @@ +import functools + +from pip._vendor.requests.adapters import HTTPAdapter + +from .controller import CacheController +from .cache import DictCache +from .filewrapper import CallbackFileWrapper + + +class CacheControlAdapter(HTTPAdapter): + invalidating_methods = set(['PUT', 'DELETE']) + + def __init__(self, cache=None, + cache_etags=True, + controller_class=None, + serializer=None, + heuristic=None, + *args, **kw): + super(CacheControlAdapter, self).__init__(*args, **kw) + self.cache = 
cache or DictCache() + self.heuristic = heuristic + + controller_factory = controller_class or CacheController + self.controller = controller_factory( + self.cache, + cache_etags=cache_etags, + serializer=serializer, + ) + + def send(self, request, **kw): + """ + Send a request. Use the request information to see if it + exists in the cache and cache the response if we need to and can. + """ + if request.method == 'GET': + cached_response = self.controller.cached_request(request) + if cached_response: + return self.build_response(request, cached_response, + from_cache=True) + + # check for etags and add headers if appropriate + request.headers.update( + self.controller.conditional_headers(request) + ) + + resp = super(CacheControlAdapter, self).send(request, **kw) + + return resp + + def build_response(self, request, response, from_cache=False): + """ + Build a response by making a request or using the cache. + + This will end up calling send and returning a potentially + cached response + """ + if not from_cache and request.method == 'GET': + + # apply any expiration heuristics + if response.status == 304: + # We must have sent an ETag request. This could mean + # that we've been expired already or that we simply + # have an etag. In either case, we want to try and + # update the cache if that is the case. + cached_response = self.controller.update_cached_response( + request, response + ) + + if cached_response is not response: + from_cache = True + + # We are done with the server response, read a + # possible response body (compliant servers will + # not return one, but we cannot be 100% sure) and + # release the connection back to the pool. + response.read(decode_content=False) + response.release_conn() + + response = cached_response + + # We always cache the 301 responses + elif response.status == 301: + self.controller.cache_response(request, response) + else: + # Check for any heuristics that might update headers + # before trying to cache. + if self.heuristic: + response = self.heuristic.apply(response) + + # Wrap the response file with a wrapper that will cache the + # response when the stream has been consumed. + response._fp = CallbackFileWrapper( + response._fp, + functools.partial( + self.controller.cache_response, + request, + response, + ) + ) + + resp = super(CacheControlAdapter, self).build_response( + request, response + ) + + # See if we should invalidate the cache. + if request.method in self.invalidating_methods and resp.ok: + cache_url = self.controller.cache_url(request.url) + self.cache.delete(cache_url) + + # Give the request a from_cache attr to let people use it + resp.from_cache = from_cache + + return resp + + def close(self): + self.cache.close() + super(CacheControlAdapter, self).close() diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py new file mode 100644 index 0000000..7389a73 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py @@ -0,0 +1,39 @@ +""" +The cache object API for implementing caches. The default is a thread +safe in-memory dictionary. 
+""" +from threading import Lock + + +class BaseCache(object): + + def get(self, key): + raise NotImplemented() + + def set(self, key, value): + raise NotImplemented() + + def delete(self, key): + raise NotImplemented() + + def close(self): + pass + + +class DictCache(BaseCache): + + def __init__(self, init_dict=None): + self.lock = Lock() + self.data = init_dict or {} + + def get(self, key): + return self.data.get(key, None) + + def set(self, key, value): + with self.lock: + self.data.update({key: value}) + + def delete(self, key): + with self.lock: + if key in self.data: + self.data.pop(key) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py new file mode 100644 index 0000000..f9e66a1 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py @@ -0,0 +1,18 @@ +from textwrap import dedent + +try: + from .file_cache import FileCache +except ImportError: + notice = dedent(''' + NOTE: In order to use the FileCache you must have + lockfile installed. You can install it via pip: + pip install lockfile + ''') + print(notice) + + +try: + import redis + from .redis_cache import RedisCache +except ImportError: + pass diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py new file mode 100644 index 0000000..b77728f --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py @@ -0,0 +1,116 @@ +import hashlib +import os + +from pip._vendor.lockfile import LockFile +from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile + +from ..cache import BaseCache +from ..controller import CacheController + + +def _secure_open_write(filename, fmode): + # We only want to write to this file, so open it in write only mode + flags = os.O_WRONLY + + # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only + # will open *new* files. + # We specify this because we want to ensure that the mode we pass is the + # mode of the file. + flags |= os.O_CREAT | os.O_EXCL + + # Do not follow symlinks to prevent someone from making a symlink that + # we follow and insecurely open a cache file. + if hasattr(os, "O_NOFOLLOW"): + flags |= os.O_NOFOLLOW + + # On Windows we'll mark this file as binary + if hasattr(os, "O_BINARY"): + flags |= os.O_BINARY + + # Before we open our file, we want to delete any existing file that is + # there + try: + os.remove(filename) + except (IOError, OSError): + # The file must not exist already, so we can just skip ahead to opening + pass + + # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a + # race condition happens between the os.remove and this line, that an + # error will be raised. Because we utilize a lockfile this should only + # happen if someone is attempting to attack us. 
+    fd = os.open(filename, flags, fmode)
+    try:
+        return os.fdopen(fd, "wb")
+    except:
+        # An error occurred wrapping our FD in a file object
+        os.close(fd)
+        raise
+
+
+class FileCache(BaseCache):
+    def __init__(self, directory, forever=False, filemode=0o0600,
+                 dirmode=0o0700, use_dir_lock=None, lock_class=None):
+
+        if use_dir_lock is not None and lock_class is not None:
+            raise ValueError("Cannot use use_dir_lock and lock_class together")
+
+        if use_dir_lock:
+            lock_class = MkdirLockFile
+
+        if lock_class is None:
+            lock_class = LockFile
+
+        self.directory = directory
+        self.forever = forever
+        self.filemode = filemode
+        self.dirmode = dirmode
+        self.lock_class = lock_class
+
+
+    @staticmethod
+    def encode(x):
+        return hashlib.sha224(x.encode()).hexdigest()
+
+    def _fn(self, name):
+        # NOTE: This method should not change as some may depend on it.
+        # See: https://github.com/ionrock/cachecontrol/issues/63
+        hashed = self.encode(name)
+        parts = list(hashed[:5]) + [hashed]
+        return os.path.join(self.directory, *parts)
+
+    def get(self, key):
+        name = self._fn(key)
+        if not os.path.exists(name):
+            return None
+
+        with open(name, 'rb') as fh:
+            return fh.read()
+
+    def set(self, key, value):
+        name = self._fn(key)
+
+        # Make sure the directory exists
+        try:
+            os.makedirs(os.path.dirname(name), self.dirmode)
+        except (IOError, OSError):
+            pass
+
+        with self.lock_class(name) as lock:
+            # Write our actual file
+            with _secure_open_write(lock.path, self.filemode) as fh:
+                fh.write(value)
+
+    def delete(self, key):
+        name = self._fn(key)
+        if not self.forever:
+            os.remove(name)
+
+
+def url_to_file_path(url, filecache):
+    """Return the file cache path based on the URL.
+
+    This does not ensure the file exists!
+    """
+    key = CacheController.cache_url(url)
+    return filecache._fn(key)
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
new file mode 100644
index 0000000..9f5d55f
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -0,0 +1,41 @@
+from __future__ import division
+
+from datetime import datetime
+
+
+def total_seconds(td):
+    """Python 2.6 compatibility"""
+    if hasattr(td, 'total_seconds'):
+        return td.total_seconds()
+
+    ms = td.microseconds
+    secs = (td.seconds + td.days * 24 * 3600)
+    return (ms + secs * 10**6) / 10**6
+
+
+class RedisCache(object):
+
+    def __init__(self, conn):
+        self.conn = conn
+
+    def get(self, key):
+        return self.conn.get(key)
+
+    def set(self, key, value, expires=None):
+        if not expires:
+            self.conn.set(key, value)
+        else:
+            expires = expires - datetime.now()
+            self.conn.setex(key, total_seconds(expires), value)
+
+    def delete(self, key):
+        self.conn.delete(key)
+
+    def clear(self):
+        """Helper for clearing all the keys in a database. Use with
+        caution!"""
+        for key in self.conn.keys():
+            self.conn.delete(key)
+
+    def close(self):
+        self.conn.disconnect()
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py
new file mode 100644
index 0000000..018e6ac
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/compat.py
@@ -0,0 +1,20 @@
+try:
+    from urllib.parse import urljoin
+except ImportError:
+    from urlparse import urljoin
+
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+
+from pip._vendor.requests.packages.urllib3.response import HTTPResponse
+from pip._vendor.requests.packages.urllib3.util import is_fp_closed
+
+# Replicate some six behaviour
+try:
+    text_type = (unicode,)
+except NameError:
+    text_type = (str,)
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py
new file mode 100644
index 0000000..6e591f8
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/controller.py
@@ -0,0 +1,353 @@
+"""
+The httplib2 algorithms ported for use with requests.
+"""
+import logging
+import re
+import calendar
+import time
+from email.utils import parsedate_tz
+
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .cache import DictCache
+from .serialize import Serializer
+
+
+logger = logging.getLogger(__name__)
+
+URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
+
+
+def parse_uri(uri):
+    """Parses a URI using the regex given in Appendix B of RFC 3986.
+
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+    """
+    groups = URI.match(uri).groups()
+    return (groups[1], groups[3], groups[4], groups[6], groups[8])
+
+
+class CacheController(object):
+    """An interface to see if a request should be cached or not.
+    """
+    def __init__(self, cache=None, cache_etags=True, serializer=None):
+        self.cache = cache or DictCache()
+        self.cache_etags = cache_etags
+        self.serializer = serializer or Serializer()
+
+    @classmethod
+    def _urlnorm(cls, uri):
+        """Normalize the URL to create a safe key for the cache"""
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+        if not scheme or not authority:
+            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
+
+        scheme = scheme.lower()
+        authority = authority.lower()
+
+        if not path:
+            path = "/"
+
+        # Could do syntax-based normalization of the URI before
+        # computing the digest. See Section 6.2.2 of Std 66.
+        request_uri = query and "?".join([path, query]) or path
+        defrag_uri = scheme + "://" + authority + request_uri
+
+        return defrag_uri
+
+    @classmethod
+    def cache_url(cls, uri):
+        return cls._urlnorm(uri)
+
+    def parse_cache_control(self, headers):
+        """
+        Parse the cache control headers returning a dictionary with values
+        for the different directives.
+        """
+        retval = {}
+
+        cc_header = 'cache-control'
+        if 'Cache-Control' in headers:
+            cc_header = 'Cache-Control'
+
+        if cc_header in headers:
+            parts = headers[cc_header].split(',')
+            parts_with_args = [
+                tuple([x.strip().lower() for x in part.split("=", 1)])
+                for part in parts if -1 != part.find("=")
+            ]
+            parts_wo_args = [
+                (name.strip().lower(), 1)
+                for name in parts if -1 == name.find("=")
+            ]
+            retval = dict(parts_with_args + parts_wo_args)
+        return retval
+
+    def cached_request(self, request):
+        """
+        Return a cached response if it exists in the cache, otherwise
+        return False.
+ """ + cache_url = self.cache_url(request.url) + logger.debug('Looking up "%s" in the cache', cache_url) + cc = self.parse_cache_control(request.headers) + + # Bail out if the request insists on fresh data + if 'no-cache' in cc: + logger.debug('Request header has "no-cache", cache bypassed') + return False + + if 'max-age' in cc and cc['max-age'] == 0: + logger.debug('Request header has "max_age" as 0, cache bypassed') + return False + + # Request allows serving from the cache, let's see if we find something + cache_data = self.cache.get(cache_url) + if cache_data is None: + logger.debug('No cache entry available') + return False + + # Check whether it can be deserialized + resp = self.serializer.loads(request, cache_data) + if not resp: + logger.warning('Cache entry deserialization failed, entry ignored') + return False + + # If we have a cached 301, return it immediately. We don't + # need to test our response for other headers b/c it is + # intrinsically "cacheable" as it is Permanent. + # See: + # https://tools.ietf.org/html/rfc7231#section-6.4.2 + # + # Client can try to refresh the value by repeating the request + # with cache busting headers as usual (ie no-cache). + if resp.status == 301: + msg = ('Returning cached "301 Moved Permanently" response ' + '(ignoring date and etag information)') + logger.debug(msg) + return resp + + headers = CaseInsensitiveDict(resp.headers) + if not headers or 'date' not in headers: + if 'etag' not in headers: + # Without date or etag, the cached response can never be used + # and should be deleted. + logger.debug('Purging cached response: no date or etag') + self.cache.delete(cache_url) + logger.debug('Ignoring cached response: no date') + return False + + now = time.time() + date = calendar.timegm( + parsedate_tz(headers['date']) + ) + current_age = max(0, now - date) + logger.debug('Current age based on date: %i', current_age) + + # TODO: There is an assumption that the result will be a + # urllib3 response object. This may not be best since we + # could probably avoid instantiating or constructing the + # response until we know we need it. + resp_cc = self.parse_cache_control(headers) + + # determine freshness + freshness_lifetime = 0 + + # Check the max-age pragma in the cache control header + if 'max-age' in resp_cc and resp_cc['max-age'].isdigit(): + freshness_lifetime = int(resp_cc['max-age']) + logger.debug('Freshness lifetime from max-age: %i', + freshness_lifetime) + + # If there isn't a max-age, check for an expires header + elif 'expires' in headers: + expires = parsedate_tz(headers['expires']) + if expires is not None: + expire_time = calendar.timegm(expires) - date + freshness_lifetime = max(0, expire_time) + logger.debug("Freshness lifetime from expires: %i", + freshness_lifetime) + + # Determine if we are setting freshness limit in the + # request. Note, this overrides what was in the response. 
+        if 'max-age' in cc:
+            try:
+                freshness_lifetime = int(cc['max-age'])
+                logger.debug('Freshness lifetime from request max-age: %i',
+                             freshness_lifetime)
+            except ValueError:
+                freshness_lifetime = 0
+
+        if 'min-fresh' in cc:
+            try:
+                min_fresh = int(cc['min-fresh'])
+            except ValueError:
+                min_fresh = 0
+            # adjust our current age by our min fresh
+            current_age += min_fresh
+            logger.debug('Adjusted current age from min-fresh: %i',
+                         current_age)
+
+        # Return entry if it is fresh enough
+        if freshness_lifetime > current_age:
+            logger.debug('The response is "fresh", returning cached response')
+            logger.debug('%i > %i', freshness_lifetime, current_age)
+            return resp
+
+        # we're not fresh. If we don't have an Etag, clear it out
+        if 'etag' not in headers:
+            logger.debug(
+                'The cached response is "stale" with no etag, purging'
+            )
+            self.cache.delete(cache_url)
+
+        # return the original handler
+        return False
+
+    def conditional_headers(self, request):
+        cache_url = self.cache_url(request.url)
+        resp = self.serializer.loads(request, self.cache.get(cache_url))
+        new_headers = {}
+
+        if resp:
+            headers = CaseInsensitiveDict(resp.headers)
+
+            if 'etag' in headers:
+                new_headers['If-None-Match'] = headers['ETag']
+
+            if 'last-modified' in headers:
+                new_headers['If-Modified-Since'] = headers['Last-Modified']
+
+        return new_headers
+
+    def cache_response(self, request, response, body=None):
+        """
+        Algorithm for caching requests.
+
+        This assumes a requests Response object.
+        """
+        # From httplib2: Don't cache 206's since we aren't going to
+        # handle byte range requests
+        cacheable_status_codes = [200, 203, 300, 301]
+        if response.status not in cacheable_status_codes:
+            logger.debug(
+                'Status code %s not in %s',
+                response.status,
+                cacheable_status_codes
+            )
+            return
+
+        response_headers = CaseInsensitiveDict(response.headers)
+
+        # If we've been given a body and our response has a Content-Length,
+        # and that Content-Length is valid, then we can check to see if the
+        # body we've been given matches the expected size, and if it doesn't
+        # we'll just skip trying to cache it.
+        if (body is not None and
+                "content-length" in response_headers and
+                response_headers["content-length"].isdigit() and
+                int(response_headers["content-length"]) != len(body)):
+            return
+
+        cc_req = self.parse_cache_control(request.headers)
+        cc = self.parse_cache_control(response_headers)
+
+        cache_url = self.cache_url(request.url)
+        logger.debug('Updating cache with response from "%s"', cache_url)
+
+        # Delete it from the cache if we happen to have it stored there
+        no_store = False
+        if cc.get('no-store'):
+            no_store = True
+            logger.debug('Response header has "no-store"')
+        if cc_req.get('no-store'):
+            no_store = True
+            logger.debug('Request header has "no-store"')
+        if no_store and self.cache.get(cache_url):
+            logger.debug('Purging existing cache entry to honor "no-store"')
+            self.cache.delete(cache_url)
+
+        # If we've been given an etag, then keep the response
+        if self.cache_etags and 'etag' in response_headers:
+            logger.debug('Caching due to etag')
+            self.cache.set(
+                cache_url,
+                self.serializer.dumps(request, response, body=body),
+            )
+
+        # Add to the cache any 301s. We do this before looking at
+        # the Date headers.
+        elif response.status == 301:
+            logger.debug('Caching permanent redirect')
+            self.cache.set(
+                cache_url,
+                self.serializer.dumps(request, response)
+            )
+
+        # Add to the cache if the response headers demand it. If there
+        # is no date header then we can't do anything about expiring
+        # the cache.
+        elif 'date' in response_headers:
+            # cache when there is a max-age > 0
+            if cc and cc.get('max-age'):
+                if int(cc['max-age']) > 0:
+                    logger.debug('Caching b/c date exists and max-age > 0')
+                    self.cache.set(
+                        cache_url,
+                        self.serializer.dumps(request, response, body=body),
+                    )
+
+            # If the request can expire, it means we should cache it
+            # in the meantime.
+            elif 'expires' in response_headers:
+                if response_headers['expires']:
+                    logger.debug('Caching b/c of expires header')
+                    self.cache.set(
+                        cache_url,
+                        self.serializer.dumps(request, response, body=body),
+                    )
+
+    def update_cached_response(self, request, response):
+        """On a 304 we will get a new set of headers that we want to
+        update our cached value with, assuming we have one.
+
+        This should only ever be called when we've sent an ETag and
+        gotten a 304 as the response.
+        """
+        cache_url = self.cache_url(request.url)
+
+        cached_response = self.serializer.loads(
+            request,
+            self.cache.get(cache_url)
+        )
+
+        if not cached_response:
+            # we didn't have a cached response
+            return response
+
+        # Let's update our headers with the headers from the new request:
+        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
+        #
+        # The server isn't supposed to send headers that would make
+        # the cached body invalid. But... just in case, we'll be sure
+        # to strip out ones we know that might be problematic due to
+        # typical assumptions.
+        excluded_headers = [
+            "content-length",
+        ]
+
+        cached_response.headers.update(
+            dict((k, v) for k, v in response.headers.items()
+                 if k.lower() not in excluded_headers)
+        )
+
+        # we want a 200 b/c we have content via the cache
+        cached_response.status = 200
+
+        # update our cache
+        self.cache.set(
+            cache_url,
+            self.serializer.dumps(request, cached_response),
+        )
+
+        return cached_response
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
new file mode 100644
index 0000000..4b91bce
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
@@ -0,0 +1,63 @@
+from io import BytesIO
+
+
+class CallbackFileWrapper(object):
+    """
+    Small wrapper around a fp object which will tee everything read into a
+    buffer, and when that file is closed it will execute a callback with the
+    contents of that buffer.
+
+    All attributes are proxied to the underlying file object.
+
+    This class uses members with a double underscore (__) leading prefix so as
+    not to accidentally shadow an attribute.
+    """
+
+    def __init__(self, fp, callback):
+        self.__buf = BytesIO()
+        self.__fp = fp
+        self.__callback = callback
+
+    def __getattr__(self, name):
+        # The vagaries of garbage collection mean that self.__fp is
+        # not always set. Using __getattribute__ and the private
+        # name[0] allows looking up the attribute value and raising an
+        # AttributeError when it doesn't exist. This stops things from
+        # infinitely recursing calls to getattr in the case where
+        # self.__fp hasn't been set.
+        #
+        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
+        fp = self.__getattribute__('_CallbackFileWrapper__fp')
+        return getattr(fp, name)
+
+    def __is_fp_closed(self):
+        try:
+            return self.__fp.fp is None
+        except AttributeError:
+            pass
+
+        try:
+            return self.__fp.closed
+        except AttributeError:
+            pass
+
+        # We just don't cache it then.
+        # TODO: Add some logging here...
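Editor's note: a hedged sketch of the tee-and-callback behaviour documented above; FakeFp is our stand-in for urllib3's raw file object, which reports itself closed once the body is exhausted:

from io import BytesIO

class FakeFp(object):
    def __init__(self, data):
        self._buf = BytesIO(data)
        self.closed = False

    def read(self, amt=None):
        data = self._buf.read(amt)
        if not data:
            self.closed = True  # urllib3 marks the raw fp closed at EOF
        return data

captured = []
wrapped = CallbackFileWrapper(FakeFp(b'payload'), captured.append)
while wrapped.read(4):
    pass
assert captured == [b'payload']  # the callback received the full teed body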
+        return False
+
+    def read(self, amt=None):
+        data = self.__fp.read(amt)
+        self.__buf.write(data)
+
+        if self.__is_fp_closed():
+            if self.__callback:
+                self.__callback(self.__buf.getvalue())
+
+            # We assign this to None here, because otherwise we can get into
+            # really tricky problems where the CPython interpreter dead locks
+            # because the callback is holding a reference to something which
+            # has a __del__ method. Setting this to None breaks the cycle
+            # and allows the garbage collector to do its thing normally.
+            self.__callback = None
+
+        return data
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
new file mode 100644
index 0000000..94715a4
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
@@ -0,0 +1,138 @@
+import calendar
+import time
+
+from email.utils import formatdate, parsedate, parsedate_tz
+
+from datetime import datetime, timedelta
+
+TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
+
+
+def expire_after(delta, date=None):
+    date = date or datetime.now()
+    return date + delta
+
+
+def datetime_to_header(dt):
+    return formatdate(calendar.timegm(dt.timetuple()))
+
+
+class BaseHeuristic(object):
+
+    def warning(self, response):
+        """
+        Return a valid 1xx warning header value describing the cache
+        adjustments.
+
+        The response is provided to allow warnings like 113
+        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
+        to explicitly say the response is over 24 hours old.
+        """
+        return '110 - "Response is Stale"'
+
+    def update_headers(self, response):
+        """Update the response headers with any new headers.
+
+        NOTE: This SHOULD always include some Warning header to
+        signify that the response was cached by the client, not
+        by way of the provided headers.
+        """
+        return {}
+
+    def apply(self, response):
+        updated_headers = self.update_headers(response)
+
+        if updated_headers:
+            response.headers.update(updated_headers)
+            warning_header_value = self.warning(response)
+            if warning_header_value is not None:
+                response.headers.update({'Warning': warning_header_value})
+
+        return response
+
+
+class OneDayCache(BaseHeuristic):
+    """
+    Cache the response by providing an expires 1 day in the
+    future.
+    """
+    def update_headers(self, response):
+        headers = {}
+
+        if 'expires' not in response.headers:
+            date = parsedate(response.headers['date'])
+            expires = expire_after(timedelta(days=1),
+                                   date=datetime(*date[:6]))
+            headers['expires'] = datetime_to_header(expires)
+            headers['cache-control'] = 'public'
+        return headers
+
+
+class ExpiresAfter(BaseHeuristic):
+    """
+    Cache **all** requests for a defined time period.
+    """
+
+    def __init__(self, **kw):
+        self.delta = timedelta(**kw)
+
+    def update_headers(self, response):
+        expires = expire_after(self.delta)
+        return {
+            'expires': datetime_to_header(expires),
+            'cache-control': 'public',
+        }
+
+    def warning(self, response):
+        tmpl = '110 - Automatically cached for %s. Response might be stale'
+        return tmpl % self.delta
+
+
+class LastModified(BaseHeuristic):
+    """
+    If there is no Expires header already, fall back on Last-Modified
+    using the heuristic from
+    http://tools.ietf.org/html/rfc7234#section-4.2.2
+    to calculate a reasonable value.
+ + Firefox also does something like this per + https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ + http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397 + Unlike mozilla we limit this to 24-hr. + """ + cacheable_by_default_statuses = set([ + 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501 + ]) + + def update_headers(self, resp): + headers = resp.headers + + if 'expires' in headers: + return {} + + if 'cache-control' in headers and headers['cache-control'] != 'public': + return {} + + if resp.status not in self.cacheable_by_default_statuses: + return {} + + if 'date' not in headers or 'last-modified' not in headers: + return {} + + date = calendar.timegm(parsedate_tz(headers['date'])) + last_modified = parsedate(headers['last-modified']) + if date is None or last_modified is None: + return {} + + now = time.time() + current_age = max(0, now - date) + delta = date - calendar.timegm(last_modified) + freshness_lifetime = max(0, min(delta / 10, 24 * 3600)) + if freshness_lifetime <= current_age: + return {} + + expires = date + freshness_lifetime + return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))} + + def warning(self, resp): + return None diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py new file mode 100644 index 0000000..ffbfbf6 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/serialize.py @@ -0,0 +1,190 @@ +import base64 +import io +import json +import zlib + +from pip._vendor.requests.structures import CaseInsensitiveDict + +from .compat import HTTPResponse, pickle, text_type + + +def _b64_encode_bytes(b): + return base64.b64encode(b).decode("ascii") + + +def _b64_encode_str(s): + return _b64_encode_bytes(s.encode("utf8")) + + +def _b64_encode(s): + if isinstance(s, text_type): + return _b64_encode_str(s) + return _b64_encode_bytes(s) + + +def _b64_decode_bytes(b): + return base64.b64decode(b.encode("ascii")) + + +def _b64_decode_str(s): + return _b64_decode_bytes(s).decode("utf8") + + +class Serializer(object): + + def dumps(self, request, response, body=None): + response_headers = CaseInsensitiveDict(response.headers) + + if body is None: + body = response.read(decode_content=False) + + # NOTE: 99% sure this is dead code. I'm only leaving it + # here b/c I don't have a test yet to prove + # it. Basically, before using + # `cachecontrol.filewrapper.CallbackFileWrapper`, + # this made an effort to reset the file handle. The + # `CallbackFileWrapper` short circuits this code by + # setting the body as the content is consumed, the + # result being a `body` argument is *always* passed + # into cache_response, and in turn, + # `Serializer.dump`. 
+ response._fp = io.BytesIO(body) + + data = { + "response": { + "body": _b64_encode_bytes(body), + "headers": dict( + (_b64_encode(k), _b64_encode(v)) + for k, v in response.headers.items() + ), + "status": response.status, + "version": response.version, + "reason": _b64_encode_str(response.reason), + "strict": response.strict, + "decode_content": response.decode_content, + }, + } + + # Construct our vary headers + data["vary"] = {} + if "vary" in response_headers: + varied_headers = response_headers['vary'].split(',') + for header in varied_headers: + header = header.strip() + data["vary"][header] = request.headers.get(header, None) + + # Encode our Vary headers to ensure they can be serialized as JSON + data["vary"] = dict( + (_b64_encode(k), _b64_encode(v) if v is not None else v) + for k, v in data["vary"].items() + ) + + return b",".join([ + b"cc=2", + zlib.compress( + json.dumps( + data, separators=(",", ":"), sort_keys=True, + ).encode("utf8"), + ), + ]) + + def loads(self, request, data): + # Short circuit if we've been given an empty set of data + if not data: + return + + # Determine what version of the serializer the data was serialized + # with + try: + ver, data = data.split(b",", 1) + except ValueError: + ver = b"cc=0" + + # Make sure that our "ver" is actually a version and isn't a false + # positive from a , being in the data stream. + if ver[:3] != b"cc=": + data = ver + data + ver = b"cc=0" + + # Get the version number out of the cc=N + ver = ver.split(b"=", 1)[-1].decode("ascii") + + # Dispatch to the actual load method for the given version + try: + return getattr(self, "_loads_v{0}".format(ver))(request, data) + except AttributeError: + # This is a version we don't have a loads function for, so we'll + # just treat it as a miss and return None + return + + def prepare_response(self, request, cached): + """Verify our vary headers match and construct a real urllib3 + HTTPResponse object. + """ + # Special case the '*' Vary value as it means we cannot actually + # determine if the cached response is suitable for this request. + if "*" in cached.get("vary", {}): + return + + # Ensure that the Vary headers for the cached response match our + # request + for header, value in cached.get("vary", {}).items(): + if request.headers.get(header, None) != value: + return + + body_raw = cached["response"].pop("body") + + try: + body = io.BytesIO(body_raw) + except TypeError: + # This can happen if cachecontrol serialized to v1 format (pickle) + # using Python 2. A Python 2 str(byte string) will be unpickled as + # a Python 3 str (unicode string), which will cause the above to + # fail with: + # + # TypeError: 'str' does not support the buffer interface + body = io.BytesIO(body_raw.encode('utf8')) + + return HTTPResponse( + body=body, + preload_content=False, + **cached["response"] + ) + + def _loads_v0(self, request, data): + # The original legacy cache data. This doesn't contain enough + # information to construct everything we need, so we'll treat this as + # a miss. 
+ return + + def _loads_v1(self, request, data): + try: + cached = pickle.loads(data) + except ValueError: + return + + return self.prepare_response(request, cached) + + def _loads_v2(self, request, data): + try: + cached = json.loads(zlib.decompress(data).decode("utf8")) + except ValueError: + return + + # We need to decode the items that we've base64 encoded + cached["response"]["body"] = _b64_decode_bytes( + cached["response"]["body"] + ) + cached["response"]["headers"] = dict( + (_b64_decode_str(k), _b64_decode_str(v)) + for k, v in cached["response"]["headers"].items() + ) + cached["response"]["reason"] = _b64_decode_str( + cached["response"]["reason"], + ) + cached["vary"] = dict( + (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) + for k, v in cached["vary"].items() + ) + + return self.prepare_response(request, cached) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py new file mode 100644 index 0000000..ea421aa --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/wrapper.py @@ -0,0 +1,21 @@ +from .adapter import CacheControlAdapter +from .cache import DictCache + + +def CacheControl(sess, + cache=None, + cache_etags=True, + serializer=None, + heuristic=None): + + cache = cache or DictCache() + adapter = CacheControlAdapter( + cache, + cache_etags=cache_etags, + serializer=serializer, + heuristic=heuristic, + ) + sess.mount('http://', adapter) + sess.mount('https://', adapter) + + return sess diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py new file mode 100644 index 0000000..670e6b3 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py @@ -0,0 +1,7 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from .initialise import init, deinit, reinit, colorama_text +from .ansi import Fore, Back, Style, Cursor +from .ansitowin32 import AnsiToWin32 + +__version__ = '0.3.7' + diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py new file mode 100644 index 0000000..7877658 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py @@ -0,0 +1,102 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +''' +This module generates ANSI character codes to printing colors to terminals. +See: http://en.wikipedia.org/wiki/ANSI_escape_code +''' + +CSI = '\033[' +OSC = '\033]' +BEL = '\007' + + +def code_to_chars(code): + return CSI + str(code) + 'm' + +def set_title(title): + return OSC + '2;' + title + BEL + +def clear_screen(mode=2): + return CSI + str(mode) + 'J' + +def clear_line(mode=2): + return CSI + str(mode) + 'K' + + +class AnsiCodes(object): + def __init__(self): + # the subclasses declare class attributes which are numbers. 
+ # Upon instantiation we define instance attributes, which are the same + # as the class attributes but wrapped with the ANSI escape sequence + for name in dir(self): + if not name.startswith('_'): + value = getattr(self, name) + setattr(self, name, code_to_chars(value)) + + +class AnsiCursor(object): + def UP(self, n=1): + return CSI + str(n) + 'A' + def DOWN(self, n=1): + return CSI + str(n) + 'B' + def FORWARD(self, n=1): + return CSI + str(n) + 'C' + def BACK(self, n=1): + return CSI + str(n) + 'D' + def POS(self, x=1, y=1): + return CSI + str(y) + ';' + str(x) + 'H' + + +class AnsiFore(AnsiCodes): + BLACK = 30 + RED = 31 + GREEN = 32 + YELLOW = 33 + BLUE = 34 + MAGENTA = 35 + CYAN = 36 + WHITE = 37 + RESET = 39 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 90 + LIGHTRED_EX = 91 + LIGHTGREEN_EX = 92 + LIGHTYELLOW_EX = 93 + LIGHTBLUE_EX = 94 + LIGHTMAGENTA_EX = 95 + LIGHTCYAN_EX = 96 + LIGHTWHITE_EX = 97 + + +class AnsiBack(AnsiCodes): + BLACK = 40 + RED = 41 + GREEN = 42 + YELLOW = 43 + BLUE = 44 + MAGENTA = 45 + CYAN = 46 + WHITE = 47 + RESET = 49 + + # These are fairly well supported, but not part of the standard. + LIGHTBLACK_EX = 100 + LIGHTRED_EX = 101 + LIGHTGREEN_EX = 102 + LIGHTYELLOW_EX = 103 + LIGHTBLUE_EX = 104 + LIGHTMAGENTA_EX = 105 + LIGHTCYAN_EX = 106 + LIGHTWHITE_EX = 107 + + +class AnsiStyle(AnsiCodes): + BRIGHT = 1 + DIM = 2 + NORMAL = 22 + RESET_ALL = 0 + +Fore = AnsiFore() +Back = AnsiBack() +Style = AnsiStyle() +Cursor = AnsiCursor() diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py new file mode 100644 index 0000000..b7ff6f2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py @@ -0,0 +1,236 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import re +import sys +import os + +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style +from .winterm import WinTerm, WinColor, WinStyle +from .win32 import windll, winapi_test + + +winterm = None +if windll is not None: + winterm = WinTerm() + + +def is_stream_closed(stream): + return not hasattr(stream, 'closed') or stream.closed + + +def is_a_tty(stream): + return hasattr(stream, 'isatty') and stream.isatty() + + +class StreamWrapper(object): + ''' + Wraps a stream (such as stdout), acting as a transparent proxy for all + attribute access apart from method 'write()', which is delegated to our + Converter instance. + ''' + def __init__(self, wrapped, converter): + # double-underscore everything to prevent clashes with names of + # attributes on the wrapped stream object. + self.__wrapped = wrapped + self.__convertor = converter + + def __getattr__(self, name): + return getattr(self.__wrapped, name) + + def write(self, text): + self.__convertor.write(text) + + +class AnsiToWin32(object): + ''' + Implements a 'write()' method which, on Windows, will strip ANSI character + sequences from the text, and if outputting to a tty, will convert them into + win32 function calls. 
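+
+    On other platforms output is generally passed through unchanged when
+    writing to a tty and stripped of ANSI sequences otherwise. A minimal
+    usage sketch (init() wraps sys.stdout/sys.stderr with this class when
+    needed):
+
+        from colorama import Fore, Style, init
+        init()
+        print(Fore.RED + 'error' + Style.RESET_ALL)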
+ ''' + ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer + ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command + + def __init__(self, wrapped, convert=None, strip=None, autoreset=False): + # The wrapped stream (normally sys.stdout or sys.stderr) + self.wrapped = wrapped + + # should we reset colors to defaults after every .write() + self.autoreset = autoreset + + # create the proxy wrapping our output stream + self.stream = StreamWrapper(wrapped, self) + + on_windows = os.name == 'nt' + # We test if the WinAPI works, because even if we are on Windows + # we may be using a terminal that doesn't support the WinAPI + # (e.g. Cygwin Terminal). In this case it's up to the terminal + # to support the ANSI codes. + conversion_supported = on_windows and winapi_test() + + # should we strip ANSI sequences from our output? + if strip is None: + strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped)) + self.strip = strip + + # should we should convert ANSI sequences into win32 calls? + if convert is None: + convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped) + self.convert = convert + + # dict of ansi codes to win32 functions and parameters + self.win32_calls = self.get_win32_calls() + + # are we wrapping stderr? + self.on_stderr = self.wrapped is sys.stderr + + def should_wrap(self): + ''' + True if this class is actually needed. If false, then the output + stream will not be affected, nor will win32 calls be issued, so + wrapping stdout is not actually required. This will generally be + False on non-Windows platforms, unless optional functionality like + autoreset has been requested using kwargs to init() + ''' + return self.convert or self.strip or self.autoreset + + def get_win32_calls(self): + if self.convert and winterm: + return { + AnsiStyle.RESET_ALL: (winterm.reset_all, ), + AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), + AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), + AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), + AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), + AnsiFore.RED: (winterm.fore, WinColor.RED), + AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), + AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), + AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), + AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), + AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), + AnsiFore.WHITE: (winterm.fore, WinColor.GREY), + AnsiFore.RESET: (winterm.fore, ), + AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), + AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), + AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), + AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), + AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), + AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), + AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), + AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), + AnsiBack.BLACK: (winterm.back, WinColor.BLACK), + AnsiBack.RED: (winterm.back, WinColor.RED), + AnsiBack.GREEN: (winterm.back, WinColor.GREEN), + AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), + AnsiBack.BLUE: (winterm.back, WinColor.BLUE), + AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), + AnsiBack.CYAN: (winterm.back, WinColor.CYAN), + AnsiBack.WHITE: (winterm.back, WinColor.GREY), + AnsiBack.RESET: (winterm.back, ), + AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), + 
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), + AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), + AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), + AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), + AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), + AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), + AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), + } + return dict() + + def write(self, text): + if self.strip or self.convert: + self.write_and_convert(text) + else: + self.wrapped.write(text) + self.wrapped.flush() + if self.autoreset: + self.reset_all() + + + def reset_all(self): + if self.convert: + self.call_win32('m', (0,)) + elif not self.strip and not is_stream_closed(self.wrapped): + self.wrapped.write(Style.RESET_ALL) + + + def write_and_convert(self, text): + ''' + Write the given text to our wrapped stream, stripping any ANSI + sequences from the text, and optionally converting them into win32 + calls. + ''' + cursor = 0 + text = self.convert_osc(text) + for match in self.ANSI_CSI_RE.finditer(text): + start, end = match.span() + self.write_plain_text(text, cursor, start) + self.convert_ansi(*match.groups()) + cursor = end + self.write_plain_text(text, cursor, len(text)) + + + def write_plain_text(self, text, start, end): + if start < end: + self.wrapped.write(text[start:end]) + self.wrapped.flush() + + + def convert_ansi(self, paramstring, command): + if self.convert: + params = self.extract_params(command, paramstring) + self.call_win32(command, params) + + + def extract_params(self, command, paramstring): + if command in 'Hf': + params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) + while len(params) < 2: + # defaults: + params = params + (1,) + else: + params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) + if len(params) == 0: + # defaults: + if command in 'JKm': + params = (0,) + elif command in 'ABCD': + params = (1,) + + return params + + + def call_win32(self, command, params): + if command == 'm': + for param in params: + if param in self.win32_calls: + func_args = self.win32_calls[param] + func = func_args[0] + args = func_args[1:] + kwargs = dict(on_stderr=self.on_stderr) + func(*args, **kwargs) + elif command in 'J': + winterm.erase_screen(params[0], on_stderr=self.on_stderr) + elif command in 'K': + winterm.erase_line(params[0], on_stderr=self.on_stderr) + elif command in 'Hf': # cursor position - absolute + winterm.set_cursor_position(params, on_stderr=self.on_stderr) + elif command in 'ABCD': # cursor position - relative + n = params[0] + # A - up, B - down, C - forward, D - back + x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] + winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) + + + def convert_osc(self, text): + for match in self.ANSI_OSC_RE.finditer(text): + start, end = match.span() + text = text[:start] + text[end:] + paramstring, command = match.groups() + if command in '\x07': # \x07 = BEL + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) + return text diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py new file mode 100644 index 0000000..834962a --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/initialise.py @@ -0,0 
+1,82 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +import atexit +import contextlib +import sys + +from .ansitowin32 import AnsiToWin32 + + +orig_stdout = None +orig_stderr = None + +wrapped_stdout = None +wrapped_stderr = None + +atexit_done = False + + +def reset_all(): + if AnsiToWin32 is not None: # Issue #74: objects might become None at exit + AnsiToWin32(orig_stdout).reset_all() + + +def init(autoreset=False, convert=None, strip=None, wrap=True): + + if not wrap and any([autoreset, convert, strip]): + raise ValueError('wrap=False conflicts with any other arg=True') + + global wrapped_stdout, wrapped_stderr + global orig_stdout, orig_stderr + + orig_stdout = sys.stdout + orig_stderr = sys.stderr + + if sys.stdout is None: + wrapped_stdout = None + else: + sys.stdout = wrapped_stdout = \ + wrap_stream(orig_stdout, convert, strip, autoreset, wrap) + if sys.stderr is None: + wrapped_stderr = None + else: + sys.stderr = wrapped_stderr = \ + wrap_stream(orig_stderr, convert, strip, autoreset, wrap) + + global atexit_done + if not atexit_done: + atexit.register(reset_all) + atexit_done = True + + +def deinit(): + if orig_stdout is not None: + sys.stdout = orig_stdout + if orig_stderr is not None: + sys.stderr = orig_stderr + + +@contextlib.contextmanager +def colorama_text(*args, **kwargs): + init(*args, **kwargs) + try: + yield + finally: + deinit() + + +def reinit(): + if wrapped_stdout is not None: + sys.stdout = wrapped_stdout + if wrapped_stderr is not None: + sys.stderr = wrapped_stderr + + +def wrap_stream(stream, convert, strip, autoreset, wrap): + if wrap: + wrapper = AnsiToWin32(stream, + convert=convert, strip=strip, autoreset=autoreset) + if wrapper.should_wrap(): + stream = wrapper.stream + return stream + + diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py new file mode 100644 index 0000000..3d1d2f2 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py @@ -0,0 +1,154 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
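+# ctypes bindings for the handful of Win32 console calls that colorama
+# needs. If ctypes or the console DLLs are unavailable (for example on
+# non-Windows platforms), the except branch below installs no-op stubs.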
+ +# from winbase.h +STDOUT = -11 +STDERR = -12 + +try: + import ctypes + from ctypes import LibraryLoader + windll = LibraryLoader(ctypes.WinDLL) + from ctypes import wintypes +except (AttributeError, ImportError): + windll = None + SetConsoleTextAttribute = lambda *_: None + winapi_test = lambda *_: None +else: + from ctypes import byref, Structure, c_char, POINTER + + COORD = wintypes._COORD + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + """struct in wincon.h.""" + _fields_ = [ + ("dwSize", COORD), + ("dwCursorPosition", COORD), + ("wAttributes", wintypes.WORD), + ("srWindow", wintypes.SMALL_RECT), + ("dwMaximumWindowSize", COORD), + ] + def __str__(self): + return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( + self.dwSize.Y, self.dwSize.X + , self.dwCursorPosition.Y, self.dwCursorPosition.X + , self.wAttributes + , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right + , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X + ) + + _GetStdHandle = windll.kernel32.GetStdHandle + _GetStdHandle.argtypes = [ + wintypes.DWORD, + ] + _GetStdHandle.restype = wintypes.HANDLE + + _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo + _GetConsoleScreenBufferInfo.argtypes = [ + wintypes.HANDLE, + POINTER(CONSOLE_SCREEN_BUFFER_INFO), + ] + _GetConsoleScreenBufferInfo.restype = wintypes.BOOL + + _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute + _SetConsoleTextAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + ] + _SetConsoleTextAttribute.restype = wintypes.BOOL + + _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition + _SetConsoleCursorPosition.argtypes = [ + wintypes.HANDLE, + COORD, + ] + _SetConsoleCursorPosition.restype = wintypes.BOOL + + _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA + _FillConsoleOutputCharacterA.argtypes = [ + wintypes.HANDLE, + c_char, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputCharacterA.restype = wintypes.BOOL + + _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute + _FillConsoleOutputAttribute.argtypes = [ + wintypes.HANDLE, + wintypes.WORD, + wintypes.DWORD, + COORD, + POINTER(wintypes.DWORD), + ] + _FillConsoleOutputAttribute.restype = wintypes.BOOL + + _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA + _SetConsoleTitleW.argtypes = [ + wintypes.LPCSTR + ] + _SetConsoleTitleW.restype = wintypes.BOOL + + handles = { + STDOUT: _GetStdHandle(STDOUT), + STDERR: _GetStdHandle(STDERR), + } + + def winapi_test(): + handle = handles[STDOUT] + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return bool(success) + + def GetConsoleScreenBufferInfo(stream_id=STDOUT): + handle = handles[stream_id] + csbi = CONSOLE_SCREEN_BUFFER_INFO() + success = _GetConsoleScreenBufferInfo( + handle, byref(csbi)) + return csbi + + def SetConsoleTextAttribute(stream_id, attrs): + handle = handles[stream_id] + return _SetConsoleTextAttribute(handle, attrs) + + def SetConsoleCursorPosition(stream_id, position, adjust=True): + position = COORD(*position) + # If the position is out of range, do nothing. + if position.Y <= 0 or position.X <= 0: + return + # Adjust for Windows' SetConsoleCursorPosition: + # 1. being 0-based, while ANSI is 1-based. + # 2. expecting (x,y), while ANSI uses (y,x). 
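+        # (COORD fields are (X, Y), so reading .Y and .X swapped on the
+        # next line performs adjustment 2, while the -1 performs 1.)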
+ adjusted_position = COORD(position.Y - 1, position.X - 1) + if adjust: + # Adjust for viewport's scroll position + sr = GetConsoleScreenBufferInfo(STDOUT).srWindow + adjusted_position.Y += sr.Top + adjusted_position.X += sr.Left + # Resume normal processing + handle = handles[stream_id] + return _SetConsoleCursorPosition(handle, adjusted_position) + + def FillConsoleOutputCharacter(stream_id, char, length, start): + handle = handles[stream_id] + char = c_char(char.encode()) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + success = _FillConsoleOutputCharacterA( + handle, char, length, start, byref(num_written)) + return num_written.value + + def FillConsoleOutputAttribute(stream_id, attr, length, start): + ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' + handle = handles[stream_id] + attribute = wintypes.WORD(attr) + length = wintypes.DWORD(length) + num_written = wintypes.DWORD(0) + # Note that this is hard-coded for ANSI (vs wide) bytes. + return _FillConsoleOutputAttribute( + handle, attribute, length, start, byref(num_written)) + + def SetConsoleTitle(title): + return _SetConsoleTitleW(title) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py new file mode 100644 index 0000000..60309d3 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/colorama/winterm.py @@ -0,0 +1,162 @@ +# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. +from . import win32 + + +# from wincon.h +class WinColor(object): + BLACK = 0 + BLUE = 1 + GREEN = 2 + CYAN = 3 + RED = 4 + MAGENTA = 5 + YELLOW = 6 + GREY = 7 + +# from wincon.h +class WinStyle(object): + NORMAL = 0x00 # dim text, dim background + BRIGHT = 0x08 # bright text, dim background + BRIGHT_BACKGROUND = 0x80 # dim text, bright background + +class WinTerm(object): + + def __init__(self): + self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes + self.set_attrs(self._default) + self._default_fore = self._fore + self._default_back = self._back + self._default_style = self._style + # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. + # So that LIGHT_EX colors and BRIGHT style do not clobber each other, + # we track them separately, since LIGHT_EX is overwritten by Fore/Back + # and BRIGHT is overwritten by Style codes. 
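+        # _light holds only those borrowed brightness bits; get_attrs()
+        # merges them back into the attribute word.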
+ self._light = 0 + + def get_attrs(self): + return self._fore + self._back * 16 + (self._style | self._light) + + def set_attrs(self, value): + self._fore = value & 7 + self._back = (value >> 4) & 7 + self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) + + def reset_all(self, on_stderr=None): + self.set_attrs(self._default) + self.set_console(attrs=self._default) + + def fore(self, fore=None, light=False, on_stderr=False): + if fore is None: + fore = self._default_fore + self._fore = fore + # Emulate LIGHT_EX with BRIGHT Style + if light: + self._light |= WinStyle.BRIGHT + else: + self._light &= ~WinStyle.BRIGHT + self.set_console(on_stderr=on_stderr) + + def back(self, back=None, light=False, on_stderr=False): + if back is None: + back = self._default_back + self._back = back + # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style + if light: + self._light |= WinStyle.BRIGHT_BACKGROUND + else: + self._light &= ~WinStyle.BRIGHT_BACKGROUND + self.set_console(on_stderr=on_stderr) + + def style(self, style=None, on_stderr=False): + if style is None: + style = self._default_style + self._style = style + self.set_console(on_stderr=on_stderr) + + def set_console(self, attrs=None, on_stderr=False): + if attrs is None: + attrs = self.get_attrs() + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleTextAttribute(handle, attrs) + + def get_position(self, handle): + position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition + # Because Windows coordinates are 0-based, + # and win32.SetConsoleCursorPosition expects 1-based. + position.X += 1 + position.Y += 1 + return position + + def set_cursor_position(self, position=None, on_stderr=False): + if position is None: + # I'm not currently tracking the position, so there is no default. + # position = self.get_position() + return + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + win32.SetConsoleCursorPosition(handle, position) + + def cursor_adjust(self, x, y, on_stderr=False): + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + position = self.get_position(handle) + adjusted_position = (position.Y + y, position.X + x) + win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) + + def erase_screen(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the screen. + # 1 should clear from the cursor to the beginning of the screen. 
+ # 2 should clear the entire screen, and move cursor to (1,1) + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + # get the number of character cells in the current buffer + cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y + # get number of character cells before current cursor position + cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = cells_in_screen - cells_before_cursor + if mode == 1: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_before_cursor + elif mode == 2: + from_coord = win32.COORD(0, 0) + cells_to_erase = cells_in_screen + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + if mode == 2: + # put the cursor where needed + win32.SetConsoleCursorPosition(handle, (1, 1)) + + def erase_line(self, mode=0, on_stderr=False): + # 0 should clear from the cursor to the end of the line. + # 1 should clear from the cursor to the beginning of the line. + # 2 should clear the entire line. + handle = win32.STDOUT + if on_stderr: + handle = win32.STDERR + csbi = win32.GetConsoleScreenBufferInfo(handle) + if mode == 0: + from_coord = csbi.dwCursorPosition + cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X + if mode == 1: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwCursorPosition.X + elif mode == 2: + from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) + cells_to_erase = csbi.dwSize.X + # fill the entire screen with blanks + win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) + # now set the buffer's attributes accordingly + win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) + + def set_title(self, title): + win32.SetConsoleTitle(title) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py new file mode 100644 index 0000000..7026860 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2016 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +import logging + +__version__ = '0.2.3' + +class DistlibException(Exception): + pass + +try: + from logging import NullHandler +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): + def handle(self, record): pass + def emit(self, record): pass + def createLock(self): self.lock = None + +logger = logging.getLogger(__name__) +logger.addHandler(NullHandler()) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py new file mode 100644 index 0000000..f7dbf4c --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/__init__.py @@ -0,0 +1,6 @@ +"""Modules copied from Python 3 standard libraries, for internal use only. + +Individual classes and functions are found in d2._backport.misc. Intended +usage is to always import things missing from 3.1 from that module: the +built-in/stdlib objects will be used if found. 
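+(``d2`` appears to be a leftover reference to distutils2, the project this
+backport was originally written for.)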
+""" diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py new file mode 100644 index 0000000..cfb318d --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/misc.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Backports for individual classes and functions.""" + +import os +import sys + +__all__ = ['cache_from_source', 'callable', 'fsencode'] + + +try: + from imp import cache_from_source +except ImportError: + def cache_from_source(py_file, debug=__debug__): + ext = debug and 'c' or 'o' + return py_file + ext + + +try: + callable = callable +except NameError: + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode +except AttributeError: + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, str): + return filename.encode(sys.getfilesystemencoding()) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py new file mode 100644 index 0000000..9e2e234 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +""" + +import os +import sys +import stat +from os.path import abspath +import fnmatch +import collections +import errno +from . import tarfile + +try: + import bz2 + _BZ2_SUPPORTED = True +except ImportError: + _BZ2_SUPPORTED = False + +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", + "copytree", "move", "rmtree", "Error", "SpecialFileError", + "ExecError", "make_archive", "get_archive_formats", + "register_archive_format", "unregister_archive_format", + "get_unpack_formats", "register_unpack_format", + "unregister_unpack_format", "unpack_archive", "ignore_patterns"] + +class Error(EnvironmentError): + pass + +class SpecialFileError(EnvironmentError): + """Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)""" + +class ExecError(EnvironmentError): + """Raised when a command could not be executed""" + +class ReadError(EnvironmentError): + """Raised when an archive cannot be read""" + +class RegistryError(Exception): + """Raised when a registery operation with the archiving + and unpacking registeries fails""" + + +try: + WindowsError +except NameError: + WindowsError = None + +def copyfileobj(fsrc, fdst, length=16*1024): + """copy data from file-like object fsrc to file-like object fdst""" + while 1: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + +def _samefile(src, dst): + # Macintosh, Unix. + if hasattr(os.path, 'samefile'): + try: + return os.path.samefile(src, dst) + except OSError: + return False + + # All other platforms: check for same pathname. 
+ return (os.path.normcase(os.path.abspath(src)) == + os.path.normcase(os.path.abspath(dst))) + +def copyfile(src, dst): + """Copy data from src to dst""" + if _samefile(src, dst): + raise Error("`%s` and `%s` are the same file" % (src, dst)) + + for fn in [src, dst]: + try: + st = os.stat(fn) + except OSError: + # File most likely does not exist + pass + else: + # XXX What about other special files? (sockets, devices...) + if stat.S_ISFIFO(st.st_mode): + raise SpecialFileError("`%s` is a named pipe" % fn) + + with open(src, 'rb') as fsrc: + with open(dst, 'wb') as fdst: + copyfileobj(fsrc, fdst) + +def copymode(src, dst): + """Copy mode bits from src to dst""" + if hasattr(os, 'chmod'): + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + os.chmod(dst, mode) + +def copystat(src, dst): + """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" + st = os.stat(src) + mode = stat.S_IMODE(st.st_mode) + if hasattr(os, 'utime'): + os.utime(dst, (st.st_atime, st.st_mtime)) + if hasattr(os, 'chmod'): + os.chmod(dst, mode) + if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): + try: + os.chflags(dst, st.st_flags) + except OSError as why: + if (not hasattr(errno, 'EOPNOTSUPP') or + why.errno != errno.EOPNOTSUPP): + raise + +def copy(src, dst): + """Copy data and mode bits ("cp src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copymode(src, dst) + +def copy2(src, dst): + """Copy data and all stat info ("cp -p src dst"). + + The destination may be a directory. + + """ + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + copyfile(src, dst) + copystat(src, dst) + +def ignore_patterns(*patterns): + """Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files""" + def _ignore_patterns(path, names): + ignored_names = [] + for pattern in patterns: + ignored_names.extend(fnmatch.filter(names, pattern)) + return set(ignored_names) + return _ignore_patterns + +def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, + ignore_dangling_symlinks=False): + """Recursively copy a directory tree. + + The destination directory must not already exist. + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. 
It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + """ + names = os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.islink(srcname): + linkto = os.readlink(srcname) + if symlinks: + os.symlink(linkto, dstname) + else: + # ignore dangling symlink if the flag is on + if not os.path.exists(linkto) and ignore_dangling_symlinks: + continue + # otherwise let the copy occurs. copy2 will raise an error + copy_function(srcname, dstname) + elif os.path.isdir(srcname): + copytree(srcname, dstname, symlinks, ignore, copy_function) + else: + # Will raise a SpecialFileError for unsupported file types + copy_function(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Error as err: + errors.extend(err.args[0]) + except EnvironmentError as why: + errors.append((srcname, dstname, str(why))) + try: + copystat(src, dst) + except OSError as why: + if WindowsError is not None and isinstance(why, WindowsError): + # Copying file access times may fail on Windows + pass + else: + errors.extend((src, dst, str(why))) + if errors: + raise Error(errors) + +def rmtree(path, ignore_errors=False, onerror=None): + """Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is os.listdir, os.remove, or os.rmdir; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + """ + if ignore_errors: + def onerror(*args): + pass + elif onerror is None: + def onerror(*args): + raise + try: + if os.path.islink(path): + # symlinks to directories are forbidden, see bug #1669 + raise OSError("Cannot call rmtree on a symbolic link") + except OSError: + onerror(os.path.islink, path, sys.exc_info()) + # can't continue even if onerror hook returns + return + names = [] + try: + names = os.listdir(path) + except os.error: + onerror(os.listdir, path, sys.exc_info()) + for name in names: + fullname = os.path.join(path, name) + try: + mode = os.lstat(fullname).st_mode + except os.error: + mode = 0 + if stat.S_ISDIR(mode): + rmtree(fullname, ignore_errors, onerror) + else: + try: + os.remove(fullname) + except os.error: + onerror(os.remove, fullname, sys.exc_info()) + try: + os.rmdir(path) + except os.error: + onerror(os.rmdir, path, sys.exc_info()) + + +def _basename(path): + # A basename() variant which first strips the trailing slash, if present. + # Thus we always get the last component of the path, even for directories. + return os.path.basename(path.rstrip(os.path.sep)) + +def move(src, dst): + """Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. 
+ + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + """ + real_dst = dst + if os.path.isdir(dst): + if _samefile(src, dst): + # We might be on a case insensitive filesystem, + # perform the rename anyway. + os.rename(src, dst) + return + + real_dst = os.path.join(dst, _basename(src)) + if os.path.exists(real_dst): + raise Error("Destination path '%s' already exists" % real_dst) + try: + os.rename(src, real_dst) + except OSError: + if os.path.isdir(src): + if _destinsrc(src, dst): + raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) + copytree(src, real_dst, symlinks=True) + rmtree(src) + else: + copy2(src, real_dst) + os.unlink(src) + +def _destinsrc(src, dst): + src = abspath(src) + dst = abspath(dst) + if not src.endswith(os.path.sep): + src += os.path.sep + if not dst.endswith(os.path.sep): + dst += os.path.sep + return dst.startswith(src) + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, + owner=None, group=None, logger=None): + """Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", or ".bz2"). + + Returns the output filename. 
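+
+    For example, with the default gzip compression a base_name of
+    'dist/foo' (a hypothetical path) yields 'dist/foo.tar.gz'.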
+ """ + tar_compression = {'gzip': 'gz', None: ''} + compress_ext = {'gzip': '.gz'} + + if _BZ2_SUPPORTED: + tar_compression['bzip2'] = 'bz2' + compress_ext['bzip2'] = '.bz2' + + # flags for compression program, each element of list will be an argument + if compress is not None and compress not in compress_ext: + raise ValueError("bad value for 'compress', or compression format not " + "supported : {0}".format(compress)) + + archive_name = base_name + '.tar' + compress_ext.get(compress, '') + archive_dir = os.path.dirname(archive_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # creating the tarball + if logger is not None: + logger.info('Creating tar archive') + + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + + if not dry_run: + tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) + try: + tar.add(base_dir, filter=_set_uid_gid) + finally: + tar.close() + + return archive_name + +def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): + # XXX see if we want to keep an external call here + if verbose: + zipoptions = "-r" + else: + zipoptions = "-rq" + from distutils.errors import DistutilsExecError + from distutils.spawn import spawn + try: + spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) + except DistutilsExecError: + # XXX really should distinguish between "couldn't find + # external 'zip' command" and "zip failed". + raise ExecError("unable to create zip file '%s': " + "could neither import the 'zipfile' module nor " + "find a standalone zip utility") % zip_filename + +def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): + """Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Uses either the + "zipfile" Python module (if available) or the InfoZIP "zip" utility + (if installed and found on the default search path). If neither tool is + available, raises ExecError. Returns the name of the output zip + file. + """ + zip_filename = base_name + ".zip" + archive_dir = os.path.dirname(base_name) + + if not os.path.exists(archive_dir): + if logger is not None: + logger.info("creating %s", archive_dir) + if not dry_run: + os.makedirs(archive_dir) + + # If zipfile module is not available, try spawning an external 'zip' + # command. 
+ try: + import zipfile + except ImportError: + zipfile = None + + if zipfile is None: + _call_external_zip(base_dir, zip_filename, verbose, dry_run) + else: + if logger is not None: + logger.info("creating '%s' and adding '%s' to it", + zip_filename, base_dir) + + if not dry_run: + zip = zipfile.ZipFile(zip_filename, "w", + compression=zipfile.ZIP_DEFLATED) + + for dirpath, dirnames, filenames in os.walk(base_dir): + for name in filenames: + path = os.path.normpath(os.path.join(dirpath, name)) + if os.path.isfile(path): + zip.write(path, path) + if logger is not None: + logger.info("adding '%s'", path) + zip.close() + + return zip_filename + +_ARCHIVE_FORMATS = { + 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), + 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), + 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), + 'zip': (_make_zipfile, [], "ZIP file"), + } + +if _BZ2_SUPPORTED: + _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], + "bzip2'ed tar-file") + +def get_archive_formats(): + """Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + """ + formats = [(name, registry[2]) for name, registry in + _ARCHIVE_FORMATS.items()] + formats.sort() + return formats + +def register_archive_format(name, function, extra_args=None, description=''): + """Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + """ + if extra_args is None: + extra_args = [] + if not isinstance(function, collections.Callable): + raise TypeError('The %s object is not callable' % function) + if not isinstance(extra_args, (tuple, list)): + raise TypeError('extra_args needs to be a sequence') + for element in extra_args: + if not isinstance(element, (tuple, list)) or len(element) !=2: + raise TypeError('extra_args elements are : (arg_name, value)') + + _ARCHIVE_FORMATS[name] = (function, extra_args, description) + +def unregister_archive_format(name): + del _ARCHIVE_FORMATS[name] + +def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, + dry_run=0, owner=None, group=None, logger=None): + """Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "bztar" + or "gztar". + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. 
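+
+    A hypothetical call, make_archive('release/app', 'gztar',
+    root_dir='build'), archives the contents of 'build' into
+    'release/app.tar.gz'.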
+ """ + save_cwd = os.getcwd() + if root_dir is not None: + if logger is not None: + logger.debug("changing into '%s'", root_dir) + base_name = os.path.abspath(base_name) + if not dry_run: + os.chdir(root_dir) + + if base_dir is None: + base_dir = os.curdir + + kwargs = {'dry_run': dry_run, 'logger': logger} + + try: + format_info = _ARCHIVE_FORMATS[format] + except KeyError: + raise ValueError("unknown archive format '%s'" % format) + + func = format_info[0] + for arg, val in format_info[1]: + kwargs[arg] = val + + if format != 'zip': + kwargs['owner'] = owner + kwargs['group'] = group + + try: + filename = func(base_name, base_dir, **kwargs) + finally: + if root_dir is not None: + if logger is not None: + logger.debug("changing back to '%s'", save_cwd) + os.chdir(save_cwd) + + return filename + + +def get_unpack_formats(): + """Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + """ + formats = [(name, info[0], info[3]) for name, info in + _UNPACK_FORMATS.items()] + formats.sort() + return formats + +def _check_unpack_options(extensions, function, extra_args): + """Checks what gets registered as an unpacker.""" + # first make sure no other unpacker is registered for this extension + existing_extensions = {} + for name, info in _UNPACK_FORMATS.items(): + for ext in info[0]: + existing_extensions[ext] = name + + for extension in extensions: + if extension in existing_extensions: + msg = '%s is already registered for "%s"' + raise RegistryError(msg % (extension, + existing_extensions[extension])) + + if not isinstance(function, collections.Callable): + raise TypeError('The registered function must be a callable') + + +def register_unpack_format(name, extensions, function, extra_args=None, + description=''): + """Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + """ + if extra_args is None: + extra_args = [] + _check_unpack_options(extensions, function, extra_args) + _UNPACK_FORMATS[name] = extensions, function, extra_args, description + +def unregister_unpack_format(name): + """Removes the pack format from the registery.""" + del _UNPACK_FORMATS[name] + +def _ensure_directory(path): + """Ensure that the parent directory of `path` exists""" + dirname = os.path.dirname(path) + if not os.path.isdir(dirname): + os.makedirs(dirname) + +def _unpack_zipfile(filename, extract_dir): + """Unpack zip `filename` to `extract_dir` + """ + try: + import zipfile + except ImportError: + raise ReadError('zlib not supported, cannot unpack this archive.') + + if not zipfile.is_zipfile(filename): + raise ReadError("%s is not a zip file" % filename) + + zip = zipfile.ZipFile(filename) + try: + for info in zip.infolist(): + name = info.filename + + # don't extract absolute paths or ones with .. in them + if name.startswith('/') or '..' 
in name: + continue + + target = os.path.join(extract_dir, *name.split('/')) + if not target: + continue + + _ensure_directory(target) + if not name.endswith('/'): + # file + data = zip.read(info.filename) + f = open(target, 'wb') + try: + f.write(data) + finally: + f.close() + del data + finally: + zip.close() + +def _unpack_tarfile(filename, extract_dir): + """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` + """ + try: + tarobj = tarfile.open(filename) + except tarfile.TarError: + raise ReadError( + "%s is not a compressed or uncompressed tar file" % filename) + try: + tarobj.extractall(extract_dir) + finally: + tarobj.close() + +_UNPACK_FORMATS = { + 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), + 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), + 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") + } + +if _BZ2_SUPPORTED: + _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], + "bzip2'ed tar-file") + +def _find_unpack_format(filename): + for name, info in _UNPACK_FORMATS.items(): + for extension in info[0]: + if filename.endswith(extension): + return name + return None + +def unpack_archive(filename, extract_dir=None, format=None): + """Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", or "gztar". Or any + other registered format. If not provided, unpack_archive will use the + filename extension and see if an unpacker was registered for that + extension. + + In case none is found, a ValueError is raised. + """ + if extract_dir is None: + extract_dir = os.getcwd() + + if format is not None: + try: + format_info = _UNPACK_FORMATS[format] + except KeyError: + raise ValueError("Unknown unpack format '{0}'".format(format)) + + func = format_info[1] + func(filename, extract_dir, **dict(format_info[2])) + else: + # we need to look at the registered unpackers supported extensions + format = _find_unpack_format(filename) + if format is None: + raise ReadError("Unknown archive format '{0}'".format(filename)) + + func = _UNPACK_FORMATS[format][1] + kwargs = dict(_UNPACK_FORMATS[format][2]) + func(filename, extract_dir, **kwargs) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg new file mode 100644 index 0000000..1746bd0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg @@ -0,0 +1,84 @@ +[posix_prefix] +# Configuration directories. Some of these come straight out of the +# configure script. They are for implementing the other variables, not to +# be used directly in [resource_locations]. 
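+# Tokens in {braces}, such as {base} or {py_version_short}, are expanded
+# later from sysconfig variables.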
+confdir = /etc +datadir = /usr/share +libdir = /usr/lib +statedir = /var +# User resource directory +local = ~/.local/{distribution.name} + +stdlib = {base}/lib/python{py_version_short} +platstdlib = {platbase}/lib/python{py_version_short} +purelib = {base}/lib/python{py_version_short}/site-packages +platlib = {platbase}/lib/python{py_version_short}/site-packages +include = {base}/include/python{py_version_short}{abiflags} +platinclude = {platbase}/include/python{py_version_short}{abiflags} +data = {base} + +[posix_home] +stdlib = {base}/lib/python +platstdlib = {base}/lib/python +purelib = {base}/lib/python +platlib = {base}/lib/python +include = {base}/include/python +platinclude = {base}/include/python +scripts = {base}/bin +data = {base} + +[nt] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2] +stdlib = {base}/Lib +platstdlib = {base}/Lib +purelib = {base}/Lib/site-packages +platlib = {base}/Lib/site-packages +include = {base}/Include +platinclude = {base}/Include +scripts = {base}/Scripts +data = {base} + +[os2_home] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[nt_user] +stdlib = {userbase}/Python{py_version_nodot} +platstdlib = {userbase}/Python{py_version_nodot} +purelib = {userbase}/Python{py_version_nodot}/site-packages +platlib = {userbase}/Python{py_version_nodot}/site-packages +include = {userbase}/Python{py_version_nodot}/Include +scripts = {userbase}/Scripts +data = {userbase} + +[posix_user] +stdlib = {userbase}/lib/python{py_version_short} +platstdlib = {userbase}/lib/python{py_version_short} +purelib = {userbase}/lib/python{py_version_short}/site-packages +platlib = {userbase}/lib/python{py_version_short}/site-packages +include = {userbase}/include/python{py_version_short} +scripts = {userbase}/bin +data = {userbase} + +[osx_framework_user] +stdlib = {userbase}/lib/python +platstdlib = {userbase}/lib/python +purelib = {userbase}/lib/python/site-packages +platlib = {userbase}/lib/python/site-packages +include = {userbase}/include +scripts = {userbase}/bin +data = {userbase} diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py new file mode 100644 index 0000000..1d31326 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py @@ -0,0 +1,788 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +"""Access to Python's configuration information.""" + +import codecs +import os +import re +import sys +from os.path import pardir, realpath +try: + import configparser +except ImportError: + import ConfigParser as configparser + + +__all__ = [ + 'get_config_h_filename', + 'get_config_var', + 'get_config_vars', + 'get_makefile_filename', + 'get_path', + 'get_path_names', + 'get_paths', + 'get_platform', + 'get_python_version', + 'get_scheme_names', + 'parse_config_h', +] + + +def _safe_realpath(path): + try: + return realpath(path) + except OSError: + return path + + +if sys.executable: + _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) +else: + # sys.executable can be empty if argv[0] has been changed and Python is + # unable to retrieve the real program name + _PROJECT_BASE = _safe_realpath(os.getcwd()) + +if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) +# PC/VS7.1 +if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) +# PC/AMD64 +if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): + _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) + + +def is_python_build(): + for fn in ("Setup.dist", "Setup.local"): + if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): + return True + return False + +_PYTHON_BUILD = is_python_build() + +_cfg_read = False + +def _ensure_cfg_read(): + global _cfg_read + if not _cfg_read: + from ..resources import finder + backport_package = __name__.rsplit('.', 1)[0] + _finder = finder(backport_package) + _cfgfile = _finder.find('sysconfig.cfg') + assert _cfgfile, 'sysconfig.cfg exists' + with _cfgfile.as_stream() as s: + _SCHEMES.readfp(s) + if _PYTHON_BUILD: + for scheme in ('posix_prefix', 'posix_home'): + _SCHEMES.set(scheme, 'include', '{srcdir}/Include') + _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') + + _cfg_read = True + + +_SCHEMES = configparser.RawConfigParser() +_VAR_REPL = re.compile(r'\{([^{]*?)\}') + +def _expand_globals(config): + _ensure_cfg_read() + if config.has_section('globals'): + globals = config.items('globals') + else: + globals = tuple() + + sections = config.sections() + for section in sections: + if section == 'globals': + continue + for option, value in globals: + if config.has_option(section, option): + continue + config.set(section, option, value) + config.remove_section('globals') + + # now expanding local variables defined in the cfg file + # + for section in config.sections(): + variables = dict(config.items(section)) + + def _replacer(matchobj): + name = matchobj.group(1) + if name in variables: + return variables[name] + return matchobj.group(0) + + for option, value in config.items(section): + config.set(section, option, _VAR_REPL.sub(_replacer, value)) + +#_expand_globals(_SCHEMES) + + # FIXME don't rely on sys.version here, its format is an implementation detail + # of CPython, use sys.version_info or sys.hexversion +_PY_VERSION = sys.version.split()[0] +_PY_VERSION_SHORT = sys.version[:3] +_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] +_PREFIX = os.path.normpath(sys.prefix) +_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +_CONFIG_VARS = None +_USER_BASE = None + + +def _subst_vars(path, local_vars): + """In the string `path`, replace tokens like {some.thing} with the + corresponding value from the map `local_vars`. 
+ + If there is no corresponding value, leave the token unchanged. + """ + def _replacer(matchobj): + name = matchobj.group(1) + if name in local_vars: + return local_vars[name] + elif name in os.environ: + return os.environ[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, path) + + +def _extend_dict(target_dict, other_dict): + target_keys = target_dict.keys() + for key, value in other_dict.items(): + if key in target_keys: + continue + target_dict[key] = value + + +def _expand_vars(scheme, vars): + res = {} + if vars is None: + vars = {} + _extend_dict(vars, get_config_vars()) + + for key, value in _SCHEMES.items(scheme): + if os.name in ('posix', 'nt'): + value = os.path.expanduser(value) + res[key] = os.path.normpath(_subst_vars(value, vars)) + return res + + +def format_value(value, vars): + def _replacer(matchobj): + name = matchobj.group(1) + if name in vars: + return vars[name] + return matchobj.group(0) + return _VAR_REPL.sub(_replacer, value) + + +def _get_default_scheme(): + if os.name == 'posix': + # the default scheme for posix is posix_prefix + return 'posix_prefix' + return os.name + + +def _getuserbase(): + env_base = os.environ.get("PYTHONUSERBASE", None) + + def joinuser(*args): + return os.path.expanduser(os.path.join(*args)) + + # what about 'os2emx', 'riscos' ? + if os.name == "nt": + base = os.environ.get("APPDATA") or "~" + if env_base: + return env_base + else: + return joinuser(base, "Python") + + if sys.platform == "darwin": + framework = get_config_var("PYTHONFRAMEWORK") + if framework: + if env_base: + return env_base + else: + return joinuser("~", "Library", framework, "%d.%d" % + sys.version_info[:2]) + + if env_base: + return env_base + else: + return joinuser("~", ".local") + + +def _parse_makefile(filename, vars=None): + """Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + """ + # Regexes needed for parsing Makefile (and similar syntaxes, + # like old-style Setup files). + _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") + _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") + + if vars is None: + vars = {} + done = {} + notdone = {} + + with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: + lines = f.readlines() + + for line in lines: + if line.startswith('#') or line.strip() == '': + continue + m = _variable_rx.match(line) + if m: + n, v = m.group(1, 2) + v = v.strip() + # `$$' is a literal `$' in make + tmpv = v.replace('$$', '') + + if "$" in tmpv: + notdone[n] = v + else: + try: + v = int(v) + except ValueError: + # insert literal `$' + done[n] = v.replace('$$', '$') + else: + done[n] = v + + # do variable interpolation here + variables = list(notdone.keys()) + + # Variables with a 'PY_' prefix in the makefile. These need to + # be made available without that prefix through sysconfig. + # Special care is needed to ensure that variable expansion works, even + # if the expansion uses the name without a prefix. 
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') + + while len(variables) > 0: + for name in tuple(variables): + value = notdone[name] + m = _findvar1_rx.search(value) or _findvar2_rx.search(value) + if m is not None: + n = m.group(1) + found = True + if n in done: + item = str(done[n]) + elif n in notdone: + # get it on a subsequent round + found = False + elif n in os.environ: + # do it like make: fall back to environment + item = os.environ[n] + + elif n in renamed_variables: + if (name.startswith('PY_') and + name[3:] in renamed_variables): + item = "" + + elif 'PY_' + n in notdone: + found = False + + else: + item = str(done['PY_' + n]) + + else: + done[n] = item = "" + + if found: + after = value[m.end():] + value = value[:m.start()] + item + after + if "$" in after: + notdone[name] = value + else: + try: + value = int(value) + except ValueError: + done[name] = value.strip() + else: + done[name] = value + variables.remove(name) + + if (name.startswith('PY_') and + name[3:] in renamed_variables): + + name = name[3:] + if name not in done: + done[name] = value + + else: + # bogus variable reference (e.g. "prefix=$/opt/python"); + # just drop it since we can't deal + done[name] = value + variables.remove(name) + + # strip spurious spaces + for k, v in done.items(): + if isinstance(v, str): + done[k] = v.strip() + + # save the results in the global dictionary + vars.update(done) + return vars + + +def get_makefile_filename(): + """Return the path of the Makefile.""" + if _PYTHON_BUILD: + return os.path.join(_PROJECT_BASE, "Makefile") + if hasattr(sys, 'abiflags'): + config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) + else: + config_dir_name = 'config' + return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') + + +def _init_posix(vars): + """Initialize the module as appropriate for POSIX systems.""" + # load the installed Makefile: + makefile = get_makefile_filename() + try: + _parse_makefile(makefile, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % makefile + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # load the installed pyconfig.h: + config_h = get_config_h_filename() + try: + with open(config_h) as f: + parse_config_h(f, vars) + except IOError as e: + msg = "invalid Python installation: unable to open %s" % config_h + if hasattr(e, "strerror"): + msg = msg + " (%s)" % e.strerror + raise IOError(msg) + # On AIX, there are wrong paths to the linker scripts in the Makefile + # -- these paths are relative to the Python source, but when installed + # the scripts are in another directory. + if _PYTHON_BUILD: + vars['LDSHARED'] = vars['BLDSHARED'] + + +def _init_non_posix(vars): + """Initialize the module as appropriate for NT""" + # set basic install directories + vars['LIBDEST'] = get_path('stdlib') + vars['BINLIBDEST'] = get_path('platstdlib') + vars['INCLUDEPY'] = get_path('include') + vars['SO'] = '.pyd' + vars['EXE'] = '.exe' + vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT + vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + +# +# public APIs +# + + +def parse_config_h(fp, vars=None): + """Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. 
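A usage sketch for parse_config_h() as defined here; the pyconfig.h fragment below is made up:

import io

sample = io.StringIO(u'#define HAVE_UNISTD_H 1\n'
                     u'/* #undef HAVE_LIBFOO */\n')
print(parse_config_h(sample))
# -> {'HAVE_UNISTD_H': 1, 'HAVE_LIBFOO': 0}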
+ """ + if vars is None: + vars = {} + define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") + undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") + + while True: + line = fp.readline() + if not line: + break + m = define_rx.match(line) + if m: + n, v = m.group(1, 2) + try: + v = int(v) + except ValueError: + pass + vars[n] = v + else: + m = undef_rx.match(line) + if m: + vars[m.group(1)] = 0 + return vars + + +def get_config_h_filename(): + """Return the path of pyconfig.h.""" + if _PYTHON_BUILD: + if os.name == "nt": + inc_dir = os.path.join(_PROJECT_BASE, "PC") + else: + inc_dir = _PROJECT_BASE + else: + inc_dir = get_path('platinclude') + return os.path.join(inc_dir, 'pyconfig.h') + + +def get_scheme_names(): + """Return a tuple containing the schemes names.""" + return tuple(sorted(_SCHEMES.sections())) + + +def get_path_names(): + """Return a tuple containing the paths names.""" + # xxx see if we want a static list + return _SCHEMES.options('posix_prefix') + + +def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): + """Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + """ + _ensure_cfg_read() + if expand: + return _expand_vars(scheme, vars) + else: + return dict(_SCHEMES.items(scheme)) + + +def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): + """Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + """ + return get_paths(scheme, vars, expand)[name] + + +def get_config_vars(*args): + """With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows and Mac OS it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + """ + global _CONFIG_VARS + if _CONFIG_VARS is None: + _CONFIG_VARS = {} + # Normalized versions of prefix and exec_prefix are handy to have; + # in fact, these are the standard versions used most places in the + # distutils2 module. + _CONFIG_VARS['prefix'] = _PREFIX + _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX + _CONFIG_VARS['py_version'] = _PY_VERSION + _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT + _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] + _CONFIG_VARS['base'] = _PREFIX + _CONFIG_VARS['platbase'] = _EXEC_PREFIX + _CONFIG_VARS['projectbase'] = _PROJECT_BASE + try: + _CONFIG_VARS['abiflags'] = sys.abiflags + except AttributeError: + # sys.abiflags may not be defined on all platforms. + _CONFIG_VARS['abiflags'] = '' + + if os.name in ('nt', 'os2'): + _init_non_posix(_CONFIG_VARS) + if os.name == 'posix': + _init_posix(_CONFIG_VARS) + # Setting 'userbase' is done below the call to the + # init function to enable using 'get_config_var' in + # the init-function. + if sys.version >= '2.6': + _CONFIG_VARS['userbase'] = _getuserbase() + + if 'srcdir' not in _CONFIG_VARS: + _CONFIG_VARS['srcdir'] = _PROJECT_BASE + else: + _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) + + # Convert srcdir into an absolute path if it appears necessary. + # Normally it is relative to the build directory. However, during + # testing, for example, we might be running a non-installed python + # from a different directory. 
+ if _PYTHON_BUILD and os.name == "posix": + base = _PROJECT_BASE + try: + cwd = os.getcwd() + except OSError: + cwd = None + if (not os.path.isabs(_CONFIG_VARS['srcdir']) and + base != cwd): + # srcdir is relative and we are not in the same directory + # as the executable. Assume executable is in the build + # directory and make srcdir absolute. + srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) + _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) + + if sys.platform == 'darwin': + kernel_version = os.uname()[2] # Kernel version (8.4.3) + major_version = int(kernel_version.split('.')[0]) + + if major_version < 8: + # On Mac OS X before 10.4, check if -arch and -isysroot + # are in CFLAGS or LDFLAGS and remove them if they are. + # This is needed when building extensions on a 10.3 system + # using a universal build of python. + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub('-isysroot [^ \t]*', ' ', flags) + _CONFIG_VARS[key] = flags + else: + # Allow the user to override the architecture flags using + # an environment variable. + # NOTE: This name was introduced by Apple in OSX 10.5 and + # is used by several scripting languages distributed with + # that OS release. + if 'ARCHFLAGS' in os.environ: + arch = os.environ['ARCHFLAGS'] + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = flags + ' ' + arch + _CONFIG_VARS[key] = flags + + # If we're on OSX 10.5 or later and the user tries to + # compiles an extension using an SDK that is not present + # on the current machine it is better to not use an SDK + # than to fail. + # + # The major usecase for this is users using a Python.org + # binary installer on OSX 10.6: that installer uses + # the 10.4u SDK, but that SDK is not installed by default + # when you install Xcode. + # + CFLAGS = _CONFIG_VARS.get('CFLAGS', '') + m = re.search('-isysroot\s+(\S+)', CFLAGS) + if m is not None: + sdk = m.group(1) + if not os.path.exists(sdk): + for key in ('LDFLAGS', 'BASECFLAGS', + # a number of derived variables. These need to be + # patched up as well. + 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): + + flags = _CONFIG_VARS[key] + flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags) + _CONFIG_VARS[key] = flags + + if args: + vals = [] + for name in args: + vals.append(_CONFIG_VARS.get(name)) + return vals + else: + return _CONFIG_VARS + + +def get_config_var(name): + """Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + """ + return get_config_vars().get(name) + + +def get_platform(): + """Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name + and version and the architecture (as supplied by 'os.uname()'), + although the exact information included depends on the OS; eg. for IRIX + the architecture isn't particularly important (IRIX only runs on SGI + hardware), but for Linux the kernel version isn't particularly + important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
solaris-2.6-sun4u
+       irix-5.3
+       irix64-6.2
+
+    Windows will return one of:
+       win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
+       win-ia64 (64bit Windows on Itanium)
+       win32 (all others - specifically, sys.platform is returned)
+
+    For other non-POSIX platforms, currently just returns 'sys.platform'.
+    """
+    if os.name == 'nt':
+        # sniff sys.version for architecture.
+        prefix = " bit ("
+        i = sys.version.find(prefix)
+        if i == -1:
+            return sys.platform
+        j = sys.version.find(")", i)
+        look = sys.version[i+len(prefix):j].lower()
+        if look == 'amd64':
+            return 'win-amd64'
+        if look == 'itanium':
+            return 'win-ia64'
+        return sys.platform
+
+    if os.name != "posix" or not hasattr(os, 'uname'):
+        # XXX what about the architecture? NT is Intel or Alpha,
+        # Mac OS is M68k or PPC, etc.
+        return sys.platform
+
+    # Try to distinguish various flavours of Unix
+    osname, host, release, version, machine = os.uname()
+
+    # Convert the OS name to lowercase, remove '/' characters
+    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
+    osname = osname.lower().replace('/', '')
+    machine = machine.replace(' ', '_')
+    machine = machine.replace('/', '-')
+
+    if osname[:5] == "linux":
+        # At least on Linux/Intel, 'machine' is the processor --
+        # i386, etc.
+        # XXX what about Alpha, SPARC, etc?
+        return "%s-%s" % (osname, machine)
+    elif osname[:5] == "sunos":
+        if release[0] >= "5":           # SunOS 5 == Solaris 2
+            osname = "solaris"
+            release = "%d.%s" % (int(release[0]) - 3, release[2:])
+            # fall through to standard osname-release-machine representation
+    elif osname[:4] == "irix":          # could be "irix64"!
+        return "%s-%s" % (osname, release)
+    elif osname[:3] == "aix":
+        return "%s-%s.%s" % (osname, version, release)
+    elif osname[:6] == "cygwin":
+        osname = "cygwin"
+        rel_re = re.compile(r'[\d.]+')
+        m = rel_re.match(release)
+        if m:
+            release = m.group()
+    elif osname[:6] == "darwin":
+        #
+        # For our purposes, we'll assume that the system version from
+        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
+        # to. This makes the compatibility story a bit more sane because the
+        # machine is going to compile and link as if it were
+        # MACOSX_DEPLOYMENT_TARGET.
+        cfgvars = get_config_vars()
+        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
+
+        if True:
+            # Always calculate the release of the running machine,
+            # needed to determine if we can build fat binaries or not.
+
+            macrelease = macver
+            # Get the system version. Reading this plist is a documented
+            # way to get the system version (see the documentation for
+            # the Gestalt Manager)
+            try:
+                f = open('/System/Library/CoreServices/SystemVersion.plist')
+            except IOError:
+                # We're on a plain darwin box, fall back to the default
+                # behaviour.
+                pass
+            else:
+                try:
+                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+                                  r'<string>(.*?)</string>', f.read())
+                finally:
+                    f.close()
+                if m is not None:
+                    macrelease = '.'.join(m.group(1).split('.')[:2])
+                # else: fall back to the default behaviour
+
+        if not macver:
+            macver = macrelease
+
+        if macver:
+            release = macver
+            osname = "macosx"
+
+            if ((macrelease + '.') >= '10.4.' and
+                '-arch' in get_config_vars().get('CFLAGS', '').strip()):
+                # The universal build will build fat binaries, but not on
+                # systems before 10.4
+                #
+                # Try to detect 4-way universal builds, those have machine-type
+                # 'universal' instead of 'fat'.
+
+                machine = 'fat'
+                cflags = get_config_vars().get('CFLAGS')
+
+                archs = re.findall('-arch\s+(\S+)', cflags)
+                archs = tuple(sorted(set(archs)))
+
+                if len(archs) == 1:
+                    machine = archs[0]
+                elif archs == ('i386', 'ppc'):
+                    machine = 'fat'
+                elif archs == ('i386', 'x86_64'):
+                    machine = 'intel'
+                elif archs == ('i386', 'ppc', 'x86_64'):
+                    machine = 'fat3'
+                elif archs == ('ppc64', 'x86_64'):
+                    machine = 'fat64'
+                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
+                    machine = 'universal'
+                else:
+                    raise ValueError(
+                        "Don't know machine value for archs=%r" % (archs,))
+
+        elif machine == 'i386':
+            # On OSX the machine type returned by uname is always the
+            # 32-bit variant, even if the executable architecture is
+            # the 64-bit variant
+            if sys.maxsize >= 2**32:
+                machine = 'x86_64'
+
+        elif machine in ('PowerPC', 'Power_Macintosh'):
+            # Pick a sane name for the PPC architecture.
+            # See 'i386' case
+            if sys.maxsize >= 2**32:
+                machine = 'ppc64'
+            else:
+                machine = 'ppc'
+
+    return "%s-%s-%s" % (osname, release, machine)
+
+
+def get_python_version():
+    return _PY_VERSION_SHORT
+
+
+def _print_dict(title, data):
+    for index, (key, value) in enumerate(sorted(data.items())):
+        if index == 0:
+            print('%s: ' % (title))
+        print('\t%s = "%s"' % (key, value))
+
+
+def _main():
+    """Display all information sysconfig contains."""
+    print('Platform: "%s"' % get_platform())
+    print('Python version: "%s"' % get_python_version())
+    print('Current installation scheme: "%s"' % _get_default_scheme())
+    print()
+    _print_dict('Paths', get_paths())
+    print()
+    _print_dict('Variables', get_config_vars())
+
+
+if __name__ == '__main__':
+    _main()
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
new file mode 100644
index 0000000..0580fb7
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
@@ -0,0 +1,2607 @@
+#-------------------------------------------------------------------
+# tarfile.py
+#-------------------------------------------------------------------
+# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+from __future__ import print_function
+
+"""Read from and write to tar format archives.
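Typical use of this module, which mirrors the stdlib tarfile API it backports; the archive path and extraction directory below are placeholders:

import tarfile

archive = tarfile.open("example.tar.gz", "r:gz")
try:
    for member in archive.getmembers():
        print("%s (%d bytes)" % (member.name, member.size))
    archive.extractall("/tmp/unpacked")
finally:
    archive.close()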
+""" + +__version__ = "$Revision$" + +version = "0.9.0" +__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" +__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" +__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" +__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." + +#--------- +# Imports +#--------- +import sys +import os +import stat +import errno +import time +import struct +import copy +import re + +try: + import grp, pwd +except ImportError: + grp = pwd = None + +# os.symlink on Windows prior to 6.0 raises NotImplementedError +symlink_exception = (AttributeError, NotImplementedError) +try: + # WindowsError (1314) will be raised if the caller does not hold the + # SeCreateSymbolicLinkPrivilege privilege + symlink_exception += (WindowsError,) +except NameError: + pass + +# from tarfile import * +__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] + +if sys.version_info[0] < 3: + import __builtin__ as builtins +else: + import builtins + +_open = builtins.open # Since 'open' is TarFile.open + +#--------------------------------------------------------- +# tar constants +#--------------------------------------------------------- +NUL = b"\0" # the null character +BLOCKSIZE = 512 # length of processing blocks +RECORDSIZE = BLOCKSIZE * 20 # length of records +GNU_MAGIC = b"ustar \0" # magic gnu tar string +POSIX_MAGIC = b"ustar\x0000" # magic posix tar string + +LENGTH_NAME = 100 # maximum length of a filename +LENGTH_LINK = 100 # maximum length of a linkname +LENGTH_PREFIX = 155 # maximum length of the prefix field + +REGTYPE = b"0" # regular file +AREGTYPE = b"\0" # regular file +LNKTYPE = b"1" # link (inside tarfile) +SYMTYPE = b"2" # symbolic link +CHRTYPE = b"3" # character special device +BLKTYPE = b"4" # block special device +DIRTYPE = b"5" # directory +FIFOTYPE = b"6" # fifo special device +CONTTYPE = b"7" # contiguous file + +GNUTYPE_LONGNAME = b"L" # GNU tar longname +GNUTYPE_LONGLINK = b"K" # GNU tar longlink +GNUTYPE_SPARSE = b"S" # GNU tar sparse file + +XHDTYPE = b"x" # POSIX.1-2001 extended header +XGLTYPE = b"g" # POSIX.1-2001 global header +SOLARIS_XHDTYPE = b"X" # Solaris extended header + +USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format +GNU_FORMAT = 1 # GNU tar format +PAX_FORMAT = 2 # POSIX.1-2001 (pax) format +DEFAULT_FORMAT = GNU_FORMAT + +#--------------------------------------------------------- +# tarfile constants +#--------------------------------------------------------- +# File types that tarfile supports: +SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, + SYMTYPE, DIRTYPE, FIFOTYPE, + CONTTYPE, CHRTYPE, BLKTYPE, + GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# File types that will be treated as a regular file. +REGULAR_TYPES = (REGTYPE, AREGTYPE, + CONTTYPE, GNUTYPE_SPARSE) + +# File types that are part of the GNU tar format. +GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, + GNUTYPE_SPARSE) + +# Fields from a pax header that override a TarInfo attribute. +PAX_FIELDS = ("path", "linkpath", "size", "mtime", + "uid", "gid", "uname", "gname") + +# Fields from a pax header that are affected by hdrcharset. +PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) + +# Fields in a pax header that are numbers, all other fields +# are treated as strings. 
+PAX_NUMBER_FIELDS = { + "atime": float, + "ctime": float, + "mtime": float, + "uid": int, + "gid": int, + "size": int +} + +#--------------------------------------------------------- +# Bits used in the mode field, values in octal. +#--------------------------------------------------------- +S_IFLNK = 0o120000 # symbolic link +S_IFREG = 0o100000 # regular file +S_IFBLK = 0o060000 # block device +S_IFDIR = 0o040000 # directory +S_IFCHR = 0o020000 # character device +S_IFIFO = 0o010000 # fifo + +TSUID = 0o4000 # set UID on execution +TSGID = 0o2000 # set GID on execution +TSVTX = 0o1000 # reserved + +TUREAD = 0o400 # read by owner +TUWRITE = 0o200 # write by owner +TUEXEC = 0o100 # execute/search by owner +TGREAD = 0o040 # read by group +TGWRITE = 0o020 # write by group +TGEXEC = 0o010 # execute/search by group +TOREAD = 0o004 # read by other +TOWRITE = 0o002 # write by other +TOEXEC = 0o001 # execute/search by other + +#--------------------------------------------------------- +# initialization +#--------------------------------------------------------- +if os.name in ("nt", "ce"): + ENCODING = "utf-8" +else: + ENCODING = sys.getfilesystemencoding() + +#--------------------------------------------------------- +# Some useful functions +#--------------------------------------------------------- + +def stn(s, length, encoding, errors): + """Convert a string to a null-terminated bytes object. + """ + s = s.encode(encoding, errors) + return s[:length] + (length - len(s)) * NUL + +def nts(s, encoding, errors): + """Convert a null-terminated bytes object to a string. + """ + p = s.find(b"\0") + if p != -1: + s = s[:p] + return s.decode(encoding, errors) + +def nti(s): + """Convert a number field to a python number. + """ + # There are two possible encodings for a number field, see + # itn() below. + if s[0] != chr(0o200): + try: + n = int(nts(s, "ascii", "strict") or "0", 8) + except ValueError: + raise InvalidHeaderError("invalid header") + else: + n = 0 + for i in range(len(s) - 1): + n <<= 8 + n += ord(s[i + 1]) + return n + +def itn(n, digits=8, format=DEFAULT_FORMAT): + """Convert a python number to a number field. + """ + # POSIX 1003.1-1988 requires numbers to be encoded as a string of + # octal digits followed by a null-byte, this allows values up to + # (8**(digits-1))-1. GNU tar allows storing numbers greater than + # that if necessary. A leading 0o200 byte indicates this particular + # encoding, the following digits-1 bytes are a big-endian + # representation. This allows values up to (256**(digits-1))-1. + if 0 <= n < 8 ** (digits - 1): + s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL + else: + if format != GNU_FORMAT or n >= 256 ** (digits - 1): + raise ValueError("overflow in number field") + + if n < 0: + # XXX We mimic GNU tar's behaviour with negative numbers, + # this could raise OverflowError. + n = struct.unpack("L", struct.pack("l", n))[0] + + s = bytearray() + for i in range(digits - 1): + s.insert(0, n & 0o377) + n >>= 8 + s.insert(0, 0o200) + return s + +def calc_chksums(buf): + """Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. 
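The constant 256 in calc_chksums() is the chksum field's own contribution: eight bytes counted as ASCII spaces (8 * 0x20 = 256). For an all-zero header block the unsigned checksum is therefore exactly 256:

header = bytearray(512)
unsigned = 256 + sum(header[:148]) + sum(header[156:])
print(unsigned)   # -> 256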
+ """ + unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) + signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) + return unsigned_chksum, signed_chksum + +def copyfileobj(src, dst, length=None): + """Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + """ + if length == 0: + return + if length is None: + while True: + buf = src.read(16*1024) + if not buf: + break + dst.write(buf) + return + + BUFSIZE = 16 * 1024 + blocks, remainder = divmod(length, BUFSIZE) + for b in range(blocks): + buf = src.read(BUFSIZE) + if len(buf) < BUFSIZE: + raise IOError("end of file reached") + dst.write(buf) + + if remainder != 0: + buf = src.read(remainder) + if len(buf) < remainder: + raise IOError("end of file reached") + dst.write(buf) + return + +filemode_table = ( + ((S_IFLNK, "l"), + (S_IFREG, "-"), + (S_IFBLK, "b"), + (S_IFDIR, "d"), + (S_IFCHR, "c"), + (S_IFIFO, "p")), + + ((TUREAD, "r"),), + ((TUWRITE, "w"),), + ((TUEXEC|TSUID, "s"), + (TSUID, "S"), + (TUEXEC, "x")), + + ((TGREAD, "r"),), + ((TGWRITE, "w"),), + ((TGEXEC|TSGID, "s"), + (TSGID, "S"), + (TGEXEC, "x")), + + ((TOREAD, "r"),), + ((TOWRITE, "w"),), + ((TOEXEC|TSVTX, "t"), + (TSVTX, "T"), + (TOEXEC, "x")) +) + +def filemode(mode): + """Convert a file's mode to a string of the form + -rwxrwxrwx. + Used by TarFile.list() + """ + perm = [] + for table in filemode_table: + for bit, char in table: + if mode & bit == bit: + perm.append(char) + break + else: + perm.append("-") + return "".join(perm) + +class TarError(Exception): + """Base exception.""" + pass +class ExtractError(TarError): + """General exception for extract errors.""" + pass +class ReadError(TarError): + """Exception for unreadble tar archives.""" + pass +class CompressionError(TarError): + """Exception for unavailable compression methods.""" + pass +class StreamError(TarError): + """Exception for unsupported operations on stream-like TarFiles.""" + pass +class HeaderError(TarError): + """Base exception for header errors.""" + pass +class EmptyHeaderError(HeaderError): + """Exception for empty headers.""" + pass +class TruncatedHeaderError(HeaderError): + """Exception for truncated headers.""" + pass +class EOFHeaderError(HeaderError): + """Exception for end of file headers.""" + pass +class InvalidHeaderError(HeaderError): + """Exception for invalid headers.""" + pass +class SubsequentHeaderError(HeaderError): + """Exception for missing and invalid extended headers.""" + pass + +#--------------------------- +# internal stream interface +#--------------------------- +class _LowLevelFile(object): + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, + "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode, 0o666) + + def close(self): + os.close(self.fd) + + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) + +class _Stream(object): + """Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. 
+
+       _Stream is intended to be used only internally.
+    """
+
+    def __init__(self, name, mode, comptype, fileobj, bufsize):
+        """Construct a _Stream object.
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name = name or ""
+        self.mode = mode
+        self.comptype = comptype
+        self.fileobj = fileobj
+        self.bufsize = bufsize
+        self.buf = b""
+        self.pos = 0
+        self.closed = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available")
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self._init_read_gz()
+                else:
+                    self._init_write_gz()
+
+            if comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available")
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                else:
+                    self.cmp = bz2.BZ2Compressor()
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+                                          -self.zlib.MAX_WBITS,
+                                          self.zlib.DEF_MEM_LEVEL,
+                                          0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        if self.mode == "w" and self.comptype != "tar":
+            self.buf += self.cmp.flush()
+
+        if self.mode == "w" and self.buf:
+            self.fileobj.write(self.buf)
+            self.buf = b""
+            if self.comptype == "gz":
+                # The native zlib crc is an unsigned 32-bit integer, but
+                # the Python wrapper implicitly casts that to a signed C
+                # long. So, on a 32-bit box self.crc may "look negative",
+                # while the same crc on a 64-bit box may "look positive".
+                # To avoid irksome warnings from the `struct` module, force
+                # it to look positive on all boxes.
+                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
+                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
+
+        if not self._extfileobj:
+            self.fileobj.close()
+
+        self.closed = True
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size=None):
+        """Return the next size number of bytes from the stream.
+           If size is not defined, return all bytes of the stream
+           up to EOF.
+        """
+        if size is None:
+            t = []
+            while True:
+                buf = self._read(self.bufsize)
+                if not buf:
+                    break
+                t.append(buf)
+            buf = b"".join(t)
+        else:
+            buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        while c < size:
+            buf = self.__read(self.bufsize)
+            if not buf:
+                break
+            try:
+                buf = self.cmp.decompress(buf)
+            except IOError:
+                raise ReadError("invalid compressed data")
+            self.dbuf += buf
+            c += len(buf)
+        buf = self.dbuf[:size]
+        self.dbuf = self.dbuf[size:]
+        return buf
+
+    def __read(self, size):
+        """Return size bytes from stream. If internal buffer is empty,
+           read another block from the stream.
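The buffer-and-refill pattern used by __read() and _read(), reduced to a standalone sketch:

import io

def blockwise_read(fileobj, size, bufsize=1024):
    buf = b""
    while len(buf) < size:          # refill until the request is covered
        block = fileobj.read(bufsize)
        if not block:               # EOF: return what we have
            break
        buf += block
    return buf[:size]

print(len(blockwise_read(io.BytesIO(b"x" * 3000), 10)))   # -> 10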
+ """ + c = len(self.buf) + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + self.buf += buf + c += len(buf) + buf = self.buf[:size] + self.buf = self.buf[size:] + return buf +# class _Stream + +class _StreamProxy(object): + """Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + """ + + def __init__(self, fileobj): + self.fileobj = fileobj + self.buf = self.fileobj.read(BLOCKSIZE) + + def read(self, size): + self.read = self.fileobj.read + return self.buf + + def getcomptype(self): + if self.buf.startswith(b"\037\213\010"): + return "gz" + if self.buf.startswith(b"BZh91"): + return "bz2" + return "tar" + + def close(self): + self.fileobj.close() +# class StreamProxy + +class _BZ2Proxy(object): + """Small proxy class that enables external file object + support for "r:bz2" and "w:bz2" modes. This is actually + a workaround for a limitation in bz2 module's BZ2File + class which (unlike gzip.GzipFile) has no support for + a file object argument. + """ + + blocksize = 16 * 1024 + + def __init__(self, fileobj, mode): + self.fileobj = fileobj + self.mode = mode + self.name = getattr(self.fileobj, "name", None) + self.init() + + def init(self): + import bz2 + self.pos = 0 + if self.mode == "r": + self.bz2obj = bz2.BZ2Decompressor() + self.fileobj.seek(0) + self.buf = b"" + else: + self.bz2obj = bz2.BZ2Compressor() + + def read(self, size): + x = len(self.buf) + while x < size: + raw = self.fileobj.read(self.blocksize) + if not raw: + break + data = self.bz2obj.decompress(raw) + self.buf += data + x += len(data) + + buf = self.buf[:size] + self.buf = self.buf[size:] + self.pos += len(buf) + return buf + + def seek(self, pos): + if pos < self.pos: + self.init() + self.read(pos - self.pos) + + def tell(self): + return self.pos + + def write(self, data): + self.pos += len(data) + raw = self.bz2obj.compress(data) + self.fileobj.write(raw) + + def close(self): + if self.mode == "w": + raw = self.bz2obj.flush() + self.fileobj.write(raw) +# class _BZ2Proxy + +#------------------------ +# Extraction file object +#------------------------ +class _FileInFile(object): + """A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + """ + + def __init__(self, fileobj, offset, size, blockinfo=None): + self.fileobj = fileobj + self.offset = offset + self.size = size + self.position = 0 + + if blockinfo is None: + blockinfo = [(0, size)] + + # Construct a map with data and zero blocks. + self.map_index = 0 + self.map = [] + lastpos = 0 + realpos = self.offset + for offset, size in blockinfo: + if offset > lastpos: + self.map.append((False, lastpos, offset, None)) + self.map.append((True, offset, offset + size, realpos)) + realpos += size + lastpos = offset + size + if lastpos < self.size: + self.map.append((False, lastpos, self.size, None)) + + def seekable(self): + if not hasattr(self.fileobj, "seekable"): + # XXX gzip.GzipFile and bz2.BZ2File + return True + return self.fileobj.seekable() + + def tell(self): + """Return the current file position. + """ + return self.position + + def seek(self, position): + """Seek to a position in the file. + """ + self.position = position + + def read(self, size=None): + """Read data from the file. 
+ """ + if size is None: + size = self.size - self.position + else: + size = min(size, self.size - self.position) + + buf = b"" + while size > 0: + while True: + data, start, stop, offset = self.map[self.map_index] + if start <= self.position < stop: + break + else: + self.map_index += 1 + if self.map_index == len(self.map): + self.map_index = 0 + length = min(size, stop - self.position) + if data: + self.fileobj.seek(offset + (self.position - start)) + buf += self.fileobj.read(length) + else: + buf += NUL * length + size -= length + self.position += length + return buf +#class _FileInFile + + +class ExFileObject(object): + """File-like object for reading an archive member. + Is returned by TarFile.extractfile(). + """ + blocksize = 1024 + + def __init__(self, tarfile, tarinfo): + self.fileobj = _FileInFile(tarfile.fileobj, + tarinfo.offset_data, + tarinfo.size, + tarinfo.sparse) + self.name = tarinfo.name + self.mode = "r" + self.closed = False + self.size = tarinfo.size + + self.position = 0 + self.buffer = b"" + + def readable(self): + return True + + def writable(self): + return False + + def seekable(self): + return self.fileobj.seekable() + + def read(self, size=None): + """Read at most size bytes from the file. If size is not + present or None, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + buf = b"" + if self.buffer: + if size is None: + buf = self.buffer + self.buffer = b"" + else: + buf = self.buffer[:size] + self.buffer = self.buffer[size:] + + if size is None: + buf += self.fileobj.read() + else: + buf += self.fileobj.read(size - len(buf)) + + self.position += len(buf) + return buf + + # XXX TextIOWrapper uses the read1() method. + read1 = read + + def readline(self, size=-1): + """Read one entire line from the file. If size is present + and non-negative, return a string with at most that + size, which may be an incomplete line. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + while True: + buf = self.fileobj.read(self.blocksize) + self.buffer += buf + if not buf or b"\n" in buf: + pos = self.buffer.find(b"\n") + 1 + if pos == 0: + # no newline found. + pos = len(self.buffer) + break + + if size != -1: + pos = min(size, pos) + + buf = self.buffer[:pos] + self.buffer = self.buffer[pos:] + self.position += len(buf) + return buf + + def readlines(self): + """Return a list with all remaining lines. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def tell(self): + """Return the current file position. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + return self.position + + def seek(self, pos, whence=os.SEEK_SET): + """Seek to a position in the file. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + + if whence == os.SEEK_SET: + self.position = min(max(pos, 0), self.size) + elif whence == os.SEEK_CUR: + if pos < 0: + self.position = max(self.position + pos, 0) + else: + self.position = min(self.position + pos, self.size) + elif whence == os.SEEK_END: + self.position = max(min(self.size + pos, self.size), 0) + else: + raise ValueError("Invalid argument") + + self.buffer = b"" + self.fileobj.seek(self.position) + + def close(self): + """Close the file object. + """ + self.closed = True + + def __iter__(self): + """Get an iterator over the file's lines. 
+ """ + while True: + line = self.readline() + if not line: + break + yield line +#class ExFileObject + +#------------------ +# Exported Classes +#------------------ +class TarInfo(object): + """Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + """ + + __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", + "chksum", "type", "linkname", "uname", "gname", + "devmajor", "devminor", + "offset", "offset_data", "pax_headers", "sparse", + "tarfile", "_sparse_structs", "_link_target") + + def __init__(self, name=""): + """Construct a TarInfo object. name is the optional name + of the member. + """ + self.name = name # member name + self.mode = 0o644 # file permissions + self.uid = 0 # user id + self.gid = 0 # group id + self.size = 0 # file size + self.mtime = 0 # modification time + self.chksum = 0 # header checksum + self.type = REGTYPE # member type + self.linkname = "" # link name + self.uname = "" # user name + self.gname = "" # group name + self.devmajor = 0 # device major number + self.devminor = 0 # device minor number + + self.offset = 0 # the tar header starts here + self.offset_data = 0 # the file's data starts here + + self.sparse = None # sparse member information + self.pax_headers = {} # pax header information + + # In pax headers the "name" and "linkname" field are called + # "path" and "linkpath". + def _getpath(self): + return self.name + def _setpath(self, name): + self.name = name + path = property(_getpath, _setpath) + + def _getlinkpath(self): + return self.linkname + def _setlinkpath(self, linkname): + self.linkname = linkname + linkpath = property(_getlinkpath, _setlinkpath) + + def __repr__(self): + return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) + + def get_info(self): + """Return the TarInfo's attributes as a dictionary. + """ + info = { + "name": self.name, + "mode": self.mode & 0o7777, + "uid": self.uid, + "gid": self.gid, + "size": self.size, + "mtime": self.mtime, + "chksum": self.chksum, + "type": self.type, + "linkname": self.linkname, + "uname": self.uname, + "gname": self.gname, + "devmajor": self.devmajor, + "devminor": self.devminor + } + + if info["type"] == DIRTYPE and not info["name"].endswith("/"): + info["name"] += "/" + + return info + + def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): + """Return a tar header as a string of 512 byte blocks. + """ + info = self.get_info() + + if format == USTAR_FORMAT: + return self.create_ustar_header(info, encoding, errors) + elif format == GNU_FORMAT: + return self.create_gnu_header(info, encoding, errors) + elif format == PAX_FORMAT: + return self.create_pax_header(info, encoding) + else: + raise ValueError("invalid format") + + def create_ustar_header(self, info, encoding, errors): + """Return the object as a ustar header block. + """ + info["magic"] = POSIX_MAGIC + + if len(info["linkname"]) > LENGTH_LINK: + raise ValueError("linkname is too long") + + if len(info["name"]) > LENGTH_NAME: + info["prefix"], info["name"] = self._posix_split_name(info["name"]) + + return self._create_header(info, USTAR_FORMAT, encoding, errors) + + def create_gnu_header(self, info, encoding, errors): + """Return the object as a GNU header block sequence. 
+ """ + info["magic"] = GNU_MAGIC + + buf = b"" + if len(info["linkname"]) > LENGTH_LINK: + buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) + + if len(info["name"]) > LENGTH_NAME: + buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) + + return buf + self._create_header(info, GNU_FORMAT, encoding, errors) + + def create_pax_header(self, info, encoding): + """Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + """ + info["magic"] = POSIX_MAGIC + pax_headers = self.pax_headers.copy() + + # Test string fields for values that exceed the field length or cannot + # be represented in ASCII encoding. + for name, hname, length in ( + ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), + ("uname", "uname", 32), ("gname", "gname", 32)): + + if hname in pax_headers: + # The pax header has priority. + continue + + # Try to encode the string as ASCII. + try: + info[name].encode("ascii", "strict") + except UnicodeEncodeError: + pax_headers[hname] = info[name] + continue + + if len(info[name]) > length: + pax_headers[hname] = info[name] + + # Test number fields for values that exceed the field limit or values + # that like to be stored as float. + for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): + if name in pax_headers: + # The pax header has priority. Avoid overflow. + info[name] = 0 + continue + + val = info[name] + if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): + pax_headers[name] = str(val) + info[name] = 0 + + # Create a pax extended header if necessary. + if pax_headers: + buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) + else: + buf = b"" + + return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") + + @classmethod + def create_pax_global_header(cls, pax_headers): + """Return the object as a pax global header block sequence. + """ + return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") + + def _posix_split_name(self, name): + """Split a name longer than 100 chars into a prefix + and a name part. + """ + prefix = name[:LENGTH_PREFIX + 1] + while prefix and prefix[-1] != "/": + prefix = prefix[:-1] + + name = name[len(prefix):] + prefix = prefix[:-1] + + if not prefix or len(name) > LENGTH_NAME: + raise ValueError("name is too long") + return prefix, name + + @staticmethod + def _create_header(info, format, encoding, errors): + """Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. 
+ """ + parts = [ + stn(info.get("name", ""), 100, encoding, errors), + itn(info.get("mode", 0) & 0o7777, 8, format), + itn(info.get("uid", 0), 8, format), + itn(info.get("gid", 0), 8, format), + itn(info.get("size", 0), 12, format), + itn(info.get("mtime", 0), 12, format), + b" ", # checksum field + info.get("type", REGTYPE), + stn(info.get("linkname", ""), 100, encoding, errors), + info.get("magic", POSIX_MAGIC), + stn(info.get("uname", ""), 32, encoding, errors), + stn(info.get("gname", ""), 32, encoding, errors), + itn(info.get("devmajor", 0), 8, format), + itn(info.get("devminor", 0), 8, format), + stn(info.get("prefix", ""), 155, encoding, errors) + ] + + buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) + chksum = calc_chksums(buf[-BLOCKSIZE:])[0] + buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] + return buf + + @staticmethod + def _create_payload(payload): + """Return the string payload filled with zero bytes + up to the next 512 byte border. + """ + blocks, remainder = divmod(len(payload), BLOCKSIZE) + if remainder > 0: + payload += (BLOCKSIZE - remainder) * NUL + return payload + + @classmethod + def _create_gnu_long_header(cls, name, type, encoding, errors): + """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + """ + name = name.encode(encoding, errors) + NUL + + info = {} + info["name"] = "././@LongLink" + info["type"] = type + info["size"] = len(name) + info["magic"] = GNU_MAGIC + + # create extended header + name blocks. + return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ + cls._create_payload(name) + + @classmethod + def _create_pax_generic_header(cls, pax_headers, type, encoding): + """Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + """ + # Check if one of the fields contains surrogate characters and thereby + # forces hdrcharset=BINARY, see _proc_pax() for more information. + binary = False + for keyword, value in pax_headers.items(): + try: + value.encode("utf8", "strict") + except UnicodeEncodeError: + binary = True + break + + records = b"" + if binary: + # Put the hdrcharset field at the beginning of the header. + records += b"21 hdrcharset=BINARY\n" + + for keyword, value in pax_headers.items(): + keyword = keyword.encode("utf8") + if binary: + # Try to restore the original byte representation of `value'. + # Needless to say, that the encoding must match the string. + value = value.encode(encoding, "surrogateescape") + else: + value = value.encode("utf8") + + l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' + n = p = 0 + while True: + n = l + len(str(p)) + if n == p: + break + p = n + records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" + + # We use a hardcoded "././@PaxHeader" name like star does + # instead of the one that POSIX recommends. + info = {} + info["name"] = "././@PaxHeader" + info["type"] = type + info["size"] = len(records) + info["magic"] = POSIX_MAGIC + + # Create pax header + record blocks. + return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ + cls._create_payload(records) + + @classmethod + def frombuf(cls, buf, encoding, errors): + """Construct a TarInfo object from a 512 byte bytes object. 
+ """ + if len(buf) == 0: + raise EmptyHeaderError("empty header") + if len(buf) != BLOCKSIZE: + raise TruncatedHeaderError("truncated header") + if buf.count(NUL) == BLOCKSIZE: + raise EOFHeaderError("end of file header") + + chksum = nti(buf[148:156]) + if chksum not in calc_chksums(buf): + raise InvalidHeaderError("bad checksum") + + obj = cls() + obj.name = nts(buf[0:100], encoding, errors) + obj.mode = nti(buf[100:108]) + obj.uid = nti(buf[108:116]) + obj.gid = nti(buf[116:124]) + obj.size = nti(buf[124:136]) + obj.mtime = nti(buf[136:148]) + obj.chksum = chksum + obj.type = buf[156:157] + obj.linkname = nts(buf[157:257], encoding, errors) + obj.uname = nts(buf[265:297], encoding, errors) + obj.gname = nts(buf[297:329], encoding, errors) + obj.devmajor = nti(buf[329:337]) + obj.devminor = nti(buf[337:345]) + prefix = nts(buf[345:500], encoding, errors) + + # Old V7 tar format represents a directory as a regular + # file with a trailing slash. + if obj.type == AREGTYPE and obj.name.endswith("/"): + obj.type = DIRTYPE + + # The old GNU sparse format occupies some of the unused + # space in the buffer for up to 4 sparse structures. + # Save the them for later processing in _proc_sparse(). + if obj.type == GNUTYPE_SPARSE: + pos = 386 + structs = [] + for i in range(4): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[482]) + origsize = nti(buf[483:495]) + obj._sparse_structs = (structs, isextended, origsize) + + # Remove redundant slashes from directories. + if obj.isdir(): + obj.name = obj.name.rstrip("/") + + # Reconstruct a ustar longname. + if prefix and obj.type not in GNU_TYPES: + obj.name = prefix + "/" + obj.name + return obj + + @classmethod + def fromtarfile(cls, tarfile): + """Return the next TarInfo object from TarFile object + tarfile. + """ + buf = tarfile.fileobj.read(BLOCKSIZE) + obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) + obj.offset = tarfile.fileobj.tell() - BLOCKSIZE + return obj._proc_member(tarfile) + + #-------------------------------------------------------------------------- + # The following are methods that are called depending on the type of a + # member. The entry point is _proc_member() which can be overridden in a + # subclass to add custom _proc_*() methods. A _proc_*() method MUST + # implement the following + # operations: + # 1. Set self.offset_data to the position where the data blocks begin, + # if there is data that follows. + # 2. Set tarfile.offset to the position where the next member's header will + # begin. + # 3. Return self or another valid TarInfo object. + def _proc_member(self, tarfile): + """Choose the right processing method depending on + the type and call it. + """ + if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): + return self._proc_gnulong(tarfile) + elif self.type == GNUTYPE_SPARSE: + return self._proc_sparse(tarfile) + elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): + return self._proc_pax(tarfile) + else: + return self._proc_builtin(tarfile) + + def _proc_builtin(self, tarfile): + """Process a builtin type or an unknown type which + will be treated as a regular file. + """ + self.offset_data = tarfile.fileobj.tell() + offset = self.offset_data + if self.isreg() or self.type not in SUPPORTED_TYPES: + # Skip the following data blocks. + offset += self._block(self.size) + tarfile.offset = offset + + # Patch the TarInfo object with saved global + # header information. 
+ self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) + + return self + + def _proc_gnulong(self, tarfile): + """Process the blocks that hold a GNU longname + or longlink member. + """ + buf = tarfile.fileobj.read(self._block(self.size)) + + # Fetch the next header and process it. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Patch the TarInfo object from the next header with + # the longname information. + next.offset = self.offset + if self.type == GNUTYPE_LONGNAME: + next.name = nts(buf, tarfile.encoding, tarfile.errors) + elif self.type == GNUTYPE_LONGLINK: + next.linkname = nts(buf, tarfile.encoding, tarfile.errors) + + return next + + def _proc_sparse(self, tarfile): + """Process a GNU sparse header plus extra headers. + """ + # We already collected some sparse structures in frombuf(). + structs, isextended, origsize = self._sparse_structs + del self._sparse_structs + + # Collect sparse structures from extended header blocks. + while isextended: + buf = tarfile.fileobj.read(BLOCKSIZE) + pos = 0 + for i in range(21): + try: + offset = nti(buf[pos:pos + 12]) + numbytes = nti(buf[pos + 12:pos + 24]) + except ValueError: + break + if offset and numbytes: + structs.append((offset, numbytes)) + pos += 24 + isextended = bool(buf[504]) + self.sparse = structs + + self.offset_data = tarfile.fileobj.tell() + tarfile.offset = self.offset_data + self._block(self.size) + self.size = origsize + return self + + def _proc_pax(self, tarfile): + """Process an extended or global header as described in + POSIX.1-2008. + """ + # Read the header information. + buf = tarfile.fileobj.read(self._block(self.size)) + + # A pax header stores supplemental information for either + # the following file (extended) or all following files + # (global). + if self.type == XGLTYPE: + pax_headers = tarfile.pax_headers + else: + pax_headers = tarfile.pax_headers.copy() + + # Check if the pax header contains a hdrcharset field. This tells us + # the encoding of the path, linkpath, uname and gname fields. Normally, + # these fields are UTF-8 encoded but since POSIX.1-2008 tar + # implementations are allowed to store them as raw binary strings if + # the translation to UTF-8 fails. + match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) + if match is not None: + pax_headers["hdrcharset"] = match.group(1).decode("utf8") + + # For the time being, we don't care about anything other than "BINARY". + # The only other value that is currently allowed by the standard is + # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. + hdrcharset = pax_headers.get("hdrcharset") + if hdrcharset == "BINARY": + encoding = tarfile.encoding + else: + encoding = "utf8" + + # Parse pax header information. A record looks like that: + # "%d %s=%s\n" % (length, keyword, value). length is the size + # of the complete record including the length field itself and + # the newline. keyword and value are both UTF-8 encoded strings. + regex = re.compile(br"(\d+) ([^=]+)=") + pos = 0 + while True: + match = regex.match(buf, pos) + if not match: + break + + length, keyword = match.groups() + length = int(length) + value = buf[match.end(2) + 1:match.start(1) + length - 1] + + # Normally, we could just use "utf8" as the encoding and "strict" + # as the error handler, but we better not take the risk. 
For + # example, GNU tar <= 1.23 is known to store filenames it cannot + # translate to UTF-8 as raw strings (unfortunately without a + # hdrcharset=BINARY header). + # We first try the strict standard encoding, and if that fails we + # fall back on the user's encoding and error handler. + keyword = self._decode_pax_field(keyword, "utf8", "utf8", + tarfile.errors) + if keyword in PAX_NAME_FIELDS: + value = self._decode_pax_field(value, encoding, tarfile.encoding, + tarfile.errors) + else: + value = self._decode_pax_field(value, "utf8", "utf8", + tarfile.errors) + + pax_headers[keyword] = value + pos += length + + # Fetch the next header. + try: + next = self.fromtarfile(tarfile) + except HeaderError: + raise SubsequentHeaderError("missing or bad subsequent header") + + # Process GNU sparse information. + if "GNU.sparse.map" in pax_headers: + # GNU extended sparse format version 0.1. + self._proc_gnusparse_01(next, pax_headers) + + elif "GNU.sparse.size" in pax_headers: + # GNU extended sparse format version 0.0. + self._proc_gnusparse_00(next, pax_headers, buf) + + elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": + # GNU extended sparse format version 1.0. + self._proc_gnusparse_10(next, pax_headers, tarfile) + + if self.type in (XHDTYPE, SOLARIS_XHDTYPE): + # Patch the TarInfo object with the extended header info. + next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) + next.offset = self.offset + + if "size" in pax_headers: + # If the extended header replaces the size field, + # we need to recalculate the offset where the next + # header starts. + offset = next.offset_data + if next.isreg() or next.type not in SUPPORTED_TYPES: + offset += next._block(next.size) + tarfile.offset = offset + + return next + + def _proc_gnusparse_00(self, next, pax_headers, buf): + """Process a GNU tar extended sparse header, version 0.0. + """ + offsets = [] + for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): + offsets.append(int(match.group(1))) + numbytes = [] + for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): + numbytes.append(int(match.group(1))) + next.sparse = list(zip(offsets, numbytes)) + + def _proc_gnusparse_01(self, next, pax_headers): + """Process a GNU tar extended sparse header, version 0.1. + """ + sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _proc_gnusparse_10(self, next, pax_headers, tarfile): + """Process a GNU tar extended sparse header, version 1.0. + """ + fields = None + sparse = [] + buf = tarfile.fileobj.read(BLOCKSIZE) + fields, buf = buf.split(b"\n", 1) + fields = int(fields) + while len(sparse) < fields * 2: + if b"\n" not in buf: + buf += tarfile.fileobj.read(BLOCKSIZE) + number, buf = buf.split(b"\n", 1) + sparse.append(int(number)) + next.offset_data = tarfile.fileobj.tell() + next.sparse = list(zip(sparse[::2], sparse[1::2])) + + def _apply_pax_info(self, pax_headers, encoding, errors): + """Replace fields with supplemental information from a previous + pax extended or global header. 
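+        Numeric fields are converted via PAX_NUMBER_FIELDS (falling back
+        to 0 on invalid values) and "path" values lose a trailing slash.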
+ """ + for keyword, value in pax_headers.items(): + if keyword == "GNU.sparse.name": + setattr(self, "path", value) + elif keyword == "GNU.sparse.size": + setattr(self, "size", int(value)) + elif keyword == "GNU.sparse.realsize": + setattr(self, "size", int(value)) + elif keyword in PAX_FIELDS: + if keyword in PAX_NUMBER_FIELDS: + try: + value = PAX_NUMBER_FIELDS[keyword](value) + except ValueError: + value = 0 + if keyword == "path": + value = value.rstrip("/") + setattr(self, keyword, value) + + self.pax_headers = pax_headers.copy() + + def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): + """Decode a single field from a pax record. + """ + try: + return value.decode(encoding, "strict") + except UnicodeDecodeError: + return value.decode(fallback_encoding, fallback_errors) + + def _block(self, count): + """Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + """ + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def isreg(self): + return self.type in REGULAR_TYPES + def isfile(self): + return self.isreg() + def isdir(self): + return self.type == DIRTYPE + def issym(self): + return self.type == SYMTYPE + def islnk(self): + return self.type == LNKTYPE + def ischr(self): + return self.type == CHRTYPE + def isblk(self): + return self.type == BLKTYPE + def isfifo(self): + return self.type == FIFOTYPE + def issparse(self): + return self.sparse is not None + def isdev(self): + return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) +# class TarInfo + +class TarFile(object): + """The TarFile Class provides an interface to tar archives. + """ + + debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) + + dereference = False # If true, add content of linked file to the + # tar file, else the link. + + ignore_zeros = False # If true, skips empty or invalid blocks and + # continues processing. + + errorlevel = 1 # If 0, fatal errors only appear in debug + # messages (if debug >= 0). If > 0, errors + # are passed to the caller as exceptions. + + format = DEFAULT_FORMAT # The format to use when creating an archive. + + encoding = ENCODING # Encoding for 8-bit character strings. + + errors = None # Error handler for unicode conversion. + + tarinfo = TarInfo # The default TarInfo class to use. + + fileobject = ExFileObject # The default ExFileObject class to use. + + def __init__(self, name=None, mode="r", fileobj=None, format=None, + tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, + errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): + """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self.mode = mode + self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj: + if self.mode == "a" and not os.path.exists(name): + # Create nonexistent files in append mode. 
+ self.mode = "w" + self._mode = "wb" + fileobj = bltn_open(name, self._mode) + self._extfileobj = False + else: + if name is None and hasattr(fileobj, "name"): + name = fileobj.name + if hasattr(fileobj, "mode"): + self._mode = fileobj.mode + self._extfileobj = True + self.name = os.path.abspath(name) if name else None + self.fileobj = fileobj + + # Init attributes. + if format is not None: + self.format = format + if tarinfo is not None: + self.tarinfo = tarinfo + if dereference is not None: + self.dereference = dereference + if ignore_zeros is not None: + self.ignore_zeros = ignore_zeros + if encoding is not None: + self.encoding = encoding + self.errors = errors + + if pax_headers is not None and self.format == PAX_FORMAT: + self.pax_headers = pax_headers + else: + self.pax_headers = {} + + if debug is not None: + self.debug = debug + if errorlevel is not None: + self.errorlevel = errorlevel + + # Init datastructures. + self.closed = False + self.members = [] # list of members as TarInfo objects + self._loaded = False # flag if all members have been read + self.offset = self.fileobj.tell() + # current position in the archive file + self.inodes = {} # dictionary caching the inodes of + # archive members already added + + try: + if self.mode == "r": + self.firstmember = None + self.firstmember = self.next() + + if self.mode == "a": + # Move to the end of the archive, + # before the first empty block. + while True: + self.fileobj.seek(self.offset) + try: + tarinfo = self.tarinfo.fromtarfile(self) + self.members.append(tarinfo) + except EOFHeaderError: + self.fileobj.seek(self.offset) + break + except HeaderError as e: + raise ReadError(str(e)) + + if self.mode in "aw": + self._loaded = True + + if self.pax_headers: + buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) + self.fileobj.write(buf) + self.offset += len(buf) + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise + + #-------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # TarFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass TarFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): + """Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + + if not name and not fileobj: + raise ValueError("nothing to open") + + if mode in ("r", "r:*"): + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + if fileobj is not None: + saved_pos = fileobj.tell() + try: + return func(name, "r", fileobj, **kwargs) + except (ReadError, CompressionError) as e: + if fileobj is not None: + fileobj.seek(saved_pos) + continue + raise ReadError("file could not be opened successfully") + + elif ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError("unknown compression type %r" % comptype) + return func(name, filemode, fileobj, **kwargs) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "tar" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise + t._extfileobj = False + return t + + elif mode in "aw": + return cls.taropen(name, mode, fileobj, **kwargs) + + raise ValueError("undiscernible mode") + + @classmethod + def taropen(cls, name, mode="r", fileobj=None, **kwargs): + """Open uncompressed tar archive name for reading or writing. + """ + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj, **kwargs) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + extfileobj = fileobj is not None + try: + fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) + t = cls.taropen(name, mode, fileobj, **kwargs) + except IOError: + if not extfileobj and fileobj is not None: + fileobj.close() + if fileobj is None: + raise + raise ReadError("not a gzip file") + except: + if not extfileobj and fileobj is not None: + fileobj.close() + raise + t._extfileobj = extfileobj + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): + """Open bzip2 compressed tar archive name for reading or writing. 
+ Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + if fileobj is not None: + fileobj = _BZ2Proxy(fileobj, mode) + else: + fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) + + try: + t = cls.taropen(name, mode, fileobj, **kwargs) + except (IOError, EOFError): + fileobj.close() + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "tar": "taropen", # uncompressed tar + "gz": "gzopen", # gzip compressed tar + "bz2": "bz2open" # bzip2 compressed tar + } + + #-------------------------------------------------------------------------- + # The public methods which TarFile provides: + + def close(self): + """Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + """ + if self.closed: + return + + if self.mode in "aw": + self.fileobj.write(NUL * (BLOCKSIZE * 2)) + self.offset += (BLOCKSIZE * 2) + # fill up the end with zero-blocks + # (like option -b20 for tar does) + blocks, remainder = divmod(self.offset, RECORDSIZE) + if remainder > 0: + self.fileobj.write(NUL * (RECORDSIZE - remainder)) + + if not self._extfileobj: + self.fileobj.close() + self.closed = True + + def getmember(self, name): + """Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + """ + tarinfo = self._getmember(name) + if tarinfo is None: + raise KeyError("filename %r not found" % name) + return tarinfo + + def getmembers(self): + """Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. + return self.members + + def getnames(self): + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + """ + return [tarinfo.name for tarinfo in self.getmembers()] + + def gettarinfo(self, name=None, arcname=None, fileobj=None): + """Create a TarInfo object for either the file `name' or the file + object `fileobj' (using os.fstat on its file descriptor). You can + modify some of the TarInfo's attributes before you add it using + addfile(). If given, `arcname' specifies an alternative name for the + file in the archive. + """ + self._check("aw") + + # When fileobj is given, replace name by + # fileobj's real name. + if fileobj is not None: + name = fileobj.name + + # Building the name of the member in the archive. + # Backward slashes are converted to forward slashes, + # Absolute paths are turned to relative paths. + if arcname is None: + arcname = name + drv, arcname = os.path.splitdrive(arcname) + arcname = arcname.replace(os.sep, "/") + arcname = arcname.lstrip("/") + + # Now, fill the TarInfo object with + # information specific for the file. + tarinfo = self.tarinfo() + tarinfo.tarfile = self + + # Use os.stat or os.lstat, depending on platform + # and if symlinks shall be resolved. 
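+        # (os.lstat records a symlink itself rather than its target, so
+        # links are archived as links unless dereference is set)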
+ if fileobj is None: + if hasattr(os, "lstat") and not self.dereference: + statres = os.lstat(name) + else: + statres = os.stat(name) + else: + statres = os.fstat(fileobj.fileno()) + linkname = "" + + stmd = statres.st_mode + if stat.S_ISREG(stmd): + inode = (statres.st_ino, statres.st_dev) + if not self.dereference and statres.st_nlink > 1 and \ + inode in self.inodes and arcname != self.inodes[inode]: + # Is it a hardlink to an already + # archived file? + type = LNKTYPE + linkname = self.inodes[inode] + else: + # The inode is added only if its valid. + # For win32 it is always 0. + type = REGTYPE + if inode[0]: + self.inodes[inode] = arcname + elif stat.S_ISDIR(stmd): + type = DIRTYPE + elif stat.S_ISFIFO(stmd): + type = FIFOTYPE + elif stat.S_ISLNK(stmd): + type = SYMTYPE + linkname = os.readlink(name) + elif stat.S_ISCHR(stmd): + type = CHRTYPE + elif stat.S_ISBLK(stmd): + type = BLKTYPE + else: + return None + + # Fill the TarInfo object with all + # information we can get. + tarinfo.name = arcname + tarinfo.mode = stmd + tarinfo.uid = statres.st_uid + tarinfo.gid = statres.st_gid + if type == REGTYPE: + tarinfo.size = statres.st_size + else: + tarinfo.size = 0 + tarinfo.mtime = statres.st_mtime + tarinfo.type = type + tarinfo.linkname = linkname + if pwd: + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + if grp: + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass + + if type in (CHRTYPE, BLKTYPE): + if hasattr(os, "major") and hasattr(os, "minor"): + tarinfo.devmajor = os.major(statres.st_rdev) + tarinfo.devminor = os.minor(statres.st_rdev) + return tarinfo + + def list(self, verbose=True): + """Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. + """ + self._check() + + for tarinfo in self: + if verbose: + print(filemode(tarinfo.mode), end=' ') + print("%s/%s" % (tarinfo.uname or tarinfo.uid, + tarinfo.gname or tarinfo.gid), end=' ') + if tarinfo.ischr() or tarinfo.isblk(): + print("%10s" % ("%d,%d" \ + % (tarinfo.devmajor, tarinfo.devminor)), end=' ') + else: + print("%10d" % tarinfo.size, end=' ') + print("%d-%02d-%02d %02d:%02d:%02d" \ + % time.localtime(tarinfo.mtime)[:6], end=' ') + + print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') + + if verbose: + if tarinfo.issym(): + print("->", tarinfo.linkname, end=' ') + if tarinfo.islnk(): + print("link to", tarinfo.linkname, end=' ') + print() + + def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): + """Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `exclude' is a function that should + return True for each filename to be excluded. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + """ + self._check("aw") + + if arcname is None: + arcname = name + + # Exclude pathnames. + if exclude is not None: + import warnings + warnings.warn("use the filter argument instead", + DeprecationWarning, 2) + if exclude(name): + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Skip if somebody tries to archive the archive... 
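+        # (the archive would grow while it is being read, so it can never
+        # contain itself)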
+ if self.name is not None and os.path.abspath(name) == self.name: + self._dbg(2, "tarfile: Skipped %r" % name) + return + + self._dbg(1, name) + + # Create a TarInfo object from the file. + tarinfo = self.gettarinfo(name, arcname) + + if tarinfo is None: + self._dbg(1, "tarfile: Unsupported type %r" % name) + return + + # Change or exclude the TarInfo object. + if filter is not None: + tarinfo = filter(tarinfo) + if tarinfo is None: + self._dbg(2, "tarfile: Excluded %r" % name) + return + + # Append the tar header and data to the archive. + if tarinfo.isreg(): + f = bltn_open(name, "rb") + self.addfile(tarinfo, f) + f.close() + + elif tarinfo.isdir(): + self.addfile(tarinfo) + if recursive: + for f in os.listdir(name): + self.add(os.path.join(name, f), os.path.join(arcname, f), + recursive, exclude, filter=filter) + + else: + self.addfile(tarinfo) + + def addfile(self, tarinfo, fileobj=None): + """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, tarinfo.size bytes are read from it and added to the archive. + You can create TarInfo objects using gettarinfo(). + On Windows platforms, `fileobj' should always be opened with mode + 'rb' to avoid irritation about the file size. + """ + self._check("aw") + + tarinfo = copy.copy(tarinfo) + + buf = tarinfo.tobuf(self.format, self.encoding, self.errors) + self.fileobj.write(buf) + self.offset += len(buf) + + # If there's data to follow, append it. + if fileobj is not None: + copyfileobj(fileobj, self.fileobj, tarinfo.size) + blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) + if remainder > 0: + self.fileobj.write(NUL * (BLOCKSIZE - remainder)) + blocks += 1 + self.offset += blocks * BLOCKSIZE + + self.members.append(tarinfo) + + def extractall(self, path=".", members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + directories = [] + + if members is None: + members = self + + for tarinfo in members: + if tarinfo.isdir(): + # Extract directories with a safe mode. + directories.append(tarinfo) + tarinfo = copy.copy(tarinfo) + tarinfo.mode = 0o700 + # Do not set_attrs directories, as we will do that further down + self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) + + # Reverse sort directories. + directories.sort(key=lambda a: a.name) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. + for tarinfo in directories: + dirpath = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, dirpath) + self.utime(tarinfo, dirpath) + self.chmod(tarinfo, dirpath) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extract(self, member, path="", set_attrs=True): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + # Prepare the link target for makelink(). 
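+        # (hard link targets are resolved relative to the extraction path,
+        # see makelink() below)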
+ if tarinfo.islnk(): + tarinfo._link_target = os.path.join(path, tarinfo.linkname) + + try: + self._extract_member(tarinfo, os.path.join(path, tarinfo.name), + set_attrs=set_attrs) + except EnvironmentError as e: + if self.errorlevel > 0: + raise + else: + if e.filename is None: + self._dbg(1, "tarfile: %s" % e.strerror) + else: + self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) + except ExtractError as e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, "tarfile: %s" % e) + + def extractfile(self, member): + """Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file, a + file-like object is returned. If `member' is a link, a file-like + object is constructed from the link's target. If `member' is none of + the above, None is returned. + The file-like object is read-only and provides the following + methods: read(), readline(), readlines(), seek() and tell() + """ + self._check("r") + + if isinstance(member, str): + tarinfo = self.getmember(member) + else: + tarinfo = member + + if tarinfo.isreg(): + return self.fileobject(self, tarinfo) + + elif tarinfo.type not in SUPPORTED_TYPES: + # If a member's type is unknown, it is treated as a + # regular file. + return self.fileobject(self, tarinfo) + + elif tarinfo.islnk() or tarinfo.issym(): + if isinstance(self.fileobj, _Stream): + # A small but ugly workaround for the case that someone tries + # to extract a (sym)link as a file-object from a non-seekable + # stream of tar blocks. + raise StreamError("cannot extract (sym)link as file object") + else: + # A (sym)link's file object is its target's file object. + return self.extractfile(self._find_link_target(tarinfo)) + else: + # If there's no data associated with the member (directory, chrdev, + # blkdev, etc.), return None instead of a file object. + return None + + def _extract_member(self, tarinfo, targetpath, set_attrs=True): + """Extract the TarInfo object tarinfo to a physical + file called targetpath. + """ + # Fetch the TarInfo object for the given name + # and build the destination pathname, replacing + # forward slashes to platform specific separators. + targetpath = targetpath.rstrip("/") + targetpath = targetpath.replace("/", os.sep) + + # Create all upper directories. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + # Create directories that are not part of the archive with + # default permissions. + os.makedirs(upperdirs) + + if tarinfo.islnk() or tarinfo.issym(): + self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) + else: + self._dbg(1, tarinfo.name) + + if tarinfo.isreg(): + self.makefile(tarinfo, targetpath) + elif tarinfo.isdir(): + self.makedir(tarinfo, targetpath) + elif tarinfo.isfifo(): + self.makefifo(tarinfo, targetpath) + elif tarinfo.ischr() or tarinfo.isblk(): + self.makedev(tarinfo, targetpath) + elif tarinfo.islnk() or tarinfo.issym(): + self.makelink(tarinfo, targetpath) + elif tarinfo.type not in SUPPORTED_TYPES: + self.makeunknown(tarinfo, targetpath) + else: + self.makefile(tarinfo, targetpath) + + if set_attrs: + self.chown(tarinfo, targetpath) + if not tarinfo.issym(): + self.chmod(tarinfo, targetpath) + self.utime(tarinfo, targetpath) + + #-------------------------------------------------------------------------- + # Below are the different file methods. They are called via + # _extract_member() when extract() is called. They can be replaced in a + # subclass to implement other functionality. 
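+    #
+    # As an illustrative sketch (not part of this module), a subclass
+    # could log every extracted regular file by overriding makefile():
+    #
+    #     class LoggingTarFile(TarFile):
+    #         def makefile(self, tarinfo, targetpath):
+    #             print("writing %s" % targetpath)
+    #             TarFile.makefile(self, tarinfo, targetpath)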
+
+    def makedir(self, tarinfo, targetpath):
+        """Make a directory called targetpath.
+        """
+        try:
+            # Use a safe mode for the directory, the real mode is set
+            # later in _extract_member().
+            os.mkdir(targetpath, 0o700)
+        except EnvironmentError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def makefile(self, tarinfo, targetpath):
+        """Make a file called targetpath.
+        """
+        source = self.fileobj
+        source.seek(tarinfo.offset_data)
+        target = bltn_open(targetpath, "wb")
+        if tarinfo.sparse is not None:
+            for offset, size in tarinfo.sparse:
+                target.seek(offset)
+                copyfileobj(source, target, size)
+        else:
+            copyfileobj(source, target, tarinfo.size)
+        target.seek(tarinfo.size)
+        target.truncate()
+        target.close()
+
+    def makeunknown(self, tarinfo, targetpath):
+        """Make a file from a TarInfo object with an unknown type
+           at targetpath.
+        """
+        self.makefile(tarinfo, targetpath)
+        self._dbg(1, "tarfile: Unknown file type %r, " \
+                     "extracted as regular file." % tarinfo.type)
+
+    def makefifo(self, tarinfo, targetpath):
+        """Make a fifo called targetpath.
+        """
+        if hasattr(os, "mkfifo"):
+            os.mkfifo(targetpath)
+        else:
+            raise ExtractError("fifo not supported by system")
+
+    def makedev(self, tarinfo, targetpath):
+        """Make a character or block device called targetpath.
+        """
+        if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
+            raise ExtractError("special devices not supported by system")
+
+        mode = tarinfo.mode
+        if tarinfo.isblk():
+            mode |= stat.S_IFBLK
+        else:
+            mode |= stat.S_IFCHR
+
+        os.mknod(targetpath, mode,
+                 os.makedev(tarinfo.devmajor, tarinfo.devminor))
+
+    def makelink(self, tarinfo, targetpath):
+        """Make a (symbolic) link called targetpath. If it cannot be created
+           (platform limitation), we try to make a copy of the referenced file
+           instead of a link.
+        """
+        try:
+            # For systems that support symbolic and hard links.
+            if tarinfo.issym():
+                os.symlink(tarinfo.linkname, targetpath)
+            else:
+                # See extract().
+                if os.path.exists(tarinfo._link_target):
+                    os.link(tarinfo._link_target, targetpath)
+                else:
+                    self._extract_member(self._find_link_target(tarinfo),
+                                         targetpath)
+        except symlink_exception:
+            # The link could not be created (e.g. the platform lacks symlink
+            # support), so fall back to extracting a copy of the file the
+            # link points to, provided it can be found inside the archive.
+            try:
+                self._extract_member(self._find_link_target(tarinfo),
+                                     targetpath)
+            except KeyError:
+                raise ExtractError("unable to resolve link inside archive")
+
+    def chown(self, tarinfo, targetpath):
+        """Set owner of targetpath according to tarinfo.
+        """
+        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
+            # We have to be root to do so.
+            try:
+                g = grp.getgrnam(tarinfo.gname)[2]
+            except KeyError:
+                g = tarinfo.gid
+            try:
+                u = pwd.getpwnam(tarinfo.uname)[2]
+            except KeyError:
+                u = tarinfo.uid
+            try:
+                if tarinfo.issym() and hasattr(os, "lchown"):
+                    os.lchown(targetpath, u, g)
+                else:
+                    if sys.platform != "os2emx":
+                        os.chown(targetpath, u, g)
+            except EnvironmentError as e:
+                raise ExtractError("could not change owner")
+
+    def chmod(self, tarinfo, targetpath):
+        """Set file permissions of targetpath according to tarinfo.
+        """
+        if hasattr(os, 'chmod'):
+            try:
+                os.chmod(targetpath, tarinfo.mode)
+            except EnvironmentError as e:
+                raise ExtractError("could not change mode")
+
+    def utime(self, tarinfo, targetpath):
+        """Set modification time of targetpath according to tarinfo.
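+        Does nothing if os.utime is not available on this platform.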
+ """ + if not hasattr(os, 'utime'): + return + try: + os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) + except EnvironmentError as e: + raise ExtractError("could not change modification time") + + #-------------------------------------------------------------------------- + def next(self): + """Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + """ + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + # Read the next block. + self.fileobj.seek(self.offset) + tarinfo = None + while True: + try: + tarinfo = self.tarinfo.fromtarfile(self) + except EOFHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + except InvalidHeaderError as e: + if self.ignore_zeros: + self._dbg(2, "0x%X: %s" % (self.offset, e)) + self.offset += BLOCKSIZE + continue + elif self.offset == 0: + raise ReadError(str(e)) + except EmptyHeaderError: + if self.offset == 0: + raise ReadError("empty file") + except TruncatedHeaderError as e: + if self.offset == 0: + raise ReadError(str(e)) + except SubsequentHeaderError as e: + raise ReadError(str(e)) + break + + if tarinfo is not None: + self.members.append(tarinfo) + else: + self._loaded = True + + return tarinfo + + #-------------------------------------------------------------------------- + # Little helper methods: + + def _getmember(self, name, tarinfo=None, normalize=False): + """Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + """ + # Ensure that all members have been loaded. + members = self.getmembers() + + # Limit the member search list up to tarinfo. + if tarinfo is not None: + members = members[:members.index(tarinfo)] + + if normalize: + name = os.path.normpath(name) + + for member in reversed(members): + if normalize: + member_name = os.path.normpath(member.name) + else: + member_name = member.name + + if name == member_name: + return member + + def _load(self): + """Read through the entire archive file and look for readable + members. + """ + while True: + tarinfo = self.next() + if tarinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + """Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + """ + if self.closed: + raise IOError("%s is closed" % self.__class__.__name__) + if mode is not None and self.mode not in mode: + raise IOError("bad operation for mode %r" % self.mode) + + def _find_link_target(self, tarinfo): + """Find the target member of a symlink or hardlink member in the + archive. + """ + if tarinfo.issym(): + # Always search the entire archive. + linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname + limit = None + else: + # Search the archive before the link, because a hard link is + # just a reference to an already archived file. + linkname = tarinfo.linkname + limit = tarinfo + + member = self._getmember(linkname, tarinfo=limit, normalize=True) + if member is None: + raise KeyError("linkname %r not found" % linkname) + return member + + def __iter__(self): + """Provide an iterator object. + """ + if self._loaded: + return iter(self.members) + else: + return TarIter(self) + + def _dbg(self, level, msg): + """Write debugging output to sys.stderr. 
+ """ + if level <= self.debug: + print(msg, file=sys.stderr) + + def __enter__(self): + self._check() + return self + + def __exit__(self, type, value, traceback): + if type is None: + self.close() + else: + # An exception occurred. We must not call close() because + # it would try to write end-of-archive blocks and padding. + if not self._extfileobj: + self.fileobj.close() + self.closed = True +# class TarFile + +class TarIter(object): + """Iterator Class. + + for tarinfo in TarFile(...): + suite... + """ + + def __init__(self, tarfile): + """Construct a TarIter object. + """ + self.tarfile = tarfile + self.index = 0 + def __iter__(self): + """Return iterator object. + """ + return self + + def __next__(self): + """Return the next item using TarFile's next() method. + When all members have been read, set TarFile as _loaded. + """ + # Fix for SF #1100429: Under rare circumstances it can + # happen that getmembers() is called during iteration, + # which will cause TarIter to stop prematurely. + if not self.tarfile._loaded: + tarinfo = self.tarfile.next() + if not tarinfo: + self.tarfile._loaded = True + raise StopIteration + else: + try: + tarinfo = self.tarfile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return tarinfo + + next = __next__ # for Python 2.x + +#-------------------- +# exported functions +#-------------------- +def is_tarfile(name): + """Return True if name points to a tar archive that we + are able to handle, else return False. + """ + try: + t = open(name) + t.close() + return True + except TarError: + return False + +bltn_open = open +open = TarFile.open diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py new file mode 100644 index 0000000..069ec77 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py @@ -0,0 +1,1102 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2016 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. 
+# +from __future__ import absolute_import + +import os +import re +import sys + +if sys.version_info[0] < 3: # pragma: no cover + from StringIO import StringIO + string_types = basestring, + text_type = unicode + from types import FileType as file_type + import __builtin__ as builtins + import ConfigParser as configparser + from ._backport import shutil + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit + from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, + pathname2url, ContentTooShortError, splittype) + + def quote(s): + if isinstance(s, unicode): + s = s.encode('utf-8') + return _quote(s) + + import urllib2 + from urllib2 import (Request, urlopen, URLError, HTTPError, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + import httplib + import xmlrpclib + import Queue as queue + from HTMLParser import HTMLParser + import htmlentitydefs + raw_input = raw_input + from itertools import ifilter as filter + from itertools import ifilterfalse as filterfalse + + _userprog = None + def splituser(host): + """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + global _userprog + if _userprog is None: + import re + _userprog = re.compile('^(.*)@(.*)$') + + match = _userprog.match(host) + if match: return match.group(1, 2) + return None, host + +else: # pragma: no cover + from io import StringIO + string_types = str, + text_type = str + from io import TextIOWrapper as file_type + import builtins + import configparser + import shutil + from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, + unquote, urlsplit, urlunsplit, splittype) + from urllib.request import (urlopen, urlretrieve, Request, url2pathname, + pathname2url, + HTTPBasicAuthHandler, HTTPPasswordMgr, + HTTPSHandler, HTTPHandler, HTTPRedirectHandler, + build_opener) + from urllib.error import HTTPError, URLError, ContentTooShortError + import http.client as httplib + import urllib.request as urllib2 + import xmlrpc.client as xmlrpclib + import queue + from html.parser import HTMLParser + import html.entities as htmlentitydefs + raw_input = input + from itertools import filterfalse + filter = filter + +try: + from ssl import match_hostname, CertificateError +except ImportError: # pragma: no cover + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + parts = dn.split('.') + leftmost, remainder = parts[0], parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survery of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. + # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. 
+ # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate, match_hostname needs a " + "SSL socket or SSL context with either " + "CERT_OPTIONAL or CERT_REQUIRED") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + +try: + from types import SimpleNamespace as Container +except ImportError: # pragma: no cover + class Container(object): + """ + A generic container for when multiple values need to be returned + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +try: + from shutil import which +except ImportError: # pragma: no cover + # Implementation from Python 3.3 + def which(cmd, mode=os.F_OK | os.X_OK, path=None): + """Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + """ + # Check that a given file can be accessed with the correct mode. + # Additionally check that `file` is not a directory, as on Windows + # directories pass the os.access check. + def _access_check(fn, mode): + return (os.path.exists(fn) and os.access(fn, mode) + and not os.path.isdir(fn)) + + # If we're given a path with a directory part, look it up directly rather + # than referring to PATH directories. This includes checking relative to the + # current directory, e.g. ./script + if os.path.dirname(cmd): + if _access_check(cmd, mode): + return cmd + return None + + if path is None: + path = os.environ.get("PATH", os.defpath) + if not path: + return None + path = path.split(os.pathsep) + + if sys.platform == "win32": + # The current directory takes precedence on Windows. 
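+            # (this matches the Windows shell, which looks in the current
+            # directory before consulting PATH)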
+ if not os.curdir in path: + path.insert(0, os.curdir) + + # PATHEXT is necessary to check on Windows. + pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + # See if the given file matches any of the expected path extensions. + # This will allow us to short circuit when given "python.exe". + # If it does match, only test that one, otherwise we have to try + # others. + if any(cmd.lower().endswith(ext.lower()) for ext in pathext): + files = [cmd] + else: + files = [cmd + ext for ext in pathext] + else: + # On other platforms you don't have things like PATHEXT to tell you + # what file suffixes are executable, so just pass on cmd as-is. + files = [cmd] + + seen = set() + for dir in path: + normdir = os.path.normcase(dir) + if not normdir in seen: + seen.add(normdir) + for thefile in files: + name = os.path.join(dir, thefile) + if _access_check(name, mode): + return name + return None + + +# ZipFile is a context manager in 2.7, but not in 2.6 + +from zipfile import ZipFile as BaseZipFile + +if hasattr(BaseZipFile, '__enter__'): # pragma: no cover + ZipFile = BaseZipFile +else: + from zipfile import ZipExtFile as BaseZipExtFile + + class ZipExtFile(BaseZipExtFile): + def __init__(self, base): + self.__dict__.update(base.__dict__) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + class ZipFile(BaseZipFile): + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + # return None, so if an exception occurred, it will propagate + + def open(self, *args, **kwargs): + base = BaseZipFile.open(self, *args, **kwargs) + return ZipExtFile(base) + +try: + from platform import python_implementation +except ImportError: # pragma: no cover + def python_implementation(): + """Return a string identifying the Python implementation.""" + if 'PyPy' in sys.version: + return 'PyPy' + if os.name == 'java': + return 'Jython' + if sys.version.startswith('IronPython'): + return 'IronPython' + return 'CPython' + +try: + import sysconfig +except ImportError: # pragma: no cover + from ._backport import sysconfig + +try: + callable = callable +except NameError: # pragma: no cover + from collections import Callable + + def callable(obj): + return isinstance(obj, Callable) + + +try: + fsencode = os.fsencode + fsdecode = os.fsdecode +except AttributeError: # pragma: no cover + _fsencoding = sys.getfilesystemencoding() + if _fsencoding == 'mbcs': + _fserrors = 'strict' + else: + _fserrors = 'surrogateescape' + + def fsencode(filename): + if isinstance(filename, bytes): + return filename + elif isinstance(filename, text_type): + return filename.encode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + + def fsdecode(filename): + if isinstance(filename, text_type): + return filename + elif isinstance(filename, bytes): + return filename.decode(_fsencoding, _fserrors) + else: + raise TypeError("expect bytes or str, not %s" % + type(filename).__name__) + +try: + from tokenize import detect_encoding +except ImportError: # pragma: no cover + from codecs import BOM_UTF8, lookup + import re + + cookie_re = re.compile("coding[:=]\s*([-\w.]+)") + + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. 
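+        # (twelve characters are enough to recognise "utf-8" and the
+        # "iso-8859-1" spellings that are normalised below)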
+        enc = orig_enc[:12].lower().replace("_", "-")
+        if enc == "utf-8" or enc.startswith("utf-8-"):
+            return "utf-8"
+        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+            return "iso-8859-1"
+        return orig_enc
+
+    def detect_encoding(readline):
+        """
+        The detect_encoding() function is used to detect the encoding that should
+        be used to decode a Python source file. It requires one argument, readline,
+        in the same way as the tokenize() generator.
+
+        It will call readline a maximum of twice, and return the encoding used
+        (as a string) and a list of any lines (left as bytes) it has read in.
+
+        It detects the encoding from the presence of a utf-8 bom or an encoding
+        cookie as specified in pep-0263. If both a bom and a cookie are present,
+        but disagree, a SyntaxError will be raised. If the encoding cookie is an
+        invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+        'utf-8-sig' is returned.
+
+        If no encoding is specified, then the default of 'utf-8' will be returned.
+        """
+        try:
+            filename = readline.__self__.name
+        except AttributeError:
+            filename = None
+        bom_found = False
+        encoding = None
+        default = 'utf-8'
+        def read_or_stop():
+            try:
+                return readline()
+            except StopIteration:
+                return b''
+
+        def find_cookie(line):
+            try:
+                # Decode as UTF-8. Either the line is an encoding declaration,
+                # in which case it should be pure ASCII, or it must be UTF-8
+                # per default encoding.
+                line_string = line.decode('utf-8')
+            except UnicodeDecodeError:
+                msg = "invalid or missing encoding declaration"
+                if filename is not None:
+                    msg = '{} for {!r}'.format(msg, filename)
+                raise SyntaxError(msg)
+
+            matches = cookie_re.findall(line_string)
+            if not matches:
+                return None
+            encoding = _get_normal_name(matches[0])
+            try:
+                codec = lookup(encoding)
+            except LookupError:
+                # This behaviour mimics the Python interpreter
+                if filename is None:
+                    msg = "unknown encoding: " + encoding
+                else:
+                    msg = "unknown encoding for {!r}: {}".format(filename,
+                                                                 encoding)
+                raise SyntaxError(msg)
+
+            if bom_found:
+                if codec.name != 'utf-8':
+                    # This behaviour mimics the Python interpreter
+                    if filename is None:
+                        msg = 'encoding problem: utf-8'
+                    else:
+                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
+                    raise SyntaxError(msg)
+                encoding += '-sig'
+            return encoding
+
+        first = read_or_stop()
+        if first.startswith(BOM_UTF8):
+            bom_found = True
+            first = first[3:]
+            default = 'utf-8-sig'
+        if not first:
+            return default, []
+
+        encoding = find_cookie(first)
+        if encoding:
+            return encoding, [first]
+
+        second = read_or_stop()
+        if not second:
+            return default, [first]
+
+        encoding = find_cookie(second)
+        if encoding:
+            return encoding, [first, second]
+
+        return default, [first, second]
+
+# For converting & <-> &amp; etc.
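+# (html.escape and unescape moved between modules across Python versions,
+# hence the fallbacks below)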
+try: + from html import escape +except ImportError: + from cgi import escape +if sys.version_info[:2] < (3, 4): + unescape = HTMLParser().unescape +else: + from html import unescape + +try: + from collections import ChainMap +except ImportError: # pragma: no cover + from collections import MutableMapping + + try: + from reprlib import recursive_repr as _recursive_repr + except ImportError: + def _recursive_repr(fillvalue='...'): + ''' + Decorator to make a repr function return fillvalue for a recursive + call + ''' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' 
+ return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + +try: + from imp import cache_from_source +except ImportError: # pragma: no cover + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + +try: + from collections import OrderedDict +except ImportError: # pragma: no cover +## {{{ http://code.activestate.com/recipes/576693/ (r9) +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. + try: + from thread import get_ident as _get_ident + except ImportError: + from dummy_thread import get_ident as _get_ident + + try: + from _abcoll import KeysView, ValuesView, ItemsView + except ImportError: + pass + + + class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. 
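+            # (the sentinel root node never gets deleted, so both
+            # neighbours of a real link always exist)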
+            dict_delitem(self, key)
+            link_prev, link_next, key = self.__map.pop(key)
+            link_prev[1] = link_next
+            link_next[0] = link_prev
+
+        def __iter__(self):
+            'od.__iter__() <==> iter(od)'
+            root = self.__root
+            curr = root[1]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[1]
+
+        def __reversed__(self):
+            'od.__reversed__() <==> reversed(od)'
+            root = self.__root
+            curr = root[0]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[0]
+
+        def clear(self):
+            'od.clear() -> None.  Remove all items from od.'
+            try:
+                for node in self.__map.itervalues():
+                    del node[:]
+                root = self.__root
+                root[:] = [root, root, None]
+                self.__map.clear()
+            except AttributeError:
+                pass
+            dict.clear(self)
+
+        def popitem(self, last=True):
+            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+            Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+            '''
+            if not self:
+                raise KeyError('dictionary is empty')
+            root = self.__root
+            if last:
+                link = root[0]
+                link_prev = link[0]
+                link_prev[1] = root
+                root[0] = link_prev
+            else:
+                link = root[1]
+                link_next = link[1]
+                root[1] = link_next
+                link_next[0] = root
+            key = link[2]
+            del self.__map[key]
+            value = dict.pop(self, key)
+            return key, value
+
+        # -- the following methods do not depend on the internal structure --
+
+        def keys(self):
+            'od.keys() -> list of keys in od'
+            return list(self)
+
+        def values(self):
+            'od.values() -> list of values in od'
+            return [self[key] for key in self]
+
+        def items(self):
+            'od.items() -> list of (key, value) pairs in od'
+            return [(key, self[key]) for key in self]
+
+        def iterkeys(self):
+            'od.iterkeys() -> an iterator over the keys in od'
+            return iter(self)
+
+        def itervalues(self):
+            'od.itervalues() -> an iterator over the values in od'
+            for k in self:
+                yield self[k]
+
+        def iteritems(self):
+            'od.iteritems() -> an iterator over the (key, value) items in od'
+            for k in self:
+                yield (k, self[k])
+
+        def update(*args, **kwds):
+            '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
+
+            If E is a dict instance, does:           for k in E: od[k] = E[k]
+            If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
+            Or if E is an iterable of items, does:   for k, v in E: od[k] = v
+            In either case, this is followed by:     for k, v in F.items(): od[k] = v
+
+            '''
+            if len(args) > 2:
+                raise TypeError('update() takes at most 2 positional '
+                                'arguments (%d given)' % (len(args),))
+            elif not args:
+                raise TypeError('update() takes at least 1 argument (0 given)')
+            self = args[0]
+            # Make progressively weaker assumptions about "other"
+            other = ()
+            if len(args) == 2:
+                other = args[1]
+            if isinstance(other, dict):
+                for key in other:
+                    self[key] = other[key]
+            elif hasattr(other, 'keys'):
+                for key in other.keys():
+                    self[key] = other[key]
+            else:
+                for key, value in other:
+                    self[key] = value
+            for key, value in kwds.items():
+                self[key] = value
+
+        __update = update  # let subclasses override update without breaking __init__
+
+        __marker = object()
+
+        def pop(self, key, default=__marker):
+            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+            If key is not found, d is returned if given, otherwise KeyError is raised.
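+
+            Illustrative example (added commentary, not in the original recipe)::
+
+                >>> od = OrderedDict([('a', 1)])
+                >>> od.pop('a')
+                1
+                >>> od.pop('a', 'missing')
+                'missing'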
+ + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running=None): + 'od.__repr__() <==> repr(od)' + if not _repr_running: _repr_running = {} + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + +try: + from logging.config import BaseConfigurator, valid_ident +except ImportError: # pragma: no cover + IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + + def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + + # The ConvertingXXX classes are wrappers around standard Python containers, + # and they serve to convert any suitable values in the container. The + # conversion converts base dicts, lists and tuples to their wrapped + # equivalents, whereas strings which match a conversion format are converted + # appropriately. + # + # Each wrapper should have a configurator attribute holding the actual + # configurator to use for conversion. 
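+    # A hedged usage sketch (added commentary, not part of distlib): once a
+    # configurator is attached, values with a recognised prefix are converted
+    # lazily on access, e.g.
+    #
+    #     d = ConvertingDict({'stream': 'ext://sys.stderr'})
+    #     d.configurator = configurator          # any BaseConfigurator
+    #     d['stream']    # resolves to the sys.stderr object via ext_convert()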
+
+    class ConvertingDict(dict):
+        """A converting dictionary wrapper."""
+
+        def __getitem__(self, key):
+            value = dict.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def get(self, key, default=None):
+            value = dict.get(self, key, default)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, key, default=None):
+            value = dict.pop(self, key, default)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class ConvertingList(list):
+        """A converting list wrapper."""
+        def __getitem__(self, key):
+            value = list.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, idx=-1):
+            value = list.pop(self, idx)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+            return result
+
+    class ConvertingTuple(tuple):
+        """A converting tuple wrapper."""
+        def __getitem__(self, key):
+            value = tuple.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class BaseConfigurator(object):
+        """
+        The configurator base class which defines some useful defaults.
+        """
+
+        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+        DIGIT_PATTERN = re.compile(r'^\d+$')
+
+        value_converters = {
+            'ext' : 'ext_convert',
+            'cfg' : 'cfg_convert',
+        }
+
+        # We might want to use a different one, e.g. importlib
+        importer = staticmethod(__import__)
+
+        def __init__(self, config):
+            self.config = ConvertingDict(config)
+            self.config.configurator = self
+
+        def resolve(self, s):
+            """
+            Resolve strings to objects using standard import and attribute
+            syntax.
+            """
+            name = s.split('.')
+            used = name.pop(0)
+            try:
+                found = self.importer(used)
+                for frag in name:
+                    used += '.' + frag
+                    try:
+                        found = getattr(found, frag)
+                    except AttributeError:
+                        self.importer(used)
+                        found = getattr(found, frag)
+                return found
+            except ImportError:
+                e, tb = sys.exc_info()[1:]
+                v = ValueError('Cannot resolve %r: %s' % (s, e))
+                v.__cause__, v.__traceback__ = e, tb
+                raise v
+
+        def ext_convert(self, value):
+            """Default converter for the ext:// protocol."""
+            return self.resolve(value)
+
+        def cfg_convert(self, value):
+            """Default converter for the cfg:// protocol."""
+            rest = value
+            m = self.WORD_PATTERN.match(rest)
+            if m is None:
+                raise ValueError("Unable to convert %r" % value)
+            else:
+                rest = rest[m.end():]
+                d = self.config[m.groups()[0]]
+                #print d, rest
+                while rest:
+                    m = self.DOT_PATTERN.match(rest)
+                    if m:
+                        d = d[m.groups()[0]]
+                    else:
+                        m = self.INDEX_PATTERN.match(rest)
+                        if m:
+                            idx = m.groups()[0]
+                            if not self.DIGIT_PATTERN.match(idx):
+                                d = d[idx]
+                            else:
+                                try:
+                                    n = int(idx) # try as number first (most likely)
+                                    d = d[n]
+                                except TypeError:
+                                    d = d[idx]
+                    if m:
+                        rest = rest[m.end():]
+                    else:
+                        raise ValueError('Unable to convert '
+                                         '%r at %r' % (value, rest))
+            #rest should be empty
+            return d
+
+        def convert(self, value):
+            """
+            Convert values to an appropriate type. dicts, lists and tuples are
+            replaced by their converting alternatives. Strings are checked to
+            see if they have a conversion format and are converted if they do.
+            """
+            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+                value = ConvertingDict(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingList) and isinstance(value, list):
+                value = ConvertingList(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingTuple) and\
+                 isinstance(value, tuple):
+                value = ConvertingTuple(value)
+                value.configurator = self
+            elif isinstance(value, string_types):
+                m = self.CONVERT_PATTERN.match(value)
+                if m:
+                    d = m.groupdict()
+                    prefix = d['prefix']
+                    converter = self.value_converters.get(prefix, None)
+                    if converter:
+                        suffix = d['suffix']
+                        converter = getattr(self, converter)
+                        value = converter(suffix)
+            return value
+
+        def configure_custom(self, config):
+            """Configure an object with a user-supplied factory."""
+            c = config.pop('()')
+            if not callable(c):
+                c = self.resolve(c)
+            props = config.pop('.', None)
+            # Check for valid identifiers
+            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+            result = c(**kwargs)
+            if props:
+                for name, value in props.items():
+                    setattr(result, name, value)
+            return result
+
+        def as_tuple(self, value):
+            """Utility function which converts lists to tuples."""
+            if isinstance(value, list):
+                value = tuple(value)
+            return value
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
new file mode 100644
index 0000000..7bc1914
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py
@@ -0,0 +1,1312 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2016 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""PEP 376 implementation."""
+
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import contextlib
+import hashlib
+import logging
+import os
+import posixpath
+import sys
+import zipimport
+
+from . 
import DistlibException, resources +from .compat import StringIO +from .version import get_scheme, UnsupportedVersionError +from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME +from .util import (parse_requirement, cached_property, parse_name_and_version, + read_exports, write_exports, CSVReader, CSVWriter) + + +__all__ = ['Distribution', 'BaseInstalledDistribution', + 'InstalledDistribution', 'EggInfoDistribution', + 'DistributionPath'] + + +logger = logging.getLogger(__name__) + +EXPORTS_FILENAME = 'pydist-exports.json' +COMMANDS_FILENAME = 'pydist-commands.json' + +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', + 'RESOURCES', EXPORTS_FILENAME, 'SHARED') + +DISTINFO_EXT = '.dist-info' + + +class _Cache(object): + """ + A simple cache mapping names and .dist-info paths to distributions + """ + def __init__(self): + """ + Initialise an instance. There is normally one for each DistributionPath. + """ + self.name = {} + self.path = {} + self.generated = False + + def clear(self): + """ + Clear the cache, setting it to its initial state. + """ + self.name.clear() + self.path.clear() + self.generated = False + + def add(self, dist): + """ + Add a distribution to the cache. + :param dist: The distribution to add. + """ + if dist.path not in self.path: + self.path[dist.path] = dist + self.name.setdefault(dist.key, []).append(dist) + + +class DistributionPath(object): + """ + Represents a set of distributions installed on a path (typically sys.path). + """ + def __init__(self, path=None, include_egg=False): + """ + Create an instance from a path, optionally including legacy (distutils/ + setuptools/distribute) distributions. + :param path: The path to use, as a list of directories. If not specified, + sys.path is used. + :param include_egg: If True, this instance will look for and return legacy + distributions as well as those based on PEP 376. + """ + if path is None: + path = sys.path + self.path = path + self._include_dist = True + self._include_egg = include_egg + + self._cache = _Cache() + self._cache_egg = _Cache() + self._cache_enabled = True + self._scheme = get_scheme('default') + + def _get_cache_enabled(self): + return self._cache_enabled + + def _set_cache_enabled(self, value): + self._cache_enabled = value + + cache_enabled = property(_get_cache_enabled, _set_cache_enabled) + + def clear_cache(self): + """ + Clears the internal cache. + """ + self._cache.clear() + self._cache_egg.clear() + + + def _yield_distributions(self): + """ + Yield .dist-info and/or .egg(-info) distributions. + """ + # We need to check if we've seen some resources already, because on + # some Linux systems (e.g. some Debian/Ubuntu variants) there are + # symlinks which alias other files in the environment. 
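+        # Added note (editorial): 'seen' records canonical resource paths so
+        # that a distribution reachable through several sys.path aliases is
+        # yielded only once.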
+ seen = set() + for path in self.path: + finder = resources.finder_for_path(path) + if finder is None: + continue + r = finder.find('') + if not r or not r.is_container: + continue + rset = sorted(r.resources) + for entry in rset: + r = finder.find(entry) + if not r or r.path in seen: + continue + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue + + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, + env=self) + elif self._include_egg and entry.endswith(('.egg-info', + '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + + def _generate_cache(self): + """ + Scan the path for distributions and populate the cache with + those that are found. + """ + gen_dist = not self._cache.generated + gen_egg = self._include_egg and not self._cache_egg.generated + if gen_dist or gen_egg: + for dist in self._yield_distributions(): + if isinstance(dist, InstalledDistribution): + self._cache.add(dist) + else: + self._cache_egg.add(dist) + + if gen_dist: + self._cache.generated = True + if gen_egg: + self._cache_egg.generated = True + + @classmethod + def distinfo_dirname(cls, name, version): + """ + The *name* and *version* parameters are converted into their + filename-escaped form, i.e. any ``'-'`` characters are replaced + with ``'_'`` other than the one in ``'dist-info'`` and the one + separating the name from the version number. + + :parameter name: is converted to a standard distribution name by replacing + any runs of non- alphanumeric characters with a single + ``'-'``. + :type name: string + :parameter version: is converted to a standard version string. Spaces + become dots, and all other non-alphanumeric characters + (except dots) become dashes, with runs of multiple + dashes condensed to a single dash. + :type version: string + :returns: directory name + :rtype: string""" + name = name.replace('-', '_') + return '-'.join([name, version]) + DISTINFO_EXT + + def get_distributions(self): + """ + Provides an iterator that looks for distributions and returns + :class:`InstalledDistribution` or + :class:`EggInfoDistribution` instances for each one of them. + + :rtype: iterator of :class:`InstalledDistribution` and + :class:`EggInfoDistribution` instances + """ + if not self._cache_enabled: + for dist in self._yield_distributions(): + yield dist + else: + self._generate_cache() + + for dist in self._cache.path.values(): + yield dist + + if self._include_egg: + for dist in self._cache_egg.path.values(): + yield dist + + def get_distribution(self, name): + """ + Looks for a named distribution on the path. + + This function only returns the first result found, as no more than one + value is expected. If nothing is found, ``None`` is returned. 
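+
+        Illustrative example (added commentary; 'pip' stands in for any
+        installed project name)::
+
+            >>> d = DistributionPath().get_distribution('pip')
+            >>> d is None or d.key == 'pip'
+            True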
+
+        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
+                or ``None``
+        """
+        result = None
+        name = name.lower()
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                if dist.key == name:
+                    result = dist
+                    break
+        else:
+            self._generate_cache()
+
+            if name in self._cache.name:
+                result = self._cache.name[name][0]
+            elif self._include_egg and name in self._cache_egg.name:
+                result = self._cache_egg.name[name][0]
+        return result
+
+    def provides_distribution(self, name, version=None):
+        """
+        Iterates over all distributions to find which distributions provide *name*.
+        If a *version* is provided, it will be used to filter the results.
+
+        This function yields every matching distribution; unlike
+        :meth:`get_distribution`, it does not stop at the first one found.
+
+        :parameter version: a version specifier that indicates the version
+                            required, conforming to the format in ``PEP-345``
+
+        :type name: string
+        :type version: string
+        """
+        matcher = None
+        if version is not None:
+            try:
+                matcher = self._scheme.matcher('%s (%s)' % (name, version))
+            except ValueError:
+                raise DistlibException('invalid name or version: %r, %r' %
+                                       (name, version))
+
+        for dist in self.get_distributions():
+            provided = dist.provides
+
+            for p in provided:
+                p_name, p_ver = parse_name_and_version(p)
+                if matcher is None:
+                    if p_name == name:
+                        yield dist
+                        break
+                else:
+                    if p_name == name and matcher.match(p_ver):
+                        yield dist
+                        break
+
+    def get_file_path(self, name, relative_path):
+        """
+        Return the path to a resource file.
+        """
+        dist = self.get_distribution(name)
+        if dist is None:
+            raise LookupError('no distribution named %r found' % name)
+        return dist.get_resource_path(relative_path)
+
+    def get_exported_entries(self, category, name=None):
+        """
+        Return all of the exported entries in a particular category.
+
+        :param category: The category to search for entries.
+        :param name: If specified, only entries with that name are returned.
+        """
+        for dist in self.get_distributions():
+            r = dist.exports
+            if category in r:
+                d = r[category]
+                if name is not None:
+                    if name in d:
+                        yield d[name]
+                else:
+                    for v in d.values():
+                        yield v
+
+
+class Distribution(object):
+    """
+    A base class for distributions, whether installed or from indexes.
+    Either way, it must have some metadata, so that's all that's needed
+    for construction.
+    """
+
+    build_time_dependency = False
+    """
+    Set to True if it's known to be only a build-time dependency (i.e.
+    not needed after installation).
+    """
+
+    requested = False
+    """A boolean that indicates whether the ``REQUESTED`` metadata file is
+    present (in other words, whether the package was installed by user
+    request or it was installed as a dependency)."""
+
+    def __init__(self, metadata):
+        """
+        Initialise an instance.
+        :param metadata: The instance of :class:`Metadata` describing this
+                         distribution.
+        """
+        self.metadata = metadata
+        self.name = metadata.name
+        self.key = self.name.lower()    # for case-insensitive comparisons
+        self.version = metadata.version
+        self.locator = None
+        self.digest = None
+        self.extras = None      # additional features requested
+        self.context = None     # environment marker overrides
+        self.download_urls = set()
+        self.digests = {}
+
+    @property
+    def source_url(self):
+        """
+        The source archive download URL for this distribution.
+        """
+        return self.metadata.source_url
+
+    download_url = source_url   # Backward compatibility
+
+    @property
+    def name_and_version(self):
+        """
+        A utility property which displays the name and version in parentheses.
+        """
+        return '%s (%s)' % (self.name, self.version)
+
+    @property
+    def provides(self):
+        """
+        A set of distribution names and versions provided by this distribution.
+        :return: A set of "name (version)" strings.
+        """
+        plist = self.metadata.provides
+        s = '%s (%s)' % (self.name, self.version)
+        if s not in plist:
+            plist.append(s)
+        return plist
+
+    def _get_requirements(self, req_attr):
+        md = self.metadata
+        logger.debug('Getting requirements from metadata %r', md.todict())
+        reqts = getattr(md, req_attr)
+        return set(md.get_requirements(reqts, extras=self.extras,
+                                       env=self.context))
+
+    @property
+    def run_requires(self):
+        return self._get_requirements('run_requires')
+
+    @property
+    def meta_requires(self):
+        return self._get_requirements('meta_requires')
+
+    @property
+    def build_requires(self):
+        return self._get_requirements('build_requires')
+
+    @property
+    def test_requires(self):
+        return self._get_requirements('test_requires')
+
+    @property
+    def dev_requires(self):
+        return self._get_requirements('dev_requires')
+
+    def matches_requirement(self, req):
+        """
+        Say if this instance matches (fulfills) a requirement.
+        :param req: The requirement to match.
+        :rtype req: str
+        :return: True if it matches, else False.
+        """
+        # Requirement may contain extras - parse to lose those
+        # from what's passed to the matcher
+        r = parse_requirement(req)
+        scheme = get_scheme(self.metadata.scheme)
+        try:
+            matcher = scheme.matcher(r.requirement)
+        except UnsupportedVersionError:
+            # XXX compat-mode if cannot read the version
+            logger.warning('could not read version %r - using name only',
+                           req)
+            name = req.split()[0]
+            matcher = scheme.matcher(name)
+
+        name = matcher.key      # case-insensitive
+
+        result = False
+        for p in self.provides:
+            p_name, p_ver = parse_name_and_version(p)
+            if p_name != name:
+                continue
+            try:
+                result = matcher.match(p_ver)
+                break
+            except UnsupportedVersionError:
+                pass
+        return result
+
+    def __repr__(self):
+        """
+        Return a textual representation of this instance.
+        """
+        if self.source_url:
+            suffix = ' [%s]' % self.source_url
+        else:
+            suffix = ''
+        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
+
+    def __eq__(self, other):
+        """
+        See if this distribution is the same as another.
+        :param other: The distribution to compare with. To be equal to one
+                      another, distributions must have the same type, name,
+                      version and source_url.
+        :return: True if it is the same, else False.
+        """
+        if type(other) is not type(self):
+            result = False
+        else:
+            result = (self.name == other.name and
+                      self.version == other.version and
+                      self.source_url == other.source_url)
+        return result
+
+    def __hash__(self):
+        """
+        Compute hash in a way which matches the equality test.
+        """
+        return hash(self.name) + hash(self.version) + hash(self.source_url)
+
+
+class BaseInstalledDistribution(Distribution):
+    """
+    This is the base class for installed distributions (whether PEP 376 or
+    legacy).
+    """
+
+    hasher = None
+
+    def __init__(self, metadata, path, env=None):
+        """
+        Initialise an instance.
+        :param metadata: An instance of :class:`Metadata` which describes the
+                         distribution. This will normally have been initialised
+                         from a metadata file in the ``path``.
+        :param path: The path of the ``.dist-info`` or ``.egg-info``
+                     directory for the distribution.
+        :param env: This is normally the :class:`DistributionPath`
+                    instance where this distribution was found.
+        """
+        super(BaseInstalledDistribution, self).__init__(metadata)
+        self.path = path
+        self.dist_path = env
+
+    def get_hash(self, data, hasher=None):
+        """
+        Get the hash of some data, using a particular hash algorithm, if
+        specified.
+
+        :param data: The data to be hashed.
+        :type data: bytes
+        :param hasher: The name of a hash implementation, supported by hashlib,
+                       or ``None``. Examples of valid values are ``'sha1'``,
+                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
+                       ``'sha512'``. If no hasher is specified, the ``hasher``
+                       attribute of the :class:`InstalledDistribution` instance
+                       is used. If the hasher is determined to be ``None``, MD5
+                       is used as the hashing algorithm.
+        :returns: The hash of the data. If a hasher was explicitly specified,
+                  the returned hash will be prefixed with the specified hasher
+                  followed by '='.
+        :rtype: str
+        """
+        if hasher is None:
+            hasher = self.hasher
+        if hasher is None:
+            hasher = hashlib.md5
+            prefix = ''
+        else:
+            hasher = getattr(hashlib, hasher)
+            prefix = '%s=' % self.hasher
+        digest = hasher(data).digest()
+        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
+        return '%s%s' % (prefix, digest)
+
+
+class InstalledDistribution(BaseInstalledDistribution):
+    """
+    Created with the *path* of the ``.dist-info`` directory provided to the
+    constructor. It reads the metadata contained in ``pydist.json`` when it is
+    instantiated, or uses a passed-in Metadata instance (useful for when
+    dry-run mode is being used).
+    """
+
+    hasher = 'sha256'
+
+    def __init__(self, path, metadata=None, env=None):
+        self.finder = finder = resources.finder_for_path(path)
+        if finder is None:
+            # was a debugging breakpoint (pdb.set_trace()); fail loudly instead
+            raise ValueError('finder unavailable for %s' % path)
+        if env and env._cache_enabled and path in env._cache.path:
+            metadata = env._cache.path[path].metadata
+        elif metadata is None:
+            r = finder.find(METADATA_FILENAME)
+            # Temporary - for Wheel 0.23 support
+            if r is None:
+                r = finder.find(WHEEL_METADATA_FILENAME)
+            # Temporary - for legacy support
+            if r is None:
+                r = finder.find('METADATA')
+            if r is None:
+                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
+                                                        path))
+            with contextlib.closing(r.as_stream()) as stream:
+                metadata = Metadata(fileobj=stream, scheme='legacy')
+
+        super(InstalledDistribution, self).__init__(metadata, path, env)
+
+        if env and env._cache_enabled:
+            env._cache.add(self)
+
+        # a pdb.set_trace() debugging leftover guarding this lookup has been
+        # removed; any AttributeError should propagate
+        r = finder.find('REQUESTED')
+        self.requested = r is not None
+
+    def __repr__(self):
+        return '<InstalledDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def _get_records(self):
+        """
+        Get the list of installed files for the distribution
+        :return: A list of tuples of path, hash and size. Note that hash and
+                 size might be ``None`` for some entries. The path is exactly
+                 as stored in the file (which is as in PEP 376).
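+
+        Illustrative RECORD row (added commentary; values are made up)::
+
+            foo/__init__.py,sha256=AVKd6bAsVHB4MXLZSDIh0A1eOKhn9EGAZS1THjkLCQI,1408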
+ """ + results = [] + r = self.get_distinfo_resource('RECORD') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as record_reader: + # Base location is parent dir of .dist-info dir + #base_location = os.path.dirname(self.path) + #base_location = os.path.abspath(base_location) + for row in record_reader: + missing = [None for i in range(len(row), 3)] + path, checksum, size = row + missing + #if not os.path.isabs(path): + # path = path.replace('/', os.sep) + # path = os.path.join(base_location, path) + results.append((path, checksum, size)) + return results + + @cached_property + def exports(self): + """ + Return the information exported by this distribution. + :return: A dictionary of exports, mapping an export category to a dict + of :class:`ExportEntry` instances describing the individual + export entries, and keyed by name. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + result = self.read_exports() + return result + + def read_exports(self): + """ + Read exports data from a file in .ini format. + + :return: A dictionary of exports, mapping an export category to a list + of :class:`ExportEntry` instances describing the individual + export entries. + """ + result = {} + r = self.get_distinfo_resource(EXPORTS_FILENAME) + if r: + with contextlib.closing(r.as_stream()) as stream: + result = read_exports(stream) + return result + + def write_exports(self, exports): + """ + Write a dictionary of exports to a file in .ini format. + :param exports: A dictionary of exports, mapping an export category to + a list of :class:`ExportEntry` instances describing the + individual export entries. + """ + rf = self.get_distinfo_file(EXPORTS_FILENAME) + with open(rf, 'w') as f: + write_exports(exports, f) + + def get_resource_path(self, relative_path): + """ + NOTE: This API may change in the future. + + Return the absolute path to a resource file with the given relative + path. + + :param relative_path: The path, relative to .dist-info, of the resource + of interest. + :return: The absolute path where the resource is to be found. + """ + r = self.get_distinfo_resource('RESOURCES') + with contextlib.closing(r.as_stream()) as stream: + with CSVReader(stream=stream) as resources_reader: + for relative, destination in resources_reader: + if relative == relative_path: + return destination + raise KeyError('no resource file with relative path %r ' + 'is installed' % relative_path) + + def list_installed_files(self): + """ + Iterates over the ``RECORD`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: iterator of (path, hash, size) + """ + for result in self._get_records(): + yield result + + def write_installed_files(self, paths, prefix, dry_run=False): + """ + Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any + existing ``RECORD`` file is silently overwritten. + + prefix is used to determine when to write absolute paths. 
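+
+        Editorial note: paths under the installation base (the parent of the
+        ``.dist-info`` directory), or under *prefix* when that base itself
+        lies beneath it, are recorded relative to the base; other paths are
+        written as given.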
+ """ + prefix = os.path.join(prefix, '') + base = os.path.dirname(self.path) + base_under_prefix = base.startswith(prefix) + base = os.path.join(base, '') + record_path = self.get_distinfo_file('RECORD') + logger.info('creating %s', record_path) + if dry_run: + return None + with CSVWriter(record_path) as writer: + for path in paths: + if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): + # do not put size and hash, as in PEP-376 + hash_value = size = '' + else: + size = '%d' % os.path.getsize(path) + with open(path, 'rb') as fp: + hash_value = self.get_hash(fp.read()) + if path.startswith(base) or (base_under_prefix and + path.startswith(prefix)): + path = os.path.relpath(path, base) + writer.writerow((path, hash_value, size)) + + # add the RECORD file itself + if record_path.startswith(base): + record_path = os.path.relpath(record_path, base) + writer.writerow((record_path, '', '')) + return record_path + + def check_installed_files(self): + """ + Checks that the hashes and sizes of the files in ``RECORD`` are + matched by the files themselves. Returns a (possibly empty) list of + mismatches. Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + base = os.path.dirname(self.path) + record_path = self.get_distinfo_file('RECORD') + for path, hash_value, size in self.list_installed_files(): + if not os.path.isabs(path): + path = os.path.join(base, path) + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + elif os.path.isfile(path): + actual_size = str(os.path.getsize(path)) + if size and actual_size != size: + mismatches.append((path, 'size', size, actual_size)) + elif hash_value: + if '=' in hash_value: + hasher = hash_value.split('=', 1)[0] + else: + hasher = None + + with open(path, 'rb') as f: + actual_hash = self.get_hash(f.read(), hasher) + if actual_hash != hash_value: + mismatches.append((path, 'hash', hash_value, actual_hash)) + return mismatches + + @cached_property + def shared_locations(self): + """ + A dictionary of shared locations whose keys are in the set 'prefix', + 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. + The corresponding value is the absolute path of that category for + this distribution, and takes into account any paths selected by the + user at installation time (e.g. via command-line arguments). In the + case of the 'namespace' key, this would be a list of absolute paths + for the roots of namespace packages in this distribution. + + The first time this property is accessed, the relevant information is + read from the SHARED file in the .dist-info directory. + """ + result = {} + shared_path = os.path.join(self.path, 'SHARED') + if os.path.isfile(shared_path): + with codecs.open(shared_path, 'r', encoding='utf-8') as f: + lines = f.read().splitlines() + for line in lines: + key, value = line.split('=', 1) + if key == 'namespace': + result.setdefault(key, []).append(value) + else: + result[key] = value + return result + + def write_shared_locations(self, paths, dry_run=False): + """ + Write shared location information to the SHARED file in .dist-info. + :param paths: A dictionary as described in the documentation for + :meth:`shared_locations`. + :param dry_run: If True, the action is logged but no file is actually + written. + :return: The path of the file written to. 
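+
+        Illustrative SHARED content (added commentary; values are made up)::
+
+            prefix=/usr
+            scripts=/usr/bin
+            namespace=/usr/lib/python2.7/site-packages/ns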
+ """ + shared_path = os.path.join(self.path, 'SHARED') + logger.info('creating %s', shared_path) + if dry_run: + return None + lines = [] + for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): + path = paths[key] + if os.path.isdir(paths[key]): + lines.append('%s=%s' % (key, path)) + for ns in paths.get('namespace', ()): + lines.append('namespace=%s' % ns) + + with codecs.open(shared_path, 'w', encoding='utf-8') as f: + f.write('\n'.join(lines)) + return shared_path + + def get_distinfo_resource(self, path): + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + finder = resources.finder_for_path(self.path) + if finder is None: + raise DistlibException('Unable to get a finder for %s' % self.path) + return finder.find(path) + + def get_distinfo_file(self, path): + """ + Returns a path located under the ``.dist-info`` directory. Returns a + string representing the path. + + :parameter path: a ``'/'``-separated path relative to the + ``.dist-info`` directory or an absolute path; + If *path* is an absolute path and doesn't start + with the ``.dist-info`` directory path, + a :class:`DistlibException` is raised + :type path: str + :rtype: str + """ + # Check if it is an absolute path # XXX use relpath, add tests + if path.find(os.sep) >= 0: + # it's an absolute path? + distinfo_dirname, path = path.split(os.sep)[-2:] + if distinfo_dirname != self.path.split(os.sep)[-1]: + raise DistlibException( + 'dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) + + # The file must be relative + if path not in DIST_FILES: + raise DistlibException('invalid path for a dist-info file: ' + '%r at %r' % (path, self.path)) + + return os.path.join(self.path, path) + + def list_distinfo_files(self): + """ + Iterates over the ``RECORD`` entries and returns paths for each line if + the path is pointing to a file located in the ``.dist-info`` directory + or one of its subdirectories. + + :returns: iterator of paths + """ + base = os.path.dirname(self.path) + for path, checksum, size in self._get_records(): + # XXX add separator or use real relpath algo + if not os.path.isabs(path): + path = os.path.join(base, path) + if path.startswith(self.path): + yield path + + def __eq__(self, other): + return (isinstance(other, InstalledDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + + +class EggInfoDistribution(BaseInstalledDistribution): + """Created with the *path* of the ``.egg-info`` directory or file provided + to the constructor. 
It reads the metadata contained in the file itself, or
+    if the given path happens to be a directory, the metadata is read from the
+    file ``PKG-INFO`` under that directory."""
+
+    requested = True    # as we have no way of knowing, assume it was
+    shared_locations = {}
+
+    def __init__(self, path, env=None):
+        def set_name_and_version(s, n, v):
+            s.name = n
+            s.key = n.lower()   # for case-insensitive comparisons
+            s.version = v
+
+        self.path = path
+        self.dist_path = env
+        if env and env._cache_enabled and path in env._cache_egg.path:
+            metadata = env._cache_egg.path[path].metadata
+            set_name_and_version(self, metadata.name, metadata.version)
+        else:
+            metadata = self._get_metadata(path)
+
+            # Need to be set before caching
+            set_name_and_version(self, metadata.name, metadata.version)
+
+            if env and env._cache_enabled:
+                env._cache_egg.add(self)
+        super(EggInfoDistribution, self).__init__(metadata, path, env)
+
+    def _get_metadata(self, path):
+        requires = None
+
+        def parse_requires_data(data):
+            """Create a list of dependencies from a requires.txt file.
+
+            *data*: the contents of a setuptools-produced requires.txt file.
+            """
+            reqs = []
+            lines = data.splitlines()
+            for line in lines:
+                line = line.strip()
+                if line.startswith('['):
+                    logger.warning('Unexpected line: quitting requirement scan: %r',
+                                   line)
+                    break
+                r = parse_requirement(line)
+                if not r:
+                    logger.warning('Not recognised as a requirement: %r', line)
+                    continue
+                if r.extras:
+                    logger.warning('extra requirements in requires.txt are '
+                                   'not supported')
+                if not r.constraints:
+                    reqs.append(r.name)
+                else:
+                    cons = ', '.join('%s%s' % c for c in r.constraints)
+                    reqs.append('%s (%s)' % (r.name, cons))
+            return reqs
+
+        def parse_requires_path(req_path):
+            """Create a list of dependencies from a requires.txt file.
+
+            *req_path*: the path to a setuptools-produced requires.txt file.
+            """
+
+            reqs = []
+            try:
+                with codecs.open(req_path, 'r', 'utf-8') as fp:
+                    reqs = parse_requires_data(fp.read())
+            except IOError:
+                pass
+            return reqs
+
+        if path.endswith('.egg'):
+            if os.path.isdir(path):
+                meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
+                metadata = Metadata(path=meta_path, scheme='legacy')
+                req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
+                requires = parse_requires_path(req_path)
+            else:
+                # FIXME handle the case where zipfile is not available
+                zipf = zipimport.zipimporter(path)
+                fileobj = StringIO(
+                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
+                metadata = Metadata(fileobj=fileobj, scheme='legacy')
+                try:
+                    data = zipf.get_data('EGG-INFO/requires.txt')
+                    requires = parse_requires_data(data.decode('utf-8'))
+                except IOError:
+                    requires = None
+        elif path.endswith('.egg-info'):
+            if os.path.isdir(path):
+                req_path = os.path.join(path, 'requires.txt')
+                requires = parse_requires_path(req_path)
+                path = os.path.join(path, 'PKG-INFO')
+            metadata = Metadata(path=path, scheme='legacy')
+        else:
+            raise DistlibException('path must end with .egg-info or .egg, '
+                                   'got %r' % path)
+
+        if requires:
+            metadata.add_requirements(requires)
+        return metadata
+
+    def __repr__(self):
+        return '<EggInfoDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def check_installed_files(self):
+        """
+        Checks that the hashes and sizes of the files in ``RECORD`` are
+        matched by the files themselves. Returns a (possibly empty) list of
+        mismatches.
Each entry in the mismatch list will be a tuple consisting + of the path, 'exists', 'size' or 'hash' according to what didn't match + (existence is checked first, then size, then hash), the expected + value and the actual value. + """ + mismatches = [] + record_path = os.path.join(self.path, 'installed-files.txt') + if os.path.exists(record_path): + for path, _, _ in self.list_installed_files(): + if path == record_path: + continue + if not os.path.exists(path): + mismatches.append((path, 'exists', True, False)) + return mismatches + + def list_installed_files(self): + """ + Iterates over the ``installed-files.txt`` entries and returns a tuple + ``(path, hash, size)`` for each line. + + :returns: a list of (path, hash, size) + """ + + def _md5(path): + f = open(path, 'rb') + try: + content = f.read() + finally: + f.close() + return hashlib.md5(content).hexdigest() + + def _size(path): + return os.stat(path).st_size + + record_path = os.path.join(self.path, 'installed-files.txt') + result = [] + if os.path.exists(record_path): + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + p = os.path.normpath(os.path.join(self.path, line)) + # "./" is present as a marker between installed files + # and installation metadata files + if not os.path.exists(p): + logger.warning('Non-existent file: %s', p) + if p.endswith(('.pyc', '.pyo')): + continue + #otherwise fall through and fail + if not os.path.isdir(p): + result.append((p, _md5(p), _size(p))) + result.append((record_path, None, None)) + return result + + def list_distinfo_files(self, absolute=False): + """ + Iterates over the ``installed-files.txt`` entries and returns paths for + each line if the path is pointing to a file located in the + ``.egg-info`` directory or one of its subdirectories. + + :parameter absolute: If *absolute* is ``True``, each returned path is + transformed into a local absolute path. Otherwise the + raw value from ``installed-files.txt`` is returned. + :type absolute: boolean + :returns: iterator of paths + """ + record_path = os.path.join(self.path, 'installed-files.txt') + skip = True + with codecs.open(record_path, 'r', encoding='utf-8') as f: + for line in f: + line = line.strip() + if line == './': + skip = False + continue + if not skip: + p = os.path.normpath(os.path.join(self.path, line)) + if p.startswith(self.path): + if absolute: + yield p + else: + yield line + + def __eq__(self, other): + return (isinstance(other, EggInfoDistribution) and + self.path == other.path) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + __hash__ = object.__hash__ + +new_dist_class = InstalledDistribution +old_dist_class = EggInfoDistribution + + +class DependencyGraph(object): + """ + Represents a dependency graph between distributions. + + The dependency relationships are stored in an ``adjacency_list`` that maps + distributions to a list of ``(other, label)`` tuples where ``other`` + is a distribution and the edge is labeled with ``label`` (i.e. the version + specifier, if such was provided). Also, for more efficient traversal, for + every distribution ``x``, a list of predecessors is kept in + ``reverse_list[x]``. An edge from distribution ``a`` to + distribution ``b`` means that ``a`` depends on ``b``. If any missing + dependencies are found, they are stored in ``missing``, which is a + dictionary that maps distributions to a list of requirements that were not + provided by any other distributions. 
+ """ + + def __init__(self): + self.adjacency_list = {} + self.reverse_list = {} + self.missing = {} + + def add_distribution(self, distribution): + """Add the *distribution* to the graph. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + """ + self.adjacency_list[distribution] = [] + self.reverse_list[distribution] = [] + #self.missing[distribution] = [] + + def add_edge(self, x, y, label=None): + """Add an edge from distribution *x* to distribution *y* with the given + *label*. + + :type x: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type y: :class:`distutils2.database.InstalledDistribution` or + :class:`distutils2.database.EggInfoDistribution` + :type label: ``str`` or ``None`` + """ + self.adjacency_list[x].append((y, label)) + # multiple edges are allowed, so be careful + if x not in self.reverse_list[y]: + self.reverse_list[y].append(x) + + def add_missing(self, distribution, requirement): + """ + Add a missing *requirement* for the given *distribution*. + + :type distribution: :class:`distutils2.database.InstalledDistribution` + or :class:`distutils2.database.EggInfoDistribution` + :type requirement: ``str`` + """ + logger.debug('%s missing %r', distribution, requirement) + self.missing.setdefault(distribution, []).append(requirement) + + def _repr_dist(self, dist): + return '%s %s' % (dist.name, dist.version) + + def repr_node(self, dist, level=1): + """Prints only a subgraph""" + output = [self._repr_dist(dist)] + for other, label in self.adjacency_list[dist]: + dist = self._repr_dist(other) + if label is not None: + dist = '%s [%s]' % (dist, label) + output.append(' ' * level + str(dist)) + suboutput = self.repr_node(other, level + 1) + subs = suboutput.split('\n') + output.extend(subs[1:]) + return '\n'.join(output) + + def to_dot(self, f, skip_disconnected=True): + """Writes a DOT output for the graph to the provided file *f*. + + If *skip_disconnected* is set to ``True``, then all distributions + that are not dependent on any other distribution are skipped. + + :type f: has to support ``file``-like operations + :type skip_disconnected: ``bool`` + """ + disconnected = [] + + f.write("digraph dependencies {\n") + for dist, adjs in self.adjacency_list.items(): + if len(adjs) == 0 and not skip_disconnected: + disconnected.append(dist) + for other, label in adjs: + if not label is None: + f.write('"%s" -> "%s" [label="%s"]\n' % + (dist.name, other.name, label)) + else: + f.write('"%s" -> "%s"\n' % (dist.name, other.name)) + if not skip_disconnected and len(disconnected) > 0: + f.write('subgraph disconnected {\n') + f.write('label = "Disconnected"\n') + f.write('bgcolor = red\n') + + for dist in disconnected: + f.write('"%s"' % dist.name) + f.write('\n') + f.write('}\n') + f.write('}\n') + + def topological_sort(self): + """ + Perform a topological sort of the graph. + :return: A tuple, the first element of which is a topologically sorted + list of distributions, and the second element of which is a + list of distributions that cannot be sorted because they have + circular dependencies and so form a cycle. 
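+
+        Illustrative usage (added commentary)::
+
+            ordered, cyclic = graph.topological_sort()
+            # 'ordered' lists distributions with no unmet dependencies first,
+            # so it can serve as an install order; 'cyclic' holds any nodes
+            # that form circular dependencies.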
+ """ + result = [] + # Make a shallow copy of the adjacency list + alist = {} + for k, v in self.adjacency_list.items(): + alist[k] = v[:] + while True: + # See what we can remove in this run + to_remove = [] + for k, v in list(alist.items())[:]: + if not v: + to_remove.append(k) + del alist[k] + if not to_remove: + # What's left in alist (if anything) is a cycle. + break + # Remove from the adjacency list of others + for k, v in alist.items(): + alist[k] = [(d, r) for d, r in v if d not in to_remove] + logger.debug('Moving to result: %s', + ['%s (%s)' % (d.name, d.version) for d in to_remove]) + result.extend(to_remove) + return result, list(alist.keys()) + + def __repr__(self): + """Representation of the graph""" + output = [] + for dist, adjs in self.adjacency_list.items(): + output.append(self.repr_node(dist)) + return '\n'.join(output) + + +def make_graph(dists, scheme='default'): + """Makes a dependency graph from the given distributions. + + :parameter dists: a list of distributions + :type dists: list of :class:`distutils2.database.InstalledDistribution` and + :class:`distutils2.database.EggInfoDistribution` instances + :rtype: a :class:`DependencyGraph` instance + """ + scheme = get_scheme(scheme) + graph = DependencyGraph() + provided = {} # maps names to lists of (version, dist) tuples + + # first, build the graph and find out what's provided + for dist in dists: + graph.add_distribution(dist) + + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + provided.setdefault(name, []).append((version, dist)) + + # now make the edges + for dist in dists: + requires = (dist.run_requires | dist.meta_requires | + dist.build_requires | dist.dev_requires) + for req in requires: + try: + matcher = scheme.matcher(req) + except UnsupportedVersionError: + # XXX compat-mode if cannot read the version + logger.warning('could not read version %r - using name only', + req) + name = req.split()[0] + matcher = scheme.matcher(name) + + name = matcher.key # case-insensitive + + matched = False + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + graph.add_edge(dist, provider, req) + matched = True + break + if not matched: + graph.add_missing(dist, req) + return graph + + +def get_dependent_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + dependent on *dist*. + + :param dists: a list of distributions + :param dist: a distribution, member of *dists* for which we are interested + """ + if dist not in dists: + raise DistlibException('given distribution %r is not a member ' + 'of the list' % dist.name) + graph = make_graph(dists) + + dep = [dist] # dependent distributions + todo = graph.reverse_list[dist] # list of nodes we should inspect + + while todo: + d = todo.pop() + dep.append(d) + for succ in graph.reverse_list[d]: + if succ not in dep: + todo.append(succ) + + dep.pop(0) # remove dist from dep, was there to prevent infinite loops + return dep + + +def get_required_dists(dists, dist): + """Recursively generate a list of distributions from *dists* that are + required by *dist*. 
+
+    :param dists: a list of distributions
+    :param dist: a distribution, member of *dists* for which we are interested
+    """
+    if dist not in dists:
+        raise DistlibException('given distribution %r is not a member '
+                               'of the list' % dist.name)
+    graph = make_graph(dists)
+
+    req = []        # required distributions
+    todo = graph.adjacency_list[dist]   # list of nodes we should inspect
+
+    while todo:
+        d = todo.pop()[0]
+        req.append(d)
+        for pred in graph.adjacency_list[d]:
+            if pred not in req:
+                todo.append(pred)
+
+    return req
+
+
+def make_dist(name, version, **kwargs):
+    """
+    A convenience method for making a dist given just a name and version.
+    """
+    summary = kwargs.pop('summary', 'Placeholder for summary')
+    md = Metadata(**kwargs)
+    md.name = name
+    md.version = version
+    md.summary = summary or 'Placeholder for summary'
+    return Distribution(md)
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py
new file mode 100644
index 0000000..73037c9
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/index.py
@@ -0,0 +1,513 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import hashlib
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+try:
+    from threading import Thread
+except ImportError:
+    from dummy_threading import Thread
+
+from . import DistlibException
+from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
+                     urlparse, build_opener, string_types)
+from .util import cached_property, zip_dir, ServerProxy
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_INDEX = 'https://pypi.python.org/pypi'
+DEFAULT_REALM = 'pypi'
+
+class PackageIndex(object):
+    """
+    This class represents a package index compatible with PyPI, the Python
+    Package Index.
+    """
+
+    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
+
+    def __init__(self, url=None):
+        """
+        Initialise an instance.
+
+        :param url: The URL of the index. If not specified, the URL for PyPI is
+                    used.
+        """
+        self.url = url or DEFAULT_INDEX
+        self.read_configuration()
+        scheme, netloc, path, params, query, frag = urlparse(self.url)
+        if params or query or frag or scheme not in ('http', 'https'):
+            raise DistlibException('invalid repository: %s' % self.url)
+        self.password_handler = None
+        self.ssl_verifier = None
+        self.gpg = None
+        self.gpg_home = None
+        self.rpc_proxy = None
+        with open(os.devnull, 'w') as sink:
+            for s in ('gpg2', 'gpg'):
+                try:
+                    rc = subprocess.check_call([s, '--version'], stdout=sink,
+                                               stderr=sink)
+                    if rc == 0:
+                        self.gpg = s
+                        break
+                except OSError:
+                    pass
+
+    def _get_pypirc_command(self):
+        """
+        Get the distutils command for interacting with PyPI configurations.
+        :return: the command.
+        """
+        from distutils.core import Distribution
+        from distutils.config import PyPIRCCommand
+        d = Distribution()
+        return PyPIRCCommand(d)
+
+    def read_configuration(self):
+        """
+        Read the PyPI access configuration as supported by distutils, getting
+        PyPI to do the actual work. This populates ``username``, ``password``,
+        ``realm`` and ``url`` attributes from the configuration.
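+
+        Illustrative ~/.pypirc (added commentary; values are made up)::
+
+            [distutils]
+            index-servers =
+                pypi
+
+            [pypi]
+            username: alice
+            password: secret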
+        """
+        # get distutils to do the work
+        c = self._get_pypirc_command()
+        c.repository = self.url
+        cfg = c._read_pypirc()
+        self.username = cfg.get('username')
+        self.password = cfg.get('password')
+        self.realm = cfg.get('realm', 'pypi')
+        self.url = cfg.get('repository', self.url)
+
+    def save_configuration(self):
+        """
+        Save the PyPI access configuration. You must have set ``username`` and
+        ``password`` attributes before calling this method.
+
+        Again, distutils is used to do the actual work.
+        """
+        self.check_credentials()
+        # get distutils to do the work
+        c = self._get_pypirc_command()
+        c._store_pypirc(self.username, self.password)
+
+    def check_credentials(self):
+        """
+        Check that ``username`` and ``password`` have been set, and raise an
+        exception if not.
+        """
+        if self.username is None or self.password is None:
+            raise DistlibException('username and password must be set')
+        pm = HTTPPasswordMgr()
+        _, netloc, _, _, _, _ = urlparse(self.url)
+        pm.add_password(self.realm, netloc, self.username, self.password)
+        self.password_handler = HTTPBasicAuthHandler(pm)
+
+    def register(self, metadata):
+        """
+        Register a distribution on PyPI, using the provided metadata.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the distribution to be
+                         registered.
+        :return: The HTTP response received from PyPI upon submission of the
+                 request.
+        """
+        self.check_credentials()
+        metadata.validate()
+        d = metadata.todict()
+        d[':action'] = 'verify'
+        request = self.encode_request(d.items(), [])
+        response = self.send_request(request)
+        d[':action'] = 'submit'
+        request = self.encode_request(d.items(), [])
+        return self.send_request(request)
+
+    def _reader(self, name, stream, outbuf):
+        """
+        Thread runner for reading lines from a subprocess into a buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+        :param outbuf: The list to append the read lines to.
+        """
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            s = s.decode('utf-8').rstrip()
+            outbuf.append(s)
+            logger.debug('%s: %s' % (name, s))
+        stream.close()
+
+    def get_sign_command(self, filename, signer, sign_password,
+                         keystore=None):
+        """
+        Return a suitable command for signing a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The signing command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        if sign_password is not None:
+            cmd.extend(['--batch', '--passphrase-fd', '0'])
+        td = tempfile.mkdtemp()
+        sf = os.path.join(td, os.path.basename(filename) + '.asc')
+        cmd.extend(['--detach-sign', '--armor', '--local-user',
+                    signer, '--output', sf, filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd, sf
+
+    def run_command(self, cmd, input_data=None):
+        """
+        Run a command in a child process, passing it any input data specified.
+
+        :param cmd: The command to run.
+ :param input_data: If specified, this must be a byte string containing + data to be sent to the child process. + :return: A tuple consisting of the subprocess' exit code, a list of + lines read from the subprocess' ``stdout``, and a list of + lines read from the subprocess' ``stderr``. + """ + kwargs = { + 'stdout': subprocess.PIPE, + 'stderr': subprocess.PIPE, + } + if input_data is not None: + kwargs['stdin'] = subprocess.PIPE + stdout = [] + stderr = [] + p = subprocess.Popen(cmd, **kwargs) + # We don't use communicate() here because we may need to + # get clever with interacting with the command + t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) + t1.start() + t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) + t2.start() + if input_data is not None: + p.stdin.write(input_data) + p.stdin.close() + + p.wait() + t1.join() + t2.join() + return p.returncode, stdout, stderr + + def sign_file(self, filename, signer, sign_password, keystore=None): + """ + Sign a file. + + :param filename: The pathname to the file to be signed. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The absolute pathname of the file where the signature is + stored. + """ + cmd, sig_file = self.get_sign_command(filename, signer, sign_password, + keystore) + rc, stdout, stderr = self.run_command(cmd, + sign_password.encode('utf-8')) + if rc != 0: + raise DistlibException('sign command failed with error ' + 'code %s' % rc) + return sig_file + + def upload_file(self, metadata, filename, signer=None, sign_password=None, + filetype='sdist', pyversion='source', keystore=None): + """ + Upload a release file to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the file to be uploaded. + :param filename: The pathname of the file to be uploaded. + :param signer: The identifier of the signer of the file. + :param sign_password: The passphrase for the signer's + private key used for signing. + :param filetype: The type of the file being uploaded. This is the + distutils command which produced that file, e.g. + ``sdist`` or ``bdist_wheel``. + :param pyversion: The version of Python which the release relates + to. For code compatible with any Python, this would + be ``source``, otherwise it would be e.g. ``3.2``. + :param keystore: The path to a directory which contains the keys + used in signing. If not specified, the instance's + ``gpg_home`` attribute is used instead. + :return: The HTTP response received from PyPI upon submission of the + request. 
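+
+ A hypothetical invocation, given a prepared :class:`Metadata`
+ instance ``md`` and credentials already configured (the filename
+ is a placeholder)::
+
+     index = PackageIndex()
+     index.upload_file(md, 'dist/example-0.1.tar.gz',
+                       filetype='sdist', pyversion='source')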
+ """ + self.check_credentials() + if not os.path.exists(filename): + raise DistlibException('not found: %s' % filename) + metadata.validate() + d = metadata.todict() + sig_file = None + if signer: + if not self.gpg: + logger.warning('no signing program available - not signed') + else: + sig_file = self.sign_file(filename, signer, sign_password, + keystore) + with open(filename, 'rb') as f: + file_data = f.read() + md5_digest = hashlib.md5(file_data).hexdigest() + sha256_digest = hashlib.sha256(file_data).hexdigest() + d.update({ + ':action': 'file_upload', + 'protcol_version': '1', + 'filetype': filetype, + 'pyversion': pyversion, + 'md5_digest': md5_digest, + 'sha256_digest': sha256_digest, + }) + files = [('content', os.path.basename(filename), file_data)] + if sig_file: + with open(sig_file, 'rb') as f: + sig_data = f.read() + files.append(('gpg_signature', os.path.basename(sig_file), + sig_data)) + shutil.rmtree(os.path.dirname(sig_file)) + request = self.encode_request(d.items(), files) + return self.send_request(request) + + def upload_documentation(self, metadata, doc_dir): + """ + Upload documentation to the index. + + :param metadata: A :class:`Metadata` instance defining at least a name + and version number for the documentation to be + uploaded. + :param doc_dir: The pathname of the directory which contains the + documentation. This should be the directory that + contains the ``index.html`` for the documentation. + :return: The HTTP response received from PyPI upon submission of the + request. + """ + self.check_credentials() + if not os.path.isdir(doc_dir): + raise DistlibException('not a directory: %r' % doc_dir) + fn = os.path.join(doc_dir, 'index.html') + if not os.path.exists(fn): + raise DistlibException('not found: %r' % fn) + metadata.validate() + name, version = metadata.name, metadata.version + zip_data = zip_dir(doc_dir).getvalue() + fields = [(':action', 'doc_upload'), + ('name', name), ('version', version)] + files = [('content', name, zip_data)] + request = self.encode_request(fields, files) + return self.send_request(request) + + def get_verify_command(self, signature_filename, data_filename, + keystore=None): + """ + Return a suitable command for verifying a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: The verifying command as a list suitable to be + passed to :class:`subprocess.Popen`. + """ + cmd = [self.gpg, '--status-fd', '2', '--no-tty'] + if keystore is None: + keystore = self.gpg_home + if keystore: + cmd.extend(['--homedir', keystore]) + cmd.extend(['--verify', signature_filename, data_filename]) + logger.debug('invoking: %s', ' '.join(cmd)) + return cmd + + def verify_signature(self, signature_filename, data_filename, + keystore=None): + """ + Verify a signature for a file. + + :param signature_filename: The pathname to the file containing the + signature. + :param data_filename: The pathname to the file containing the + signed data. + :param keystore: The path to a directory which contains the keys + used in verification. If not specified, the + instance's ``gpg_home`` attribute is used instead. + :return: True if the signature was verified, else False. 
+ """ + if not self.gpg: + raise DistlibException('verification unavailable because gpg ' + 'unavailable') + cmd = self.get_verify_command(signature_filename, data_filename, + keystore) + rc, stdout, stderr = self.run_command(cmd) + if rc not in (0, 1): + raise DistlibException('verify command failed with error ' + 'code %s' % rc) + return rc == 0 + + def download_file(self, url, destfile, digest=None, reporthook=None): + """ + This is a convenience method for downloading a file from an URL. + Normally, this will be a file from the index, though currently + no check is made for this (i.e. a file can be downloaded from + anywhere). + + The method is just like the :func:`urlretrieve` function in the + standard library, except that it allows digest computation to be + done during download and checking that the downloaded data + matched any expected value. + + :param url: The URL of the file to be downloaded (assumed to be + available via an HTTP GET request). + :param destfile: The pathname where the downloaded file is to be + saved. + :param digest: If specified, this must be a (hasher, value) + tuple, where hasher is the algorithm used (e.g. + ``'md5'``) and ``value`` is the expected value. + :param reporthook: The same as for :func:`urlretrieve` in the + standard library. + """ + if digest is None: + digester = None + logger.debug('No digest specified') + else: + if isinstance(digest, (list, tuple)): + hasher, digest = digest + else: + hasher = 'md5' + digester = getattr(hashlib, hasher)() + logger.debug('Digest specified: %s' % digest) + # The following code is equivalent to urlretrieve. + # We need to do it this way so that we can compute the + # digest of the file as we go. + with open(destfile, 'wb') as dfp: + # addinfourl is not a context manager on 2.x + # so we have to use try/finally + sfp = self.send_request(Request(url)) + try: + headers = sfp.info() + blocksize = 8192 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, blocksize, size) + while True: + block = sfp.read(blocksize) + if not block: + break + read += len(block) + dfp.write(block) + if digester: + digester.update(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, blocksize, size) + finally: + sfp.close() + + # check that we got the whole file, if we can + if size >= 0 and read < size: + raise DistlibException( + 'retrieval incomplete: got only %d out of %d bytes' + % (read, size)) + # if we have a digest, it must match. + if digester: + actual = digester.hexdigest() + if digest != actual: + raise DistlibException('%s digest mismatch for %s: expected ' + '%s, got %s' % (hasher, destfile, + digest, actual)) + logger.debug('Digest verified: %s', digest) + + def send_request(self, req): + """ + Send a standard library :class:`Request` to PyPI and return its + response. + + :param req: The request to send. + :return: The HTTP response from PyPI (a standard library HTTPResponse). + """ + handlers = [] + if self.password_handler: + handlers.append(self.password_handler) + if self.ssl_verifier: + handlers.append(self.ssl_verifier) + opener = build_opener(*handlers) + return opener.open(req) + + def encode_request(self, fields, files): + """ + Encode fields and files for posting to an HTTP server. + + :param fields: The fields to send as a list of (fieldname, value) + tuples. + :param files: The files to send as a list of (fieldname, filename, + file_bytes) tuple. 
+ """ + # Adapted from packaging, which in turn was adapted from + # http://code.activestate.com/recipes/146306 + + parts = [] + boundary = self.boundary + for k, values in fields: + if not isinstance(values, (list, tuple)): + values = [values] + + for v in values: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"' % + k).encode('utf-8'), + b'', + v.encode('utf-8'))) + for key, filename, value in files: + parts.extend(( + b'--' + boundary, + ('Content-Disposition: form-data; name="%s"; filename="%s"' % + (key, filename)).encode('utf-8'), + b'', + value)) + + parts.extend((b'--' + boundary + b'--', b'')) + + body = b'\r\n'.join(parts) + ct = b'multipart/form-data; boundary=' + boundary + headers = { + 'Content-type': ct, + 'Content-length': str(len(body)) + } + return Request(self.url, body, headers) + + def search(self, terms, operator=None): + if isinstance(terms, string_types): + terms = {'name': terms} + if self.rpc_proxy is None: + self.rpc_proxy = ServerProxy(self.url, timeout=3.0) + return self.rpc_proxy.search(terms, operator or 'and') diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py new file mode 100644 index 0000000..1e14638 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py @@ -0,0 +1,1264 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# + +import gzip +from io import BytesIO +import json +import logging +import os +import posixpath +import re +try: + import threading +except ImportError: # pragma: no cover + import dummy_threading as threading +import zlib + +from . import DistlibException +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, + queue, quote, unescape, string_types, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, + Request, HTTPError, URLError) +from .database import Distribution, DistributionPath, make_dist +from .metadata import Metadata +from .util import (cached_property, parse_credentials, ensure_slash, + split_filename, get_project_data, parse_requirement, + parse_name_and_version, ServerProxy) +from .version import get_scheme, UnsupportedVersionError +from .wheel import Wheel, is_compatible + +logger = logging.getLogger(__name__) + +HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)') +CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) +HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') +DEFAULT_INDEX = 'https://pypi.python.org/pypi' + +def get_all_distribution_names(url=None): + """ + Return all distribution names known by an index. + :param url: The URL of the index. + :return: A list of all known distribution names. + """ + if url is None: + url = DEFAULT_INDEX + client = ServerProxy(url, timeout=3.0) + return client.list_packages() + +class RedirectHandler(BaseRedirectHandler): + """ + A class to work around a bug in some Python 3.2.x releases. + """ + # There's a bug in the base version for some 3.2.x + # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header + # returns e.g. /abc, it bails because it says the scheme '' + # is bogus, when actually it should use the request's + # URL for the scheme. See Python issue #13696. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. 
+ newurl = None + for key in ('location', 'uri'): + if key in headers: + newurl = headers[key] + break + if newurl is None: + return + urlparts = urlparse(newurl) + if urlparts.scheme == '': + newurl = urljoin(req.get_full_url(), newurl) + if hasattr(headers, 'replace_header'): + headers.replace_header(key, newurl) + else: + headers[key] = newurl + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, + headers) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + +class Locator(object): + """ + A base class for locators - things that locate distributions. + """ + source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') + binary_extensions = ('.egg', '.exe', '.whl') + excluded_extensions = ('.pdf',) + + # A list of tags indicating which wheels you want to match. The default + # value of None matches against the tags compatible with the running + # Python. If you want to match other values, set wheel_tags on a locator + # instance to a list of tuples (pyver, abi, arch) which you want to match. + wheel_tags = None + + downloadable_extensions = source_extensions + ('.whl',) + + def __init__(self, scheme='default'): + """ + Initialise an instance. + :param scheme: Because locators look for most recent versions, they + need to know the version scheme to use. This specifies + the current PEP-recommended scheme - use ``'legacy'`` + if you need to support existing distributions on PyPI. + """ + self._cache = {} + self.scheme = scheme + # Because of bugs in some of the handlers on some of the platforms, + # we use our own opener rather than just using urlopen. + self.opener = build_opener(RedirectHandler()) + # If get_project() is called from locate(), the matcher instance + # is set from the requirement passed to locate(). See issue #18 for + # why this can be useful to know. + self.matcher = None + + def clear_cache(self): + self._cache.clear() + + def _get_scheme(self): + return self._scheme + + def _set_scheme(self, value): + self._scheme = value + + scheme = property(_get_scheme, _set_scheme) + + def _get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This should be implemented in subclasses. + + If called from a locate() request, self.matcher will be set to a + matcher for the requirement to satisfy, otherwise it will be None. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Please implement in the subclass') + + def get_project(self, name): + """ + For a given project, get a dictionary mapping available versions to Distribution + instances. + + This calls _get_project to do all the work, and just implements a caching layer on top. + """ + if self._cache is None: + result = self._get_project(name) + elif name in self._cache: + result = self._cache[name] + else: + result = self._get_project(name) + self._cache[name] = result + return result + + def score_url(self, url): + """ + Give an url a score which can be used to choose preferred URLs + for a given project release. 
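+
+ The score is a tuple, so candidates compare lexicographically; a
+ hypothetical comparison, given any :class:`Locator` instance
+ ``locator`` (URLs are placeholders)::
+
+     s1 = locator.score_url('https://pypi.python.org/packages/foo-1.0.tar.gz')
+     s2 = locator.score_url('http://example.com/foo-1.0.tar.gz')
+     s1 > s2   # True: https, and hosted on PyPI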
+ """ + t = urlparse(url) + basename = posixpath.basename(t.path) + compatible = True + is_wheel = basename.endswith('.whl') + if is_wheel: + compatible = is_compatible(Wheel(basename), self.wheel_tags) + return (t.scheme != 'https', 'pypi.python.org' in t.netloc, + is_wheel, compatible, basename) + + def prefer_url(self, url1, url2): + """ + Choose one of two URLs where both are candidates for distribution + archives for the same version of a distribution (for example, + .tar.gz vs. zip). + + The current implementation favours https:// URLs over http://, archives + from PyPI over those from other locations, wheel compatibility (if a + wheel) and then the archive name. + """ + result = url2 + if url1: + s1 = self.score_url(url1) + s2 = self.score_url(url2) + if s1 > s2: + result = url1 + if result != url2: + logger.debug('Not replacing %r with %r', url1, url2) + else: + logger.debug('Replacing %r with %r', url1, url2) + return result + + def split_filename(self, filename, project_name): + """ + Attempt to split a filename in project name, version and Python version. + """ + return split_filename(filename, project_name) + + def convert_url_to_download_info(self, url, project_name): + """ + See if a URL is a candidate for a download URL for a project (the URL + has typically been scraped from an HTML page). + + If it is, a dictionary is returned with keys "name", "version", + "filename" and "url"; otherwise, None is returned. + """ + def same_project(name1, name2): + name1, name2 = name1.lower(), name2.lower() + if name1 == name2: + result = True + else: + # distribute replaces '-' by '_' in project names, so it + # can tell where the version starts in a filename. + result = name1.replace('_', '-') == name2.replace('_', '-') + return result + + result = None + scheme, netloc, path, params, query, frag = urlparse(url) + if frag.lower().startswith('egg='): + logger.debug('%s: version hint in fragment: %r', + project_name, frag) + m = HASHER_HASH.match(frag) + if m: + algo, digest = m.groups() + else: + algo, digest = None, None + origpath = path + if path and path[-1] == '/': + path = path[:-1] + if path.endswith('.whl'): + try: + wheel = Wheel(path) + if is_compatible(wheel, self.wheel_tags): + if project_name is None: + include = True + else: + include = same_project(wheel.name, project_name) + if include: + result = { + 'name': wheel.name, + 'version': wheel.version, + 'filename': wheel.filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + 'python-version': ', '.join( + ['.'.join(list(v[2:])) for v in wheel.pyver]), + } + except Exception as e: + logger.warning('invalid path for wheel: %s', path) + elif path.endswith(self.downloadable_extensions): + path = filename = posixpath.basename(path) + for ext in self.downloadable_extensions: + if path.endswith(ext): + path = path[:-len(ext)] + t = self.split_filename(path, project_name) + if not t: + logger.debug('No match for project/version: %s', path) + else: + name, version, pyver = t + if not project_name or same_project(project_name, name): + result = { + 'name': name, + 'version': version, + 'filename': filename, + 'url': urlunparse((scheme, netloc, origpath, + params, query, '')), + #'packagetype': 'sdist', + } + if pyver: + result['python-version'] = pyver + break + if result and algo: + result['%s_digest' % algo] = digest + return result + + def _get_digest(self, info): + """ + Get a digest from a dictionary by looking at keys of the form + 'algo_digest'. 
+ + Returns a 2-tuple (algo, digest) if found, else None. Currently + looks only for SHA256, then MD5. + """ + result = None + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break + return result + + def _update_version_data(self, result, info): + """ + Update a result dictionary (the final result from _get_project) with a + dictionary for a specific version, which typically holds information + gleaned from a filename or URL for an archive for the distribution. + """ + name = info.pop('name') + version = info.pop('version') + if version in result: + dist = result[version] + md = dist.metadata + else: + dist = make_dist(name, version, scheme=self.scheme) + md = dist.metadata + dist.digest = digest = self._get_digest(info) + url = info['url'] + result['digests'][url] = digest + if md.source_url != info['url']: + md.source_url = self.prefer_url(md.source_url, url) + result['urls'].setdefault(version, set()).add(url) + dist.locator = self + result[version] = dist + + def locate(self, requirement, prereleases=False): + """ + Find the most recent distribution which matches the given + requirement. + + :param requirement: A requirement of the form 'foo (1.0)' or perhaps + 'foo (>= 1.0, < 2.0, != 1.3)' + :param prereleases: If ``True``, allow pre-release versions + to be located. Otherwise, pre-release versions + are not returned. + :return: A :class:`Distribution` instance, or ``None`` if no such + distribution could be located. + """ + result = None + r = parse_requirement(requirement) + if r is None: + raise DistlibException('Not a valid requirement: %r' % requirement) + scheme = get_scheme(self.scheme) + self.matcher = matcher = scheme.matcher(r.requirement) + logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) + versions = self.get_project(r.name) + if len(versions) > 2: # urls and digests keys are present + # sometimes, versions are invalid + slist = [] + vcls = matcher.version_class + for k in versions: + if k in ('urls', 'digests'): + continue + try: + if not matcher.match(k): + logger.debug('%s did not match %r', matcher, k) + else: + if prereleases or not vcls(k).is_prerelease: + slist.append(k) + else: + logger.debug('skipping pre-release ' + 'version %s of %s', k, matcher.name) + except Exception: # pragma: no cover + logger.warning('error matching %s with %r', matcher, k) + pass # slist.append(k) + if len(slist) > 1: + slist = sorted(slist, key=scheme.key) + if slist: + logger.debug('sorted list: %s', slist) + version = slist[-1] + result = versions[version] + if result: + if r.extras: + result.extras = r.extras + result.download_urls = versions.get('urls', {}).get(version, set()) + d = {} + sd = versions.get('digests', {}) + for url in result.download_urls: + if url in sd: + d[url] = sd[url] + result.digests = d + self.matcher = None + return result + + +class PyPIRPCLocator(Locator): + """ + This locator uses XML-RPC to locate distributions. It therefore + cannot be used with simple mirrors (that only mirror file content). + """ + def __init__(self, url, **kwargs): + """ + Initialise an instance. + + :param url: The URL to use for XML-RPC. + :param kwargs: Passed to the superclass constructor. + """ + super(PyPIRPCLocator, self).__init__(**kwargs) + self.base_url = url + self.client = ServerProxy(url, timeout=3.0) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. 
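+
+ A sketch (XML-RPC, so it needs a full index rather than a simple
+ mirror)::
+
+     locator = PyPIRPCLocator('https://pypi.python.org/pypi')
+     names = locator.get_distribution_names()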
+ """ + return set(self.client.list_packages()) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + versions = self.client.package_releases(name, True) + for v in versions: + urls = self.client.release_urls(name, v) + data = self.client.release_data(name, v) + metadata = Metadata(scheme=self.scheme) + metadata.name = data['name'] + metadata.version = data['version'] + metadata.license = data.get('license') + metadata.keywords = data.get('keywords', []) + metadata.summary = data.get('summary') + dist = Distribution(metadata) + if urls: + info = urls[0] + metadata.source_url = info['url'] + dist.digest = self._get_digest(info) + dist.locator = self + result[v] = dist + for info in urls: + url = info['url'] + digest = self._get_digest(info) + result['urls'].setdefault(v, set()).add(url) + result['digests'][url] = digest + return result + +class PyPIJSONLocator(Locator): + """ + This locator uses PyPI's JSON interface. It's very limited in functionality + and probably not worth using. + """ + def __init__(self, url, **kwargs): + super(PyPIJSONLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + url = urljoin(self.base_url, '%s/json' % quote(name)) + try: + resp = self.opener.open(url) + data = resp.read().decode() # for now + d = json.loads(data) + md = Metadata(scheme=self.scheme) + data = d['info'] + md.name = data['name'] + md.version = data['version'] + md.license = data.get('license') + md.keywords = data.get('keywords', []) + md.summary = data.get('summary') + dist = Distribution(md) + dist.locator = self + urls = d['urls'] + result[md.version] = dist + for info in d['urls']: + url = info['url'] + dist.download_urls.add(url) + dist.digests[url] = self._get_digest(info) + result['urls'].setdefault(md.version, set()).add(url) + result['digests'][url] = self._get_digest(info) + # Now get other releases + for version, infos in d['releases'].items(): + if version == md.version: + continue # already done + omd = Metadata(scheme=self.scheme) + omd.name = md.name + omd.version = version + odist = Distribution(omd) + odist.locator = self + result[version] = odist + for info in infos: + url = info['url'] + odist.download_urls.add(url) + odist.digests[url] = self._get_digest(info) + result['urls'].setdefault(version, set()).add(url) + result['digests'][url] = self._get_digest(info) +# for info in urls: +# md.source_url = info['url'] +# dist.digest = self._get_digest(info) +# dist.locator = self +# for info in urls: +# url = info['url'] +# result['urls'].setdefault(md.version, set()).add(url) +# result['digests'][url] = self._get_digest(info) + except Exception as e: + logger.exception('JSON fetch failed: %s', e) + return result + + +class Page(object): + """ + This class represents a scraped HTML page. + """ + # The following slightly hairy-looking regex just looks for the contents of + # an anchor link, which has an attribute "href" either immediately preceded + # or immediately followed by a "rel" attribute. The attribute values can be + # declared with double quotes, single quotes or no quotes - which leads to + # the length of the expression. + _href = re.compile(""" +(rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*))\s+)? 
+href\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)) +(\s+rel\s*=\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\s\n]*)))? +""", re.I | re.S | re.X) + _base = re.compile(r"""]+)""", re.I | re.S) + + def __init__(self, data, url): + """ + Initialise an instance with the Unicode page contents and the URL they + came from. + """ + self.data = data + self.base_url = self.url = url + m = self._base.search(self.data) + if m: + self.base_url = m.group(1) + + _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + @cached_property + def links(self): + """ + Return the URLs of all the links on a page together with information + about their "rel" attribute, for determining which ones to treat as + downloads and which ones to queue for further scraping. + """ + def clean(url): + "Tidy up an URL." + scheme, netloc, path, params, query, frag = urlparse(url) + return urlunparse((scheme, netloc, quote(path), + params, query, frag)) + + result = set() + for match in self._href.finditer(self.data): + d = match.groupdict('') + rel = (d['rel1'] or d['rel2'] or d['rel3'] or + d['rel4'] or d['rel5'] or d['rel6']) + url = d['url1'] or d['url2'] or d['url3'] + url = urljoin(self.base_url, url) + url = unescape(url) + url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) + result.add((url, rel)) + # We sort the result, hoping to bring the most recent versions + # to the front + result = sorted(result, key=lambda t: t[0], reverse=True) + return result + + +class SimpleScrapingLocator(Locator): + """ + A locator which scrapes HTML pages to locate downloads for a distribution. + This runs multiple threads to do the I/O; performance is at least as good + as pip's PackageFinder, which works in an analogous fashion. + """ + + # These are used to deal with various Content-Encoding schemes. + decoders = { + 'deflate': zlib.decompress, + 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), + 'none': lambda b: b, + } + + def __init__(self, url, timeout=None, num_workers=10, **kwargs): + """ + Initialise an instance. + :param url: The root URL to use for scraping. + :param timeout: The timeout, in seconds, to be applied to requests. + This defaults to ``None`` (no timeout specified). + :param num_workers: The number of worker threads you want to do I/O, + This defaults to 10. + :param kwargs: Passed to the superclass. + """ + super(SimpleScrapingLocator, self).__init__(**kwargs) + self.base_url = ensure_slash(url) + self.timeout = timeout + self._page_cache = {} + self._seen = set() + self._to_fetch = queue.Queue() + self._bad_hosts = set() + self.skip_externals = False + self.num_workers = num_workers + self._lock = threading.RLock() + # See issue #45: we need to be resilient when the locator is used + # in a thread, e.g. with concurrent.futures. We can't use self._lock + # as it is for coordinating our internal threads - the ones created + # in _prepare_threads. + self._gplock = threading.RLock() + + def _prepare_threads(self): + """ + Threads are created only when get_project is called, and terminate + before it returns. They are there primarily to parallelise I/O (i.e. + fetching web pages). + """ + self._threads = [] + for i in range(self.num_workers): + t = threading.Thread(target=self._fetch) + t.setDaemon(True) + t.start() + self._threads.append(t) + + def _wait_threads(self): + """ + Tell all the threads to terminate (by sending a sentinel value) and + wait for them to do so. 
+ """ + # Note that you need two loops, since you can't say which + # thread will get each sentinel + for t in self._threads: + self._to_fetch.put(None) # sentinel + for t in self._threads: + t.join() + self._threads = [] + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + with self._gplock: + self.result = result + self.project_name = name + url = urljoin(self.base_url, '%s/' % quote(name)) + self._seen.clear() + self._page_cache.clear() + self._prepare_threads() + try: + logger.debug('Queueing %s', url) + self._to_fetch.put(url) + self._to_fetch.join() + finally: + self._wait_threads() + del self.result + return result + + platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' + r'win(32|-amd64)|macosx-?\d+)\b', re.I) + + def _is_platform_dependent(self, url): + """ + Does an URL refer to a platform-specific download? + """ + return self.platform_dependent.search(url) + + def _process_download(self, url): + """ + See if an URL is a suitable download for a project. + + If it is, register information in the result dictionary (for + _get_project) about the specific version it's for. + + Note that the return value isn't actually used other than as a boolean + value. + """ + if self._is_platform_dependent(url): + info = None + else: + info = self.convert_url_to_download_info(url, self.project_name) + logger.debug('process_download: %s -> %s', url, info) + if info: + with self._lock: # needed because self.result is shared + self._update_version_data(self.result, info) + return info + + def _should_queue(self, link, referrer, rel): + """ + Determine whether a link URL from a referring page and with a + particular "rel" attribute should be queued for scraping. + """ + scheme, netloc, path, _, _, _ = urlparse(link) + if path.endswith(self.source_extensions + self.binary_extensions + + self.excluded_extensions): + result = False + elif self.skip_externals and not link.startswith(self.base_url): + result = False + elif not referrer.startswith(self.base_url): + result = False + elif rel not in ('homepage', 'download'): + result = False + elif scheme not in ('http', 'https', 'ftp'): + result = False + elif self._is_platform_dependent(link): + result = False + else: + host = netloc.split(':', 1)[0] + if host.lower() == 'localhost': + result = False + else: + result = True + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, + referrer, result) + return result + + def _fetch(self): + """ + Get a URL to fetch from the work queue, get the HTML page, examine its + links for download candidates and candidates for further scraping. + + This is a handy method to run in a thread. + """ + while True: + url = self._to_fetch.get() + try: + if url: + page = self.get_page(url) + if page is None: # e.g. after an error + continue + for link, rel in page.links: + if link not in self._seen: + self._seen.add(link) + if (not self._process_download(link) and + self._should_queue(link, url, rel)): + logger.debug('Queueing %s from %s', link, url) + self._to_fetch.put(link) + finally: + # always do this, to avoid hangs :-) + self._to_fetch.task_done() + if not url: + #logger.debug('Sentinel seen, quitting.') + break + + def get_page(self, url): + """ + Get the HTML for an URL, possibly from an in-memory cache. + + XXX TODO Note: this cache is never actually cleared. It's assumed that + the data won't get stale over the lifetime of a locator instance (not + necessarily true for the default_locator). 
+ """ + # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api + scheme, netloc, path, _, _, _ = urlparse(url) + if scheme == 'file' and os.path.isdir(url2pathname(path)): + url = urljoin(ensure_slash(url), 'index.html') + + if url in self._page_cache: + result = self._page_cache[url] + logger.debug('Returning %s from cache: %s', url, result) + else: + host = netloc.split(':', 1)[0] + result = None + if host in self._bad_hosts: + logger.debug('Skipping %s due to bad host %s', url, host) + else: + req = Request(url, headers={'Accept-encoding': 'identity'}) + try: + logger.debug('Fetching %s', url) + resp = self.opener.open(req, timeout=self.timeout) + logger.debug('Fetched %s', url) + headers = resp.info() + content_type = headers.get('Content-Type', '') + if HTML_CONTENT_TYPE.match(content_type): + final_url = resp.geturl() + data = resp.read() + encoding = headers.get('Content-Encoding') + if encoding: + decoder = self.decoders[encoding] # fail if not found + data = decoder(data) + encoding = 'utf-8' + m = CHARSET.search(content_type) + if m: + encoding = m.group(1) + try: + data = data.decode(encoding) + except UnicodeError: # pragma: no cover + data = data.decode('latin-1') # fallback + result = Page(data, final_url) + self._page_cache[final_url] = result + except HTTPError as e: + if e.code != 404: + logger.exception('Fetch failed: %s: %s', url, e) + except URLError as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + with self._lock: + self._bad_hosts.add(host) + except Exception as e: # pragma: no cover + logger.exception('Fetch failed: %s: %s', url, e) + finally: + self._page_cache[url] = result # even if None (failure) + return result + + _distname_re = re.compile(']*>([^<]+)<') + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + page = self.get_page(self.base_url) + if not page: + raise DistlibException('Unable to get %s' % self.base_url) + for match in self._distname_re.finditer(page.data): + result.add(match.group(1)) + return result + +class DirectoryLocator(Locator): + """ + This class locates distributions in a directory tree. + """ + + def __init__(self, path, **kwargs): + """ + Initialise an instance. + :param path: The root of the directory tree to search. + :param kwargs: Passed to the superclass constructor, + except for: + * recursive - if True (the default), subdirectories are + recursed into. If False, only the top-level directory + is searched, + """ + self.recursive = kwargs.pop('recursive', True) + super(DirectoryLocator, self).__init__(**kwargs) + path = os.path.abspath(path) + if not os.path.isdir(path): # pragma: no cover + raise DistlibException('Not a directory: %r' % path) + self.base_dir = path + + def should_include(self, filename, parent): + """ + Should a filename be considered as a candidate for a distribution + archive? As well as the filename, the directory which contains it + is provided, though not used by the current implementation. 
+ """ + return filename.endswith(self.downloadable_extensions) + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, name) + if info: + self._update_version_data(result, info) + if not self.recursive: + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for root, dirs, files in os.walk(self.base_dir): + for fn in files: + if self.should_include(fn, root): + fn = os.path.join(root, fn) + url = urlunparse(('file', '', + pathname2url(os.path.abspath(fn)), + '', '', '')) + info = self.convert_url_to_download_info(url, None) + if info: + result.add(info['name']) + if not self.recursive: + break + return result + +class JSONLocator(Locator): + """ + This locator uses special extended metadata (not available on PyPI) and is + the basis of performant dependency resolution in distlib. Other locators + require archive downloads before dependencies can be determined! As you + might imagine, that can be slow. + """ + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + raise NotImplementedError('Not available from this locator') + + def _get_project(self, name): + result = {'urls': {}, 'digests': {}} + data = get_project_data(name) + if data: + for info in data.get('files', []): + if info['ptype'] != 'sdist' or info['pyversion'] != 'source': + continue + # We don't store summary in project metadata as it makes + # the data bigger for no benefit during dependency + # resolution + dist = make_dist(data['name'], info['version'], + summary=data.get('summary', + 'Placeholder for summary'), + scheme=self.scheme) + md = dist.metadata + md.source_url = info['url'] + # TODO SHA256 digest + if 'digest' in info and info['digest']: + dist.digest = ('md5', info['digest']) + md.dependencies = info.get('requirements', {}) + dist.exports = info.get('exports', {}) + result[dist.version] = dist + result['urls'].setdefault(dist.version, set()).add(info['url']) + return result + +class DistPathLocator(Locator): + """ + This locator finds installed distributions in a path. It can be useful for + adding to an :class:`AggregatingLocator`. + """ + def __init__(self, distpath, **kwargs): + """ + Initialise an instance. + + :param distpath: A :class:`DistributionPath` instance to search. + """ + super(DistPathLocator, self).__init__(**kwargs) + assert isinstance(distpath, DistributionPath) + self.distpath = distpath + + def _get_project(self, name): + dist = self.distpath.get_distribution(name) + if dist is None: + result = {'urls': {}, 'digests': {}} + else: + result = { + dist.version: dist, + 'urls': {dist.version: set([dist.source_url])}, + 'digests': {dist.version: set([None])} + } + return result + + +class AggregatingLocator(Locator): + """ + This class allows you to chain and/or merge a list of locators. + """ + def __init__(self, *locators, **kwargs): + """ + Initialise an instance. + + :param locators: The list of locators to search. + :param kwargs: Passed to the superclass constructor, + except for: + * merge - if False (the default), the first successful + search from any of the locators is returned. If True, + the results from all locators are merged (this can be + slow). 
+ """ + self.merge = kwargs.pop('merge', False) + self.locators = locators + super(AggregatingLocator, self).__init__(**kwargs) + + def clear_cache(self): + super(AggregatingLocator, self).clear_cache() + for locator in self.locators: + locator.clear_cache() + + def _set_scheme(self, value): + self._scheme = value + for locator in self.locators: + locator.scheme = value + + scheme = property(Locator.scheme.fget, _set_scheme) + + def _get_project(self, name): + result = {} + for locator in self.locators: + d = locator.get_project(name) + if d: + if self.merge: + files = result.get('urls', {}) + digests = result.get('digests', {}) + # next line could overwrite result['urls'], result['digests'] + result.update(d) + df = result.get('urls') + if files and df: + for k, v in files.items(): + if k in df: + df[k] |= v + else: + df[k] = v + dd = result.get('digests') + if digests and dd: + dd.update(digests) + else: + # See issue #18. If any dists are found and we're looking + # for specific constraints, we only return something if + # a match is found. For example, if a DirectoryLocator + # returns just foo (1.0) while we're looking for + # foo (>= 2.0), we'll pretend there was nothing there so + # that subsequent locators can be queried. Otherwise we + # would just return foo (1.0) which would then lead to a + # failure to find foo (>= 2.0), because other locators + # weren't searched. Note that this only matters when + # merge=False. + if self.matcher is None: + found = True + else: + found = False + for k in d: + if self.matcher.match(k): + found = True + break + if found: + result = d + break + return result + + def get_distribution_names(self): + """ + Return all the distribution names known to this locator. + """ + result = set() + for locator in self.locators: + try: + result |= locator.get_distribution_names() + except NotImplementedError: + pass + return result + + +# We use a legacy scheme simply because most of the dists on PyPI use legacy +# versions which don't conform to PEP 426 / PEP 440. +default_locator = AggregatingLocator( + JSONLocator(), + SimpleScrapingLocator('https://pypi.python.org/simple/', + timeout=3.0), + scheme='legacy') + +locate = default_locator.locate + +NAME_VERSION_RE = re.compile(r'(?P[\w-]+)\s*' + r'\(\s*(==\s*)?(?P[^)]+)\)$') + +class DependencyFinder(object): + """ + Locate dependencies for distributions. + """ + + def __init__(self, locator=None): + """ + Initialise an instance, using the specified locator + to locate distributions. + """ + self.locator = locator or default_locator + self.scheme = get_scheme(self.locator.scheme) + + def add_distribution(self, dist): + """ + Add a distribution to the finder. This will update internal information + about who provides what. + :param dist: The distribution to add. + """ + logger.debug('adding distribution %s', dist) + name = dist.key + self.dists_by_name[name] = dist + self.dists[(name, dist.version)] = dist + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Add to provided: %s, %s, %s', name, version, dist) + self.provided.setdefault(name, set()).add((version, dist)) + + def remove_distribution(self, dist): + """ + Remove a distribution from the finder. This will update internal + information about who provides what. + :param dist: The distribution to remove. 
+ """ + logger.debug('removing distribution %s', dist) + name = dist.key + del self.dists_by_name[name] + del self.dists[(name, dist.version)] + for p in dist.provides: + name, version = parse_name_and_version(p) + logger.debug('Remove from provided: %s, %s, %s', name, version, dist) + s = self.provided[name] + s.remove((version, dist)) + if not s: + del self.provided[name] + + def get_matcher(self, reqt): + """ + Get a version matcher for a requirement. + :param reqt: The requirement + :type reqt: str + :return: A version matcher (an instance of + :class:`distlib.version.Matcher`). + """ + try: + matcher = self.scheme.matcher(reqt) + except UnsupportedVersionError: # pragma: no cover + # XXX compat-mode if cannot read the version + name = reqt.split()[0] + matcher = self.scheme.matcher(name) + return matcher + + def find_providers(self, reqt): + """ + Find the distributions which can fulfill a requirement. + + :param reqt: The requirement. + :type reqt: str + :return: A set of distribution which can fulfill the requirement. + """ + matcher = self.get_matcher(reqt) + name = matcher.key # case-insensitive + result = set() + provided = self.provided + if name in provided: + for version, provider in provided[name]: + try: + match = matcher.match(version) + except UnsupportedVersionError: + match = False + + if match: + result.add(provider) + break + return result + + def try_to_replace(self, provider, other, problems): + """ + Attempt to replace one provider with another. This is typically used + when resolving dependencies from multiple sources, e.g. A requires + (B >= 1.0) while C requires (B >= 1.1). + + For successful replacement, ``provider`` must meet all the requirements + which ``other`` fulfills. + + :param provider: The provider we are trying to replace with. + :param other: The provider we're trying to replace. + :param problems: If False is returned, this will contain what + problems prevented replacement. This is currently + a tuple of the literal string 'cantreplace', + ``provider``, ``other`` and the set of requirements + that ``provider`` couldn't fulfill. + :return: True if we can replace ``other`` with ``provider``, else + False. + """ + rlist = self.reqts[other] + unmatched = set() + for s in rlist: + matcher = self.get_matcher(s) + if not matcher.match(provider.version): + unmatched.add(s) + if unmatched: + # can't replace other with provider + problems.add(('cantreplace', provider, other, + frozenset(unmatched))) + result = False + else: + # can replace other with provider + self.remove_distribution(other) + del self.reqts[other] + for s in rlist: + self.reqts.setdefault(provider, set()).add(s) + self.add_distribution(provider) + result = True + return result + + def find(self, requirement, meta_extras=None, prereleases=False): + """ + Find a distribution and all distributions it depends on. + + :param requirement: The requirement specifying the distribution to + find, or a Distribution instance. + :param meta_extras: A list of meta extras such as :test:, :build: and + so on. + :param prereleases: If ``True``, allow pre-release versions to be + returned - otherwise, don't return prereleases + unless they're all that's available. + + Return a set of :class:`Distribution` instances and a set of + problems. 
+ + The distributions returned should be such that they have the + :attr:`required` attribute set to ``True`` if they were + from the ``requirement`` passed to ``find()``, and they have the + :attr:`build_time_dependency` attribute set to ``True`` unless they + are post-installation dependencies of the ``requirement``. + + The problems should be a tuple consisting of the string + ``'unsatisfied'`` and the requirement which couldn't be satisfied + by any distribution known to the locator. + """ + + self.provided = {} + self.dists = {} + self.dists_by_name = {} + self.reqts = {} + + meta_extras = set(meta_extras or []) + if ':*:' in meta_extras: + meta_extras.remove(':*:') + # :meta: and :run: are implicitly included + meta_extras |= set([':test:', ':build:', ':dev:']) + + if isinstance(requirement, Distribution): + dist = odist = requirement + logger.debug('passed %s as requirement', odist) + else: + dist = odist = self.locator.locate(requirement, + prereleases=prereleases) + if dist is None: + raise DistlibException('Unable to locate %r' % requirement) + logger.debug('located %s', odist) + dist.requested = True + problems = set() + todo = set([dist]) + install_dists = set([odist]) + while todo: + dist = todo.pop() + name = dist.key # case-insensitive + if name not in self.dists_by_name: + self.add_distribution(dist) + else: + #import pdb; pdb.set_trace() + other = self.dists_by_name[name] + if other != dist: + self.try_to_replace(dist, other, problems) + + ireqts = dist.run_requires | dist.meta_requires + sreqts = dist.build_requires + ereqts = set() + if dist in install_dists: + for key in ('test', 'build', 'dev'): + e = ':%s:' % key + if e in meta_extras: + ereqts |= getattr(dist, '%s_requires' % key) + all_reqts = ireqts | sreqts | ereqts + for r in all_reqts: + providers = self.find_providers(r) + if not providers: + logger.debug('No providers found for %r', r) + provider = self.locator.locate(r, prereleases=prereleases) + # If no provider is found and we didn't consider + # prereleases, consider them now. + if provider is None and not prereleases: + provider = self.locator.locate(r, prereleases=True) + if provider is None: + logger.debug('Cannot satisfy %r', r) + problems.add(('unsatisfied', r)) + else: + n, v = provider.key, provider.version + if (n, v) not in self.dists: + todo.add(provider) + providers.add(provider) + if r in ireqts and dist in install_dists: + install_dists.add(provider) + logger.debug('Adding %s to install_dists', + provider.name_and_version) + for p in providers: + name = p.key + if name not in self.dists_by_name: + self.reqts.setdefault(p, set()).add(r) + else: + other = self.dists_by_name[name] + if other != p: + # see if other can be replaced by p + self.try_to_replace(p, other, problems) + + dists = set(self.dists.values()) + for dist in dists: + dist.build_time_dependency = dist not in install_dists + if dist.build_time_dependency: + logger.debug('%s is a build-time dependency only.', + dist.name_and_version) + logger.debug('find done for %s', odist) + return dists, problems diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py new file mode 100644 index 0000000..21cff45 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/manifest.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2013 Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Class representing the list of files in a distribution. 
+ +Equivalent to distutils.filelist, but fixes some problems. +""" +import fnmatch +import logging +import os +import re + +from . import DistlibException +from .compat import fsdecode +from .util import convert_path + + +__all__ = ['Manifest'] + +logger = logging.getLogger(__name__) + +# a \ followed by some spaces + EOL +_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M) +_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) + + +class Manifest(object): + """A list of files built by on exploring the filesystem and filtered by + applying various patterns to what we find there. + """ + + def __init__(self, base=None): + """ + Initialise an instance. + + :param base: The base directory to explore under. + """ + self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) + self.prefix = self.base + os.sep + self.allfiles = None + self.files = set() + + # + # Public API + # + + def findall(self): + """Find all files under the base and set ``allfiles`` to the absolute + pathnames of files found. + """ + from stat import S_ISREG, S_ISDIR, S_ISLNK + + self.allfiles = allfiles = [] + root = self.base + stack = [root] + pop = stack.pop + push = stack.append + + while stack: + root = pop() + names = os.listdir(root) + + for name in names: + fullname = os.path.join(root, name) + + # Avoid excess stat calls -- just one will do, thank you! + stat = os.stat(fullname) + mode = stat.st_mode + if S_ISREG(mode): + allfiles.append(fsdecode(fullname)) + elif S_ISDIR(mode) and not S_ISLNK(mode): + push(fullname) + + def add(self, item): + """ + Add a file to the manifest. + + :param item: The pathname to add. This can be relative to the base. + """ + if not item.startswith(self.prefix): + item = os.path.join(self.base, item) + self.files.add(os.path.normpath(item)) + + def add_many(self, items): + """ + Add a list of files to the manifest. + + :param items: The pathnames to add. These can be relative to the base. + """ + for item in items: + self.add(item) + + def sorted(self, wantdirs=False): + """ + Return sorted files in directory order + """ + + def add_dir(dirs, d): + dirs.add(d) + logger.debug('add_dir added %s', d) + if d != self.base: + parent, _ = os.path.split(d) + assert parent not in ('', '/') + add_dir(dirs, parent) + + result = set(self.files) # make a copy! + if wantdirs: + dirs = set() + for f in result: + add_dir(dirs, os.path.dirname(f)) + result |= dirs + return [os.path.join(*path_tuple) for path_tuple in + sorted(os.path.split(path) for path in result)] + + def clear(self): + """Clear all collected files.""" + self.files = set() + self.allfiles = [] + + def process_directive(self, directive): + """ + Process a directive which either adds some files from ``allfiles`` to + ``files``, or removes some files from ``files``. + + :param directive: The directive to process. This should be in a format + compatible with distutils ``MANIFEST.in`` files: + + http://docs.python.org/distutils/sourcedist.html#commands + """ + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dirpattern). + action, patterns, thedir, dirpattern = self._parse_directive(directive) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
+ if action == 'include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, anchor=True):
+ logger.warning('no files found matching %r', pattern)
+
+ elif action == 'exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, anchor=True)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'found matching %r', pattern)
+
+ elif action == 'global-include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, anchor=False):
+ logger.warning('no files found matching %r '
+ 'anywhere in distribution', pattern)
+
+ elif action == 'global-exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, anchor=False)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'matching %r found anywhere in '
+ # 'distribution', pattern)
+
+ elif action == 'recursive-include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, prefix=thedir):
+ logger.warning('no files found matching %r '
+ 'under directory %r', pattern, thedir)
+
+ elif action == 'recursive-exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, prefix=thedir)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'matching %r found under directory %r',
+ # pattern, thedir)
+
+ elif action == 'graft':
+ if not self._include_pattern(None, prefix=dirpattern):
+ logger.warning('no directories found matching %r',
+ dirpattern)
+
+ elif action == 'prune':
+ if not self._exclude_pattern(None, prefix=dirpattern):
+ logger.warning('no previously-included directories found '
+ 'matching %r', dirpattern)
+ else: # pragma: no cover
+ # This should never happen, as it should be caught in
+ # _parse_template_line
+ raise DistlibException(
+ 'invalid action %r' % action)
+
+ #
+ # Private API
+ #
+
+ def _parse_directive(self, directive):
+ """
+ Validate a directive.
+ :param directive: The directive to validate.
+ :return: A tuple of action, patterns, thedir, dir_patterns
+ """
+ words = directive.split()
+ if len(words) == 1 and words[0] not in ('include', 'exclude',
+ 'global-include',
+ 'global-exclude',
+ 'recursive-include',
+ 'recursive-exclude',
+ 'graft', 'prune'):
+ # no action given, let's use the default 'include'
+ words.insert(0, 'include')
+
+ action = words[0]
+ patterns = thedir = dir_pattern = None
+
+ if action in ('include', 'exclude',
+ 'global-include', 'global-exclude'):
+ if len(words) < 2:
+ raise DistlibException(
+ '%r expects <pattern1> <pattern2> ...' % action)
+
+ patterns = [convert_path(word) for word in words[1:]]
+
+ elif action in ('recursive-include', 'recursive-exclude'):
+ if len(words) < 3:
+ raise DistlibException(
+ '%r expects <dir> <pattern1> <pattern2> ...' % action)
+
+ thedir = convert_path(words[1])
+ patterns = [convert_path(word) for word in words[2:]]
+
+ elif action in ('graft', 'prune'):
+ if len(words) != 2:
+ raise DistlibException(
+ '%r expects a single <dir_pattern>' % action)
+
+ dir_pattern = convert_path(words[1])
+
+ else:
+ raise DistlibException('unknown action %r' % action)
+
+ return action, patterns, thedir, dir_pattern
+
+ def _include_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Select strings (presumably filenames) from 'self.files' that
+ match 'pattern', a Unix-style wildcard (glob) pattern.
+
+ Patterns are not quite the same as implemented by the 'fnmatch'
+ module: '*' and '?' match non-special characters, where "special"
+ is platform-dependent: slash on Unix; colon, slash, and backslash on
+ DOS/Windows; and colon on Mac OS.
+
+ If 'anchor' is true (the default), then the pattern match is more
+ stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
+ 'anchor' is false, both of these will match.
+
+ If 'prefix' is supplied, then only filenames starting with 'prefix'
+ (itself a pattern) and ending with 'pattern', with anything in between
+ them, will match. 'anchor' is ignored in this case.
+
+ If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+ 'pattern' is assumed to be either a string containing a regex or a
+ regex object -- no translation is done, the regex is just compiled
+ and used as-is.
+
+ Selected strings will be added to self.files.
+
+ Return True if files are found.
+ """
+ # XXX docstring lying about what the special chars are?
+ found = False
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+
+ # delayed loading of allfiles list
+ if self.allfiles is None:
+ self.findall()
+
+ for name in self.allfiles:
+ if pattern_re.search(name):
+ self.files.add(name)
+ found = True
+ return found
+
+ def _exclude_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Remove strings (presumably filenames) from 'files' that match
+ 'pattern'.
+
+ Other parameters are the same as for 'include_pattern()', above.
+ The list 'self.files' is modified in place. Return True if files are
+ found.
+
+ This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
+ packaging source distributions
+ """
+ found = False
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+ for f in list(self.files):
+ if pattern_re.search(f):
+ self.files.remove(f)
+ found = True
+ return found
+
+ def _translate_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Translate a shell-like wildcard pattern to a compiled regular
+ expression.
+
+ Return the compiled regex. If 'is_regex' true,
+ then 'pattern' is directly compiled to a regex (if it's a string)
+ or just returned as-is (assumes it's a regex object).
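+
+ A sketch of the anchored case on a POSIX system (the base directory
+ is hypothetical)::
+
+     m = Manifest('/project')
+     regex = m._translate_pattern('*.py', anchor=True)
+     regex.search('/project/foo.py')      # matches
+     regex.search('/project/pkg/foo.py')  # no match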
+ """ + if is_regex: + if isinstance(pattern, str): + return re.compile(pattern) + else: + return pattern + + if pattern: + pattern_re = self._glob_to_re(pattern) + else: + pattern_re = '' + + base = re.escape(os.path.join(self.base, '')) + if prefix is not None: + # ditch end of pattern character + empty_pattern = self._glob_to_re('') + prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] + sep = os.sep + if os.sep == '\\': + sep = r'\\' + pattern_re = '^' + base + sep.join((prefix_re, + '.*' + pattern_re)) + else: # no prefix -- respect anchor flag + if anchor: + pattern_re = '^' + base + pattern_re + + return re.compile(pattern_re) + + def _glob_to_re(self, pattern): + """Translate a shell-like glob pattern to a regular expression. + + Return a string containing the regex. Differs from + 'fnmatch.translate()' in that '*' does not match "special characters" + (which are platform-specific). + """ + pattern_re = fnmatch.translate(pattern) + + # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which + # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, + # and by extension they shouldn't match such "special characters" under + # any OS. So change all non-escaped dots in the RE to match any + # character except the special characters (currently: just os.sep). + sep = os.sep + if os.sep == '\\': + # we're using a regex to manipulate a regex, so we need + # to escape the backslash twice + sep = r'\\\\' + escaped = r'\1[^%s]' % sep + pattern_re = re.sub(r'((? y, + 'gte': lambda x, y: x >= y, + 'in': lambda x, y: x in y, + 'lt': lambda x, y: x < y, + 'lte': lambda x, y: x <= y, + 'not': lambda x: not x, + 'noteq': lambda x, y: x != y, + 'notin': lambda x, y: x not in y, + } + + allowed_values = { + 'sys_platform': sys.platform, + 'python_version': '%s.%s' % sys.version_info[:2], + # parsing sys.platform is not reliable, but there is no other + # way to get e.g. 2.7.2+, and the PEP is defined with sys.version + 'python_full_version': sys.version.split(' ', 1)[0], + 'os_name': os.name, + 'platform_in_venv': str(in_venv()), + 'platform_release': platform.release(), + 'platform_version': platform.version(), + 'platform_machine': platform.machine(), + 'platform_python_implementation': python_implementation(), + } + + def __init__(self, context=None): + """ + Initialise an instance. + + :param context: If specified, names are looked up in this mapping. + """ + self.context = context or {} + self.source = None + + def get_fragment(self, offset): + """ + Get the part of the source which is causing a problem. + """ + fragment_len = 10 + s = '%r' % (self.source[offset:offset + fragment_len]) + if offset + fragment_len < len(self.source): + s += '...' + return s + + def get_handler(self, node_type): + """ + Get a handler for the specified AST node type. + """ + return getattr(self, 'do_%s' % node_type, None) + + def evaluate(self, node, filename=None): + """ + Evaluate a source string or node, using ``filename`` when + displaying errors. 
+ """ + if isinstance(node, string_types): + self.source = node + kwargs = {'mode': 'eval'} + if filename: + kwargs['filename'] = filename + try: + node = ast.parse(node, **kwargs) + except SyntaxError as e: + s = self.get_fragment(e.offset) + raise SyntaxError('syntax error %s' % s) + node_type = node.__class__.__name__.lower() + handler = self.get_handler(node_type) + if handler is None: + if self.source is None: + s = '(source not available)' + else: + s = self.get_fragment(node.col_offset) + raise SyntaxError("don't know how to evaluate %r %s" % ( + node_type, s)) + return handler(node) + + def get_attr_key(self, node): + assert isinstance(node, ast.Attribute), 'attribute node expected' + return '%s.%s' % (node.value.id, node.attr) + + def do_attribute(self, node): + if not isinstance(node.value, ast.Name): + valid = False + else: + key = self.get_attr_key(node) + valid = key in self.context or key in self.allowed_values + if not valid: + raise SyntaxError('invalid expression: %s' % key) + if key in self.context: + result = self.context[key] + else: + result = self.allowed_values[key] + return result + + def do_boolop(self, node): + result = self.evaluate(node.values[0]) + is_or = node.op.__class__ is ast.Or + is_and = node.op.__class__ is ast.And + assert is_or or is_and + if (is_and and result) or (is_or and not result): + for n in node.values[1:]: + result = self.evaluate(n) + if (is_or and result) or (is_and and not result): + break + return result + + def do_compare(self, node): + def sanity_check(lhsnode, rhsnode): + valid = True + if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str): + valid = False + #elif (isinstance(lhsnode, ast.Attribute) + # and isinstance(rhsnode, ast.Attribute)): + # klhs = self.get_attr_key(lhsnode) + # krhs = self.get_attr_key(rhsnode) + # valid = klhs != krhs + if not valid: + s = self.get_fragment(node.col_offset) + raise SyntaxError('Invalid comparison: %s' % s) + + lhsnode = node.left + lhs = self.evaluate(lhsnode) + result = True + for op, rhsnode in zip(node.ops, node.comparators): + sanity_check(lhsnode, rhsnode) + op = op.__class__.__name__.lower() + if op not in self.operators: + raise SyntaxError('unsupported operation: %r' % op) + rhs = self.evaluate(rhsnode) + result = self.operators[op](lhs, rhs) + if not result: + break + lhs = rhs + lhsnode = rhsnode + return result + + def do_expression(self, node): + return self.evaluate(node.body) + + def do_name(self, node): + valid = False + if node.id in self.context: + valid = True + result = self.context[node.id] + elif node.id in self.allowed_values: + valid = True + result = self.allowed_values[node.id] + if not valid: + raise SyntaxError('invalid expression: %s' % node.id) + return result + + def do_str(self, node): + return node.s + + +def interpret(marker, execution_context=None): + """ + Interpret a marker and return a result depending on environment. + + :param marker: The marker to interpret. + :type marker: str + :param execution_context: The context used for name lookup. + :type execution_context: mapping + """ + return Evaluator(execution_context).evaluate(marker.strip()) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py new file mode 100644 index 0000000..71525dd --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py @@ -0,0 +1,1066 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012 The Python Software Foundation. 
+# See LICENSE.txt and CONTRIBUTORS.txt. +# +"""Implementation of the Metadata for Python packages PEPs. + +Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +""" +from __future__ import unicode_literals + +import codecs +from email import message_from_file +import json +import logging +import re + + +from . import DistlibException, __version__ +from .compat import StringIO, string_types, text_type +from .markers import interpret +from .util import extract_by_key, get_extras +from .version import get_scheme, PEP440_VERSION_RE + +logger = logging.getLogger(__name__) + + +class MetadataMissingError(DistlibException): + """A required metadata is missing""" + + +class MetadataConflictError(DistlibException): + """Attempt to read or write metadata fields that are conflictual.""" + + +class MetadataUnrecognizedVersionError(DistlibException): + """Unknown metadata version number.""" + + +class MetadataInvalidError(DistlibException): + """A metadata value is invalid""" + +# public API of this module +__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] + +# Encoding used for the PKG-INFO files +PKG_INFO_ENCODING = 'utf-8' + +# preferred version. Hopefully will be changed +# to 1.2 once PEP 345 is supported everywhere +PKG_INFO_PREFERRED_VERSION = '1.1' + +_LINE_PREFIX_1_2 = re.compile('\n \|') +_LINE_PREFIX_PRE_1_2 = re.compile('\n ') +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'License', 'Classifier', 'Download-URL', 'Obsoletes', + 'Provides', 'Requires') + +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', + 'Download-URL') + +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External') + +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', + 'Obsoletes-Dist', 'Requires-External', 'Maintainer', + 'Maintainer-email', 'Project-URL') + +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', + 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', + 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', + 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', + 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', + 'Provides-Extra') + +_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', + 'Setup-Requires-Dist', 'Extension') + +_ALL_FIELDS = set() +_ALL_FIELDS.update(_241_FIELDS) +_ALL_FIELDS.update(_314_FIELDS) +_ALL_FIELDS.update(_345_FIELDS) +_ALL_FIELDS.update(_426_FIELDS) + +EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') + + +def _version2fieldlist(version): + if version == '1.0': + return _241_FIELDS + elif version == '1.1': + return _314_FIELDS + elif version == '1.2': + return _345_FIELDS + elif version == '2.0': + return _426_FIELDS + raise MetadataUnrecognizedVersionError(version) + + +def _best_version(fields): + """Detect the best version depending on the 
fields used.""" + def _has_marker(keys, markers): + for marker in markers: + if marker in keys: + return True + return False + + keys = [] + for key, value in fields.items(): + if value in ([], 'UNKNOWN', None): + continue + keys.append(key) + + possible_versions = ['1.0', '1.1', '1.2', '2.0'] + + # first let's try to see if a field is not part of one of the version + for key in keys: + if key not in _241_FIELDS and '1.0' in possible_versions: + possible_versions.remove('1.0') + if key not in _314_FIELDS and '1.1' in possible_versions: + possible_versions.remove('1.1') + if key not in _345_FIELDS and '1.2' in possible_versions: + possible_versions.remove('1.2') + if key not in _426_FIELDS and '2.0' in possible_versions: + possible_versions.remove('2.0') + + # possible_version contains qualified versions + if len(possible_versions) == 1: + return possible_versions[0] # found ! + elif len(possible_versions) == 0: + raise MetadataConflictError('Unknown metadata set') + + # let's see if one unique marker is found + is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) + is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) + is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields') + + # we have the choice, 1.0, or 1.2, or 2.0 + # - 1.0 has a broken Summary field but works with all tools + # - 1.1 is to avoid + # - 1.2 fixes Summary but has little adoption + # - 2.0 adds more features and is very new + if not is_1_1 and not is_1_2 and not is_2_0: + # we couldn't find any specific marker + if PKG_INFO_PREFERRED_VERSION in possible_versions: + return PKG_INFO_PREFERRED_VERSION + if is_1_1: + return '1.1' + if is_1_2: + return '1.2' + + return '2.0' + +_ATTR2FIELD = { + 'metadata_version': 'Metadata-Version', + 'name': 'Name', + 'version': 'Version', + 'platform': 'Platform', + 'supported_platform': 'Supported-Platform', + 'summary': 'Summary', + 'description': 'Description', + 'keywords': 'Keywords', + 'home_page': 'Home-page', + 'author': 'Author', + 'author_email': 'Author-email', + 'maintainer': 'Maintainer', + 'maintainer_email': 'Maintainer-email', + 'license': 'License', + 'classifier': 'Classifier', + 'download_url': 'Download-URL', + 'obsoletes_dist': 'Obsoletes-Dist', + 'provides_dist': 'Provides-Dist', + 'requires_dist': 'Requires-Dist', + 'setup_requires_dist': 'Setup-Requires-Dist', + 'requires_python': 'Requires-Python', + 'requires_external': 'Requires-External', + 'requires': 'Requires', + 'provides': 'Provides', + 'obsoletes': 'Obsoletes', + 'project_url': 'Project-URL', + 'private_version': 'Private-Version', + 'obsoleted_by': 'Obsoleted-By', + 'extension': 'Extension', + 'provides_extra': 'Provides-Extra', +} + +_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') +_VERSIONS_FIELDS = ('Requires-Python',) +_VERSION_FIELDS = ('Version',) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', + 'Requires', 'Provides', 'Obsoletes-Dist', + 'Provides-Dist', 'Requires-Dist', 'Requires-External', + 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension') +_LISTTUPLEFIELDS = ('Project-URL',) + +_ELEMENTSFIELD = ('Keywords',) + +_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') + +_MISSING = object() + +_FILESAFE = re.compile('[^A-Za-z0-9.]+') + + +def _get_name_and_version(name, version, for_filename=False): + """Return the distribution name 
with version. + + If for_filename is true, return a filename-escaped form.""" + if for_filename: + # For both name and version any runs of non-alphanumeric or '.' + # characters are replaced with a single '-'. Additionally any + # spaces in the version string become '.' + name = _FILESAFE.sub('-', name) + version = _FILESAFE.sub('-', version.replace(' ', '.')) + return '%s-%s' % (name, version) + + +class LegacyMetadata(object): + """The legacy metadata of a release. + + Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can + instantiate the class with one of these arguments (or none): + - *path*, the path to a metadata file + - *fileobj* give a file-like object with metadata as content + - *mapping* is a dict-like object + - *scheme* is a version scheme name + """ + # TODO document the mapping API and UNKNOWN default key + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._fields = {} + self.requires_files = [] + self._dependencies = None + self.scheme = scheme + if path is not None: + self.read(path) + elif fileobj is not None: + self.read_file(fileobj) + elif mapping is not None: + self.update(mapping) + self.set_metadata_version() + + def set_metadata_version(self): + self._fields['Metadata-Version'] = _best_version(self._fields) + + def _write_field(self, fileobj, name, value): + fileobj.write('%s: %s\n' % (name, value)) + + def __getitem__(self, name): + return self.get(name) + + def __setitem__(self, name, value): + return self.set(name, value) + + def __delitem__(self, name): + field_name = self._convert_name(name) + try: + del self._fields[field_name] + except KeyError: + raise KeyError(name) + + def __contains__(self, name): + return (name in self._fields or + self._convert_name(name) in self._fields) + + def _convert_name(self, name): + if name in _ALL_FIELDS: + return name + name = name.replace('-', '_').lower() + return _ATTR2FIELD.get(name, name) + + def _default_value(self, name): + if name in _LISTFIELDS or name in _ELEMENTSFIELD: + return [] + return 'UNKNOWN' + + def _remove_line_prefix(self, value): + if self.metadata_version in ('1.0', '1.1'): + return _LINE_PREFIX_PRE_1_2.sub('\n', value) + else: + return _LINE_PREFIX_1_2.sub('\n', value) + + def __getattr__(self, name): + if name in _ATTR2FIELD: + return self[name] + raise AttributeError(name) + + # + # Public API + # + +# dependencies = property(_get_dependencies, _set_dependencies) + + def get_fullname(self, filesafe=False): + """Return the distribution name with version. 
+ + If filesafe is true, return a filename-escaped form.""" + return _get_name_and_version(self['Name'], self['Version'], filesafe) + + def is_field(self, name): + """return True if name is a valid metadata key""" + name = self._convert_name(name) + return name in _ALL_FIELDS + + def is_multi_field(self, name): + name = self._convert_name(name) + return name in _LISTFIELDS + + def read(self, filepath): + """Read the metadata values from a file path.""" + fp = codecs.open(filepath, 'r', encoding='utf-8') + try: + self.read_file(fp) + finally: + fp.close() + + def read_file(self, fileob): + """Read the metadata values from a file object.""" + msg = message_from_file(fileob) + self._fields['Metadata-Version'] = msg['metadata-version'] + + # When reading, get all the fields we can + for field in _ALL_FIELDS: + if field not in msg: + continue + if field in _LISTFIELDS: + # we can have multiple lines + values = msg.get_all(field) + if field in _LISTTUPLEFIELDS and values is not None: + values = [tuple(value.split(',')) for value in values] + self.set(field, values) + else: + # single line + value = msg[field] + if value is not None and value != 'UNKNOWN': + self.set(field, value) + self.set_metadata_version() + + def write(self, filepath, skip_unknown=False): + """Write the metadata fields to filepath.""" + fp = codecs.open(filepath, 'w', encoding='utf-8') + try: + self.write_file(fp, skip_unknown) + finally: + fp.close() + + def write_file(self, fileobject, skip_unknown=False): + """Write the PKG-INFO format data to a file object.""" + self.set_metadata_version() + + for field in _version2fieldlist(self['Metadata-Version']): + values = self.get(field) + if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): + continue + if field in _ELEMENTSFIELD: + self._write_field(fileobject, field, ','.join(values)) + continue + if field not in _LISTFIELDS: + if field == 'Description': + if self.metadata_version in ('1.0', '1.1'): + values = values.replace('\n', '\n ') + else: + values = values.replace('\n', '\n |') + values = [values] + + if field in _LISTTUPLEFIELDS: + values = [','.join(value) for value in values] + + for value in values: + self._write_field(fileobject, field, value) + + def update(self, other=None, **kwargs): + """Set metadata values from the given iterable `other` and kwargs. + + Behavior is like `dict.update`: If `other` has a ``keys`` method, + they are looped over and ``self[key]`` is assigned ``other[key]``. + Else, ``other`` is an iterable of ``(key, value)`` iterables. + + Keys that don't match a metadata field or that have an empty value are + dropped. 
+ """ + def _set(key, value): + if key in _ATTR2FIELD and value: + self.set(self._convert_name(key), value) + + if not other: + # other is None or empty container + pass + elif hasattr(other, 'keys'): + for k in other.keys(): + _set(k, other[k]) + else: + for k, v in other: + _set(k, v) + + if kwargs: + for k, v in kwargs.items(): + _set(k, v) + + def set(self, name, value): + """Control then set a metadata field.""" + name = self._convert_name(name) + + if ((name in _ELEMENTSFIELD or name == 'Platform') and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [v.strip() for v in value.split(',')] + else: + value = [] + elif (name in _LISTFIELDS and + not isinstance(value, (list, tuple))): + if isinstance(value, string_types): + value = [value] + else: + value = [] + + if logger.isEnabledFor(logging.WARNING): + project_name = self['Name'] + + scheme = get_scheme(self.scheme) + if name in _PREDICATE_FIELDS and value is not None: + for v in value: + # check that the values are valid + if not scheme.is_valid_matcher(v.split(';')[0]): + logger.warning( + '%r: %r is not valid (field %r)', + project_name, v, name) + # FIXME this rejects UNKNOWN, is that right? + elif name in _VERSIONS_FIELDS and value is not None: + if not scheme.is_valid_constraint_list(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + elif name in _VERSION_FIELDS and value is not None: + if not scheme.is_valid_version(value): + logger.warning('%r: %r is not a valid version (field %r)', + project_name, value, name) + + if name in _UNICODEFIELDS: + if name == 'Description': + value = self._remove_line_prefix(value) + + self._fields[name] = value + + def get(self, name, default=_MISSING): + """Get a metadata field.""" + name = self._convert_name(name) + if name not in self._fields: + if default is _MISSING: + default = self._default_value(name) + return default + if name in _UNICODEFIELDS: + value = self._fields[name] + return value + elif name in _LISTFIELDS: + value = self._fields[name] + if value is None: + return [] + res = [] + for val in value: + if name not in _LISTTUPLEFIELDS: + res.append(val) + else: + # That's for Project-URL + res.append((val[0], val[1])) + return res + + elif name in _ELEMENTSFIELD: + value = self._fields[name] + if isinstance(value, string_types): + return value.split(',') + return self._fields[name] + + def check(self, strict=False): + """Check if the metadata is compliant. 
If strict is True then raise if + no Name or Version are provided""" + self.set_metadata_version() + + # XXX should check the versions (if the file was loaded) + missing, warnings = [], [] + + for attr in ('Name', 'Version'): # required by PEP 345 + if attr not in self: + missing.append(attr) + + if strict and missing != []: + msg = 'missing required metadata: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + + for attr in ('Home-page', 'Author'): + if attr not in self: + missing.append(attr) + + # checking metadata 1.2 (XXX needs to check 1.1, 1.0) + if self['Metadata-Version'] != '1.2': + return missing, warnings + + scheme = get_scheme(self.scheme) + + def are_valid_constraints(value): + for v in value: + if not scheme.is_valid_matcher(v.split(';')[0]): + return False + return True + + for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), + (_VERSIONS_FIELDS, + scheme.is_valid_constraint_list), + (_VERSION_FIELDS, + scheme.is_valid_version)): + for field in fields: + value = self.get(field, None) + if value is not None and not controller(value): + warnings.append('Wrong value for %r: %s' % (field, value)) + + return missing, warnings + + def todict(self, skip_missing=False): + """Return fields as a dict. + + Field names will be converted to use the underscore-lowercase style + instead of hyphen-mixed case (i.e. home_page instead of Home-page). + """ + self.set_metadata_version() + + mapping_1_0 = ( + ('metadata_version', 'Metadata-Version'), + ('name', 'Name'), + ('version', 'Version'), + ('summary', 'Summary'), + ('home_page', 'Home-page'), + ('author', 'Author'), + ('author_email', 'Author-email'), + ('license', 'License'), + ('description', 'Description'), + ('keywords', 'Keywords'), + ('platform', 'Platform'), + ('classifiers', 'Classifier'), + ('download_url', 'Download-URL'), + ) + + data = {} + for key, field_name in mapping_1_0: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + if self['Metadata-Version'] == '1.2': + mapping_1_2 = ( + ('requires_dist', 'Requires-Dist'), + ('requires_python', 'Requires-Python'), + ('requires_external', 'Requires-External'), + ('provides_dist', 'Provides-Dist'), + ('obsoletes_dist', 'Obsoletes-Dist'), + ('project_url', 'Project-URL'), + ('maintainer', 'Maintainer'), + ('maintainer_email', 'Maintainer-email'), + ) + for key, field_name in mapping_1_2: + if not skip_missing or field_name in self._fields: + if key != 'project_url': + data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] + + elif self['Metadata-Version'] == '1.1': + mapping_1_1 = ( + ('provides', 'Provides'), + ('requires', 'Requires'), + ('obsoletes', 'Obsoletes'), + ) + for key, field_name in mapping_1_1: + if not skip_missing or field_name in self._fields: + data[key] = self[field_name] + + return data + + def add_requirements(self, requirements): + if self['Metadata-Version'] == '1.1': + # we can't have 1.1 metadata *and* Setuptools requires + for field in ('Obsoletes', 'Requires', 'Provides'): + if field in self: + del self[field] + self['Requires-Dist'] += requirements + + # Mapping API + # TODO could add iter* variants + + def keys(self): + return list(_version2fieldlist(self['Metadata-Version'])) + + def __iter__(self): + for key in self.keys(): + yield key + + def values(self): + return [self[key] for key in self.keys()] + + def items(self): + return [(key, self[key]) for key in self.keys()] + + def __repr__(self): + return '<%s %s %s>' % (self.__class__.__name__, self.name, 
+ self.version) + + +METADATA_FILENAME = 'pydist.json' +WHEEL_METADATA_FILENAME = 'metadata.json' + + +class Metadata(object): + """ + The metadata of a release. This implementation uses 2.0 (JSON) + metadata where possible. If not possible, it wraps a LegacyMetadata + instance which handles the key-value metadata format. + """ + + METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$') + + NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + + VERSION_MATCHER = PEP440_VERSION_RE + + SUMMARY_MATCHER = re.compile('.{1,2047}') + + METADATA_VERSION = '2.0' + + GENERATOR = 'distlib (%s)' % __version__ + + MANDATORY_KEYS = { + 'name': (), + 'version': (), + 'summary': ('legacy',), + } + + INDEX_KEYS = ('name version license summary description author ' + 'author_email keywords platform home_page classifiers ' + 'download_url') + + DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' + 'dev_requires provides meta_requires obsoleted_by ' + 'supports_environments') + + SYNTAX_VALIDATORS = { + 'metadata_version': (METADATA_VERSION_MATCHER, ()), + 'name': (NAME_MATCHER, ('legacy',)), + 'version': (VERSION_MATCHER, ('legacy',)), + 'summary': (SUMMARY_MATCHER, ('legacy',)), + } + + __slots__ = ('_legacy', '_data', 'scheme') + + def __init__(self, path=None, fileobj=None, mapping=None, + scheme='default'): + if [path, fileobj, mapping].count(None) < 2: + raise TypeError('path, fileobj and mapping are exclusive') + self._legacy = None + self._data = None + self.scheme = scheme + #import pdb; pdb.set_trace() + if mapping is not None: + try: + self._validate_mapping(mapping, scheme) + self._data = mapping + except MetadataUnrecognizedVersionError: + self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) + self.validate() + else: + data = None + if path: + with open(path, 'rb') as f: + data = f.read() + elif fileobj: + data = fileobj.read() + if data is None: + # Initialised with no args - to be added + self._data = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + else: + if not isinstance(data, text_type): + data = data.decode('utf-8') + try: + self._data = json.loads(data) + self._validate_mapping(self._data, scheme) + except ValueError: + # Note: MetadataUnrecognizedVersionError does not + # inherit from ValueError (it's a DistlibException, + # which should not inherit from ValueError). 
+ # The ValueError comes from the json.load - if that + # succeeds and we get a validation error, we want + # that to propagate + self._legacy = LegacyMetadata(fileobj=StringIO(data), + scheme=scheme) + self.validate() + + common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) + + none_list = (None, list) + none_dict = (None, dict) + + mapped_keys = { + 'run_requires': ('Requires-Dist', list), + 'build_requires': ('Setup-Requires-Dist', list), + 'dev_requires': none_list, + 'test_requires': none_list, + 'meta_requires': none_list, + 'extras': ('Provides-Extra', list), + 'modules': none_list, + 'namespaces': none_list, + 'exports': none_dict, + 'commands': none_dict, + 'classifiers': ('Classifier', list), + 'source_url': ('Download-URL', None), + 'metadata_version': ('Metadata-Version', None), + } + + del none_list, none_dict + + def __getattribute__(self, key): + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, maker = mapped[key] + if self._legacy: + if lk is None: + result = None if maker is None else maker() + else: + result = self._legacy.get(lk) + else: + value = None if maker is None else maker() + if key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + result = self._data.get(key, value) + else: + # special cases for PEP 459 + sentinel = object() + result = sentinel + d = self._data.get('extensions') + if d: + if key == 'commands': + result = d.get('python.commands', value) + elif key == 'classifiers': + d = d.get('python.details') + if d: + result = d.get(key, value) + else: + d = d.get('python.exports') + if d: + result = d.get(key, value) + if result is sentinel: + result = value + elif key not in common: + result = object.__getattribute__(self, key) + elif self._legacy: + result = self._legacy.get(key) + else: + result = self._data.get(key) + return result + + def _validate_value(self, key, value, scheme=None): + if key in self.SYNTAX_VALIDATORS: + pattern, exclusions = self.SYNTAX_VALIDATORS[key] + if (scheme or self.scheme) not in exclusions: + m = pattern.match(value) + if not m: + raise MetadataInvalidError('%r is an invalid value for ' + 'the %r property' % (value, + key)) + + def __setattr__(self, key, value): + self._validate_value(key, value) + common = object.__getattribute__(self, 'common_keys') + mapped = object.__getattribute__(self, 'mapped_keys') + if key in mapped: + lk, _ = mapped[key] + if self._legacy: + if lk is None: + raise NotImplementedError + self._legacy[lk] = value + elif key not in ('commands', 'exports', 'modules', 'namespaces', + 'classifiers'): + self._data[key] = value + else: + # special cases for PEP 459 + d = self._data.setdefault('extensions', {}) + if key == 'commands': + d['python.commands'] = value + elif key == 'classifiers': + d = d.setdefault('python.details', {}) + d[key] = value + else: + d = d.setdefault('python.exports', {}) + d[key] = value + elif key not in common: + object.__setattr__(self, key, value) + else: + if key == 'keywords': + if isinstance(value, string_types): + value = value.strip() + if value: + value = value.split() + else: + value = [] + if self._legacy: + self._legacy[key] = value + else: + self._data[key] = value + + @property + def name_and_version(self): + return _get_name_and_version(self.name, self.version, True) + + @property + def provides(self): + if self._legacy: + result = self._legacy['Provides-Dist'] + else: + result = self._data.setdefault('provides', []) + s = '%s (%s)' % 
(self.name, self.version) + if s not in result: + result.append(s) + return result + + @provides.setter + def provides(self, value): + if self._legacy: + self._legacy['Provides-Dist'] = value + else: + self._data['provides'] = value + + def get_requirements(self, reqts, extras=None, env=None): + """ + Base method to get dependencies, given a set of extras + to satisfy and an optional environment context. + :param reqts: A list of sometimes-wanted dependencies, + perhaps dependent on extras and environment. + :param extras: A list of optional components being requested. + :param env: An optional environment for marker evaluation. + """ + if self._legacy: + result = reqts + else: + result = [] + extras = get_extras(extras or [], self.extras) + for d in reqts: + if 'extra' not in d and 'environment' not in d: + # unconditional + include = True + else: + if 'extra' not in d: + # Not extra-dependent - only environment-dependent + include = True + else: + include = d.get('extra') in extras + if include: + # Not excluded because of extras, check environment + marker = d.get('environment') + if marker: + include = interpret(marker, env) + if include: + result.extend(d['requires']) + for key in ('build', 'dev', 'test'): + e = ':%s:' % key + if e in extras: + extras.remove(e) + # A recursive call, but it should terminate since 'test' + # has been removed from the extras + reqts = self._data.get('%s_requires' % key, []) + result.extend(self.get_requirements(reqts, extras=extras, + env=env)) + return result + + @property + def dictionary(self): + if self._legacy: + return self._from_legacy() + return self._data + + @property + def dependencies(self): + if self._legacy: + raise NotImplementedError + else: + return extract_by_key(self._data, self.DEPENDENCY_KEYS) + + @dependencies.setter + def dependencies(self, value): + if self._legacy: + raise NotImplementedError + else: + self._data.update(value) + + def _validate_mapping(self, mapping, scheme): + if mapping.get('metadata_version') != self.METADATA_VERSION: + raise MetadataUnrecognizedVersionError() + missing = [] + for key, exclusions in self.MANDATORY_KEYS.items(): + if key not in mapping: + if scheme not in exclusions: + missing.append(key) + if missing: + msg = 'Missing metadata items: %s' % ', '.join(missing) + raise MetadataMissingError(msg) + for k, v in mapping.items(): + self._validate_value(k, v, scheme) + + def validate(self): + if self._legacy: + missing, warnings = self._legacy.check(True) + if missing or warnings: + logger.warning('Metadata: missing: %s, warnings: %s', + missing, warnings) + else: + self._validate_mapping(self._data, self.scheme) + + def todict(self): + if self._legacy: + return self._legacy.todict(True) + else: + result = extract_by_key(self._data, self.INDEX_KEYS) + return result + + def _from_legacy(self): + assert self._legacy and not self._data + result = { + 'metadata_version': self.METADATA_VERSION, + 'generator': self.GENERATOR, + } + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', + 'classifier'): + if k in lmd: + if k == 'classifier': + nk = 'classifiers' + else: + nk = k + result[nk] = lmd[k] + kw = lmd.get('Keywords', []) + if kw == ['']: + kw = [] + result['keywords'] = kw + keys = (('requires_dist', 'run_requires'), + ('setup_requires_dist', 'build_requires')) + for ok, nk in keys: + if ok in lmd and lmd[ok]: + result[nk] = [{'requires': lmd[ok]}] + result['provides'] = self.provides + author = {} + maintainer = {} + return result + + 
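The split between the JSON-backed _data path and the _legacy wrapper above is easiest to see in use. The following is a minimal sketch, not part of the vendored file; it assumes the vendored package is importable as pip._vendor.distlib, and the project name and values are hypothetical:

    from pip._vendor.distlib.metadata import Metadata

    # A mapping with a recognised 'metadata_version' takes the native 2.0
    # path; any other mapping falls back to LegacyMetadata inside __init__
    # via MetadataUnrecognizedVersionError.
    md = Metadata(mapping={
        'metadata_version': '2.0',
        'name': 'example-dist',        # hypothetical project
        'version': '0.1',
        'summary': 'An example distribution',
    })

    print(md.name)      # 'example-dist'
    print(md.provides)  # ['example-dist (0.1)'], synthesised by the property
    md.add_requirements(['requests (>=2.0)'])
    print(md.dictionary['run_requires'])  # [{'requires': ['requests (>=2.0)']}]
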
LEGACY_MAPPING = { + 'name': 'Name', + 'version': 'Version', + 'license': 'License', + 'summary': 'Summary', + 'description': 'Description', + 'classifiers': 'Classifier', + } + + def _to_legacy(self): + def process_entries(entries): + reqts = set() + for e in entries: + extra = e.get('extra') + env = e.get('environment') + rlist = e['requires'] + for r in rlist: + if not env and not extra: + reqts.add(r) + else: + marker = '' + if extra: + marker = 'extra == "%s"' % extra + if env: + if marker: + marker = '(%s) and %s' % (env, marker) + else: + marker = env + reqts.add(';'.join((r, marker))) + return reqts + + assert self._data and not self._legacy + result = LegacyMetadata() + nmd = self._data + for nk, ok in self.LEGACY_MAPPING.items(): + if nk in nmd: + result[ok] = nmd[nk] + r1 = process_entries(self.run_requires + self.meta_requires) + r2 = process_entries(self.build_requires + self.dev_requires) + if self.extras: + result['Provides-Extra'] = sorted(self.extras) + result['Requires-Dist'] = sorted(r1) + result['Setup-Requires-Dist'] = sorted(r2) + # TODO: other fields such as contacts + return result + + def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): + if [path, fileobj].count(None) != 1: + raise ValueError('Exactly one of path and fileobj is needed') + self.validate() + if legacy: + if self._legacy: + legacy_md = self._legacy + else: + legacy_md = self._to_legacy() + if path: + legacy_md.write(path, skip_unknown=skip_unknown) + else: + legacy_md.write_file(fileobj, skip_unknown=skip_unknown) + else: + if self._legacy: + d = self._from_legacy() + else: + d = self._data + if fileobj: + json.dump(d, fileobj, ensure_ascii=True, indent=2, + sort_keys=True) + else: + with codecs.open(path, 'w', 'utf-8') as f: + json.dump(d, f, ensure_ascii=True, indent=2, + sort_keys=True) + + def add_requirements(self, requirements): + if self._legacy: + self._legacy.add_requirements(requirements) + else: + run_requires = self._data.setdefault('run_requires', []) + always = None + for entry in run_requires: + if 'environment' not in entry and 'extra' not in entry: + always = entry + break + if always is None: + always = { 'requires': requirements } + run_requires.insert(0, always) + else: + rset = set(always['requires']) | set(requirements) + always['requires'] = sorted(rset) + + def __repr__(self): + name = self.name or '(no name)' + version = self.version or 'no version' + return '<%s %s %s (%s)>' % (self.__class__.__name__, + self.metadata_version, name, version) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py new file mode 100644 index 0000000..9dd8ca0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/resources.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2016 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from __future__ import unicode_literals + +import bisect +import io +import logging +import os +import pkgutil +import shutil +import sys +import types +import zipimport + +from . import DistlibException +from .util import cached_property, get_cache_base, path_to_cache_dir, Cache + +logger = logging.getLogger(__name__) + + +cache = None # created when needed + + +class ResourceCache(Cache): + def __init__(self, base=None): + if base is None: + # Use native string to avoid issues on 2.x: see Python #20140. 
+ base = os.path.join(get_cache_base(), str('resource-cache')) + super(ResourceCache, self).__init__(base) + + def is_stale(self, resource, path): + """ + Is the cache stale for the given resource? + + :param resource: The :class:`Resource` being cached. + :param path: The path of the resource in the cache. + :return: True if the cache is stale. + """ + # Cache invalidation is a hard problem :-) + return True + + def get(self, resource): + """ + Get a resource into the cache, + + :param resource: A :class:`Resource` instance. + :return: The pathname of the resource in the cache. + """ + prefix, path = resource.finder.get_cache_info(resource) + if prefix is None: + result = path + else: + result = os.path.join(self.base, self.prefix_to_dir(prefix), path) + dirname = os.path.dirname(result) + if not os.path.isdir(dirname): + os.makedirs(dirname) + if not os.path.exists(result): + stale = True + else: + stale = self.is_stale(resource, path) + if stale: + # write the bytes of the resource to the cache location + with open(result, 'wb') as f: + f.write(resource.bytes) + return result + + +class ResourceBase(object): + def __init__(self, finder, name): + self.finder = finder + self.name = name + + +class Resource(ResourceBase): + """ + A class representing an in-package resource, such as a data file. This is + not normally instantiated by user code, but rather by a + :class:`ResourceFinder` which manages the resource. + """ + is_container = False # Backwards compatibility + + def as_stream(self): + """ + Get the resource as a stream. + + This is not a property to make it obvious that it returns a new stream + each time. + """ + return self.finder.get_stream(self) + + @cached_property + def file_path(self): + global cache + if cache is None: + cache = ResourceCache() + return cache.get(self) + + @cached_property + def bytes(self): + return self.finder.get_bytes(self) + + @cached_property + def size(self): + return self.finder.get_size(self) + + +class ResourceContainer(ResourceBase): + is_container = True # Backwards compatibility + + @cached_property + def resources(self): + return self.finder.get_resources(self) + + +class ResourceFinder(object): + """ + Resource finder for file system resources. 
+ """ + + if sys.platform.startswith('java'): + skipped_extensions = ('.pyc', '.pyo', '.class') + else: + skipped_extensions = ('.pyc', '.pyo') + + def __init__(self, module): + self.module = module + self.loader = getattr(module, '__loader__', None) + self.base = os.path.dirname(getattr(module, '__file__', '')) + + def _adjust_path(self, path): + return os.path.realpath(path) + + def _make_path(self, resource_name): + # Issue #50: need to preserve type of path on Python 2.x + # like os.path._get_sep + if isinstance(resource_name, bytes): # should only happen on 2.x + sep = b'/' + else: + sep = '/' + parts = resource_name.split(sep) + parts.insert(0, self.base) + result = os.path.join(*parts) + return self._adjust_path(result) + + def _find(self, path): + return os.path.exists(path) + + def get_cache_info(self, resource): + return None, resource.path + + def find(self, resource_name): + path = self._make_path(resource_name) + if not self._find(path): + result = None + else: + if self._is_directory(path): + result = ResourceContainer(self, resource_name) + else: + result = Resource(self, resource_name) + result.path = path + return result + + def get_stream(self, resource): + return open(resource.path, 'rb') + + def get_bytes(self, resource): + with open(resource.path, 'rb') as f: + return f.read() + + def get_size(self, resource): + return os.path.getsize(resource.path) + + def get_resources(self, resource): + def allowed(f): + return (f != '__pycache__' and not + f.endswith(self.skipped_extensions)) + return set([f for f in os.listdir(resource.path) if allowed(f)]) + + def is_container(self, resource): + return self._is_directory(resource.path) + + _is_directory = staticmethod(os.path.isdir) + + def iterator(self, resource_name): + resource = self.find(resource_name) + if resource is not None: + todo = [resource] + while todo: + resource = todo.pop(0) + yield resource + if resource.is_container: + rname = resource.name + for name in resource.resources: + if not rname: + new_name = name + else: + new_name = '/'.join([rname, name]) + child = self.find(new_name) + if child.is_container: + todo.append(child) + else: + yield child + + +class ZipResourceFinder(ResourceFinder): + """ + Resource finder for resources in .zip files. 
+ """ + def __init__(self, module): + super(ZipResourceFinder, self).__init__(module) + archive = self.loader.archive + self.prefix_len = 1 + len(archive) + # PyPy doesn't have a _files attr on zipimporter, and you can't set one + if hasattr(self.loader, '_files'): + self._files = self.loader._files + else: + self._files = zipimport._zip_directory_cache[archive] + self.index = sorted(self._files) + + def _adjust_path(self, path): + return path + + def _find(self, path): + path = path[self.prefix_len:] + if path in self._files: + result = True + else: + if path and path[-1] != os.sep: + path = path + os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + if not result: + logger.debug('_find failed: %r %r', path, self.loader.prefix) + else: + logger.debug('_find worked: %r %r', path, self.loader.prefix) + return result + + def get_cache_info(self, resource): + prefix = self.loader.archive + path = resource.path[1 + len(prefix):] + return prefix, path + + def get_bytes(self, resource): + return self.loader.get_data(resource.path) + + def get_stream(self, resource): + return io.BytesIO(self.get_bytes(resource)) + + def get_size(self, resource): + path = resource.path[self.prefix_len:] + return self._files[path][3] + + def get_resources(self, resource): + path = resource.path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + plen = len(path) + result = set() + i = bisect.bisect(self.index, path) + while i < len(self.index): + if not self.index[i].startswith(path): + break + s = self.index[i][plen:] + result.add(s.split(os.sep, 1)[0]) # only immediate children + i += 1 + return result + + def _is_directory(self, path): + path = path[self.prefix_len:] + if path and path[-1] != os.sep: + path += os.sep + i = bisect.bisect(self.index, path) + try: + result = self.index[i].startswith(path) + except IndexError: + result = False + return result + +_finder_registry = { + type(None): ResourceFinder, + zipimport.zipimporter: ZipResourceFinder +} + +try: + import _frozen_importlib + _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder + _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder +except (ImportError, AttributeError): + pass + + +def register_finder(loader, finder_maker): + _finder_registry[type(loader)] = finder_maker + +_finder_cache = {} + + +def finder(package): + """ + Return a resource finder for a package. + :param package: The name of the package. + :return: A :class:`ResourceFinder` instance for the package. + """ + if package in _finder_cache: + result = _finder_cache[package] + else: + if package not in sys.modules: + __import__(package) + module = sys.modules[package] + path = getattr(module, '__path__', None) + if path is None: + raise DistlibException('You cannot get a finder for a module, ' + 'only for a package') + loader = getattr(module, '__loader__', None) + finder_maker = _finder_registry.get(type(loader)) + if finder_maker is None: + raise DistlibException('Unable to locate finder for %r' % package) + result = finder_maker(module) + _finder_cache[package] = result + return result + + +_dummy_module = types.ModuleType(str('__dummy__')) + + +def finder_for_path(path): + """ + Return a resource finder for a path, which should represent a container. + + :param path: The path. + :return: A :class:`ResourceFinder` instance for the path. 
+ """ + result = None + # calls any path hooks, gets importer into cache + pkgutil.get_importer(path) + loader = sys.path_importer_cache.get(path) + finder = _finder_registry.get(type(loader)) + if finder: + module = _dummy_module + module.__file__ = os.path.join(path, '') + module.__loader__ = loader + result = finder(module) + return result diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py new file mode 100644 index 0000000..c9996d5 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2015 Vinay Sajip. +# Licensed to the Python Software Foundation under a contributor agreement. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +from io import BytesIO +import logging +import os +import re +import struct +import sys + +from .compat import sysconfig, detect_encoding, ZipFile +from .resources import finder +from .util import (FileOperator, get_export_entry, convert_path, + get_executable, in_venv) + +logger = logging.getLogger(__name__) + +_DEFAULT_MANIFEST = ''' + + + + + + + + + + + + +'''.strip() + +# check if Python is called on the first line with this expression +FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') +SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*- +if __name__ == '__main__': + import sys, re + + def _resolve(module, func): + __import__(module) + mod = sys.modules[module] + parts = func.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + try: + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + + func = _resolve('%(module)s', '%(func)s') + rc = func() # None interpreted as 0 + except Exception as e: # only supporting Python >= 2.6 + sys.stderr.write('%%s\\n' %% e) + rc = 1 + sys.exit(rc) +''' + + +def _enquote_executable(executable): + if ' ' in executable: + # make sure we quote only the executable in case of env + # for example /usr/bin/env "/dir with spaces/bin/jython" + # instead of "/usr/bin/env /dir with spaces/bin/jython" + # otherwise whole + if executable.startswith('/usr/bin/env '): + env, _executable = executable.split(' ', 1) + if ' ' in _executable and not _executable.startswith('"'): + executable = '%s "%s"' % (env, _executable) + else: + if not executable.startswith('"'): + executable = '"%s"' % executable + return executable + + +class ScriptMaker(object): + """ + A class to copy or create scripts from source scripts or callable + specifications. + """ + script_template = SCRIPT_TEMPLATE + + executable = None # for shebangs + + def __init__(self, source_dir, target_dir, add_launchers=True, + dry_run=False, fileop=None): + self.source_dir = source_dir + self.target_dir = target_dir + self.add_launchers = add_launchers + self.force = False + self.clobber = False + # It only makes sense to set mode bits on POSIX. 
+ self.set_mode = (os.name == 'posix') or (os.name == 'java' and + os._name == 'posix') + self.variants = set(('', 'X.Y')) + self._fileop = fileop or FileOperator(dry_run) + + self._is_nt = os.name == 'nt' or ( + os.name == 'java' and os._name == 'nt') + + def _get_alternate_executable(self, executable, options): + if options.get('gui', False) and self._is_nt: # pragma: no cover + dn, fn = os.path.split(executable) + fn = fn.replace('python', 'pythonw') + executable = os.path.join(dn, fn) + return executable + + if sys.platform.startswith('java'): # pragma: no cover + def _is_shell(self, executable): + """ + Determine if the specified executable is a script + (contains a #! line) + """ + try: + with open(executable) as fp: + return fp.read(2) == '#!' + except (OSError, IOError): + logger.warning('Failed to open %s', executable) + return False + + def _fix_jython_executable(self, executable): + if self._is_shell(executable): + # Workaround for Jython is not needed on Linux systems. + import java + + if java.lang.System.getProperty('os.name') == 'Linux': + return executable + elif executable.lower().endswith('jython.exe'): + # Use wrapper exe for Jython on Windows + return executable + return '/usr/bin/env %s' % executable + + def _get_shebang(self, encoding, post_interp=b'', options=None): + enquote = True + if self.executable: + executable = self.executable + enquote = False # assume this will be taken care of + elif not sysconfig.is_python_build(): + executable = get_executable() + elif in_venv(): # pragma: no cover + executable = os.path.join(sysconfig.get_path('scripts'), + 'python%s' % sysconfig.get_config_var('EXE')) + else: # pragma: no cover + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), + sysconfig.get_config_var('EXE'))) + if options: + executable = self._get_alternate_executable(executable, options) + + if sys.platform.startswith('java'): # pragma: no cover + executable = self._fix_jython_executable(executable) + # Normalise case for Windows + executable = os.path.normcase(executable) + # If the user didn't specify an executable, it may be necessary to + # cater for executable paths with spaces (not uncommon on Windows) + if enquote: + executable = _enquote_executable(executable) + # Issue #51: don't use fsencode, since we later try to + # check that the shebang is decodable using utf-8. + executable = executable.encode('utf-8') + # in case of IronPython, play safe and enable frames support + if (sys.platform == 'cli' and '-X:Frames' not in post_interp + and '-X:FullFrames' not in post_interp): # pragma: no cover + post_interp += b' -X:Frames' + shebang = b'#!' + executable + post_interp + b'\n' + # Python parser starts to read a script using UTF-8 until + # it gets a #coding:xxx cookie. The shebang has to be the + # first line of a file, the #coding:xxx cookie cannot be + # written before. So the shebang has to be decodable from + # UTF-8. + try: + shebang.decode('utf-8') + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable from utf-8' % shebang) + # If the script is encoded to a custom encoding (use a + # #coding:xxx cookie), the shebang has to be decodable from + # the script encoding too. 
+ if encoding != 'utf-8': + try: + shebang.decode(encoding) + except UnicodeDecodeError: # pragma: no cover + raise ValueError( + 'The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) + return shebang + + def _get_script_text(self, entry): + return self.script_template % dict(module=entry.prefix, + func=entry.suffix) + + manifest = _DEFAULT_MANIFEST + + def get_manifest(self, exename): + base = os.path.basename(exename) + return self.manifest % base + + def _write_script(self, names, shebang, script_bytes, filenames, ext): + use_launcher = self.add_launchers and self._is_nt + linesep = os.linesep.encode('utf-8') + if not use_launcher: + script_bytes = shebang + linesep + script_bytes + else: # pragma: no cover + if ext == 'py': + launcher = self._get_launcher('t') + else: + launcher = self._get_launcher('w') + stream = BytesIO() + with ZipFile(stream, 'w') as zf: + zf.writestr('__main__.py', script_bytes) + zip_data = stream.getvalue() + script_bytes = launcher + shebang + linesep + zip_data + for name in names: + outname = os.path.join(self.target_dir, name) + if use_launcher: # pragma: no cover + n, e = os.path.splitext(outname) + if e.startswith('.py'): + outname = n + outname = '%s.exe' % outname + try: + self._fileop.write_binary_file(outname, script_bytes) + except Exception: + # Failed writing an executable - it might be in use. + logger.warning('Failed to write executable - trying to ' + 'use .deleteme logic') + dfname = '%s.deleteme' % outname + if os.path.exists(dfname): + os.remove(dfname) # Not allowed to fail here + os.rename(outname, dfname) # nor here + self._fileop.write_binary_file(outname, script_bytes) + logger.debug('Able to replace executable using ' + '.deleteme logic') + try: + os.remove(dfname) + except Exception: + pass # still in use - ignore error + else: + if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover + outname = '%s.%s' % (outname, ext) + if os.path.exists(outname) and not self.clobber: + logger.warning('Skipping existing file %s', outname) + continue + self._fileop.write_binary_file(outname, script_bytes) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + + def _make_script(self, entry, filenames, options=None): + post_interp = b'' + if options: + args = options.get('interpreter_args', []) + if args: + args = ' %s' % ' '.join(args) + post_interp = args.encode('utf-8') + shebang = self._get_shebang('utf-8', post_interp, options=options) + script = self._get_script_text(entry).encode('utf-8') + name = entry.name + scriptnames = set() + if '' in self.variants: + scriptnames.add(name) + if 'X' in self.variants: + scriptnames.add('%s%s' % (name, sys.version[0])) + if 'X.Y' in self.variants: + scriptnames.add('%s-%s' % (name, sys.version[:3])) + if options and options.get('gui', False): + ext = 'pyw' + else: + ext = 'py' + self._write_script(scriptnames, shebang, script, filenames, ext) + + def _copy_script(self, script, filenames): + adjust = False + script = os.path.join(self.source_dir, convert_path(script)) + outname = os.path.join(self.target_dir, os.path.basename(script)) + if not self.force and not self._fileop.newer(script, outname): + logger.debug('not copying %s (up-to-date)', script) + return + + # Always open the file, but ignore failures in dry-run mode -- + # that way, we'll get accurate feedback if we can read the + # script. 
+ try: + f = open(script, 'rb') + except IOError: # pragma: no cover + if not self.dry_run: + raise + f = None + else: + first_line = f.readline() + if not first_line: # pragma: no cover + logger.warning('%s: %s is an empty file (skipping)', + self.get_command_name(), script) + return + + match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) + if match: + adjust = True + post_interp = match.group(1) or b'' + + if not adjust: + if f: + f.close() + self._fileop.copy_file(script, outname) + if self.set_mode: + self._fileop.set_executable_mode([outname]) + filenames.append(outname) + else: + logger.info('copying and adjusting %s -> %s', script, + self.target_dir) + if not self._fileop.dry_run: + encoding, lines = detect_encoding(f.readline) + f.seek(0) + shebang = self._get_shebang(encoding, post_interp) + if b'pythonw' in first_line: # pragma: no cover + ext = 'pyw' + else: + ext = 'py' + n = os.path.basename(outname) + self._write_script([n], shebang, f.read(), filenames, ext) + if f: + f.close() + + @property + def dry_run(self): + return self._fileop.dry_run + + @dry_run.setter + def dry_run(self, value): + self._fileop.dry_run = value + + if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover + # Executable launcher support. + # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ + + def _get_launcher(self, kind): + if struct.calcsize('P') == 8: # 64-bit + bits = '64' + else: + bits = '32' + name = '%s%s.exe' % (kind, bits) + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + distlib_package = __name__.rsplit('.', 1)[0] + result = finder(distlib_package).find(name).bytes + return result + + # Public API follows + + def make(self, specification, options=None): + """ + Make a script. + + :param specification: The specification, which is either a valid export + entry specification (to make a script from a + callable) or a filename (to make a script by + copying from a source location). + :param options: A dictionary of options controlling script generation. + :return: A list of all absolute pathnames written to. + """ + filenames = [] + entry = get_export_entry(specification) + if entry is None: + self._copy_script(specification, filenames) + else: + self._make_script(entry, filenames, options=options) + return filenames + + def make_multiple(self, specifications, options=None): + """ + Take a list of specifications and make scripts from them, + :param specifications: A list of specifications. 
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        for specification in specifications:
+            filenames.extend(self.make(specification, options))
+        return filenames
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe
new file mode 100644
index 0000000..e0168c2
Binary files /dev/null and b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t32.exe differ
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe
new file mode 100644
index 0000000..29a1fd6
Binary files /dev/null and b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/t64.exe differ
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
new file mode 100644
index 0000000..7e209ec
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
@@ -0,0 +1,1597 @@
+#
+# Copyright (C) 2012-2016 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import codecs
+from collections import deque
+import contextlib
+import csv
+from glob import iglob as std_iglob
+import io
+import json
+import logging
+import os
+import py_compile
+import re
+import shutil
+import socket
+import ssl
+import subprocess
+import sys
+import tarfile
+import tempfile
+import textwrap
+
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+import time
+
+from . import DistlibException
+from .compat import (string_types, text_type, shutil, raw_input, StringIO,
+                     cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
+                     splittype, HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
+                     BaseConfigurator, valid_ident, Container, configparser,
+                     URLError, match_hostname, CertificateError, ZipFile,
+                     fsdecode)
+
+logger = logging.getLogger(__name__)
+
+#
+# Requirement parsing code for name + optional constraints + optional extras
+#
+# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
+#
+# The regex can seem a bit hairy, so we build it up out of smaller pieces
+# which are manageable.
+#
+
+COMMA = r'\s*,\s*'
+COMMA_RE = re.compile(COMMA)
+
+IDENT = r'(\w|[.-])+'
+EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
+VERSPEC = IDENT + r'\*?'
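+
+# As an illustrative sketch of the grammar being built up here,
+# parse_requirement() below turns
+#
+#     'foo [bar,baz] (>= 1.2, < 2.0)'
+#
+# into a Container with name='foo', extras=['bar', 'baz'],
+# constraints=[('>=', '1.2'), ('<', '2.0')] and
+# requirement='foo (>= 1.2, < 2.0)'.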
+
+RELOP = '([<>=!~]=)|[<>]'
+
+#
+# The first relop is optional - if absent, will be taken as '~='
+#
+BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
+                    RELOP + r')\s*(' + VERSPEC + '))*')
+
+DIRECT_REF = '(from\s+(?P<diref>.*))'
+
+#
+# Either the bare constraints or the bare constraints in parentheses
+#
+CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
+               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
+
+EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
+EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
+REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
+               CONSTRAINTS + ')?$')
+REQUIREMENT_RE = re.compile(REQUIREMENT)
+
+#
+# Used to scan through the constraints
+#
+RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
+RELOP_IDENT_RE = re.compile(RELOP_IDENT)
+
+def parse_requirement(s):
+
+    def get_constraint(m):
+        d = m.groupdict()
+        return d['op'], d['vn']
+
+    result = None
+    m = REQUIREMENT_RE.match(s)
+    if m:
+        d = m.groupdict()
+        name = d['dn']
+        cons = d['c1'] or d['c2']
+        if not d['diref']:
+            url = None
+        else:
+            # direct reference
+            cons = None
+            url = d['diref'].strip()
+        if not cons:
+            cons = None
+            constr = ''
+            rs = d['dn']
+        else:
+            if cons[0] not in '<>!=':
+                cons = '~=' + cons
+            iterator = RELOP_IDENT_RE.finditer(cons)
+            cons = [get_constraint(m) for m in iterator]
+            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
+        if not d['ex']:
+            extras = None
+        else:
+            extras = COMMA_RE.split(d['ex'])
+        result = Container(name=name, constraints=cons, extras=extras,
+                           requirement=rs, source=s, url=url)
+    return result
+
+
+def get_resources_dests(resources_root, rules):
+    """Find destinations for resources files"""
+
+    def get_rel_path(base, path):
+        # normalizes and returns a lstripped-/-separated path
+        base = base.replace(os.path.sep, '/')
+        path = path.replace(os.path.sep, '/')
+        assert path.startswith(base)
+        return path[len(base):].lstrip('/')
+
+    destinations = {}
+    for base, suffix, dest in rules:
+        prefix = os.path.join(resources_root, base)
+        for abs_base in iglob(prefix):
+            abs_glob = os.path.join(abs_base, suffix)
+            for abs_path in iglob(abs_glob):
+                resource_file = get_rel_path(resources_root, abs_path)
+                if dest is None:    # remove the entry if it was here
+                    destinations.pop(resource_file, None)
+                else:
+                    rel_path = get_rel_path(abs_base, abs_path)
+                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
+                    destinations[resource_file] = rel_dest + '/' + rel_path
+    return destinations
+
+
+def in_venv():
+    if hasattr(sys, 'real_prefix'):
+        # virtualenv venvs
+        result = True
+    else:
+        # PEP 405 venvs
+        result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
+    return result
+
+
+def get_executable():
+# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
+# changes to the stub launcher mean that sys.executable always points
+# to the stub on OS X
+#    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
+#                                     in os.environ):
+#        result = os.environ['__PYVENV_LAUNCHER__']
+#    else:
+#        result = sys.executable
+#    return result
+    result = os.path.normcase(sys.executable)
+    if not isinstance(result, text_type):
+        result = fsdecode(result)
+    return result
+
+
+def proceed(prompt, allowed_chars, error_prompt=None, default=None):
+    p = prompt
+    while True:
+        s = raw_input(p)
+        p = prompt
+        if not s and default:
+            s = default
+        if s:
+            c = s[0].lower()
+            if c in allowed_chars:
+                break
+            if error_prompt:
+                p = '%c: %s\n%s' % (c, error_prompt, prompt)
+    return c
+
+
+def extract_by_key(d,
keys): + if isinstance(keys, string_types): + keys = keys.split() + result = {} + for key in keys: + if key in d: + result[key] = d[key] + return result + +def read_exports(stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + # Try to load as JSON, falling back on legacy format + data = stream.read() + stream = StringIO(data) + try: + jdata = json.load(stream) + result = jdata['extensions']['python.exports']['exports'] + for group, entries in result.items(): + for k, v in entries.items(): + s = '%s = %s' % (k, v) + entry = get_export_entry(s) + assert entry is not None + entries[k] = entry + return result + except Exception: + stream.seek(0, 0) + + def read_stream(cp, stream): + if hasattr(cp, 'read_file'): + cp.read_file(stream) + else: + cp.readfp(stream) + + cp = configparser.ConfigParser() + try: + read_stream(cp, stream) + except configparser.MissingSectionHeaderError: + stream.close() + data = textwrap.dedent(data) + stream = StringIO(data) + read_stream(cp, stream) + + result = {} + for key in cp.sections(): + result[key] = entries = {} + for name, value in cp.items(key): + s = '%s = %s' % (name, value) + entry = get_export_entry(s) + assert entry is not None + #entry.dist = self + entries[name] = entry + return result + + +def write_exports(exports, stream): + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getwriter('utf-8')(stream) + cp = configparser.ConfigParser() + for k, v in exports.items(): + # TODO check k, v for valid values + cp.add_section(k) + for entry in v.values(): + if entry.suffix is None: + s = entry.prefix + else: + s = '%s:%s' % (entry.prefix, entry.suffix) + if entry.flags: + s = '%s [%s]' % (s, ', '.join(entry.flags)) + cp.set(k, entry.name, s) + cp.write(stream) + + +@contextlib.contextmanager +def tempdir(): + td = tempfile.mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +@contextlib.contextmanager +def chdir(d): + cwd = os.getcwd() + try: + os.chdir(d) + yield + finally: + os.chdir(cwd) + + +@contextlib.contextmanager +def socket_timeout(seconds=15): + cto = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(seconds) + yield + finally: + socket.setdefaulttimeout(cto) + + +class cached_property(object): + def __init__(self, func): + self.func = func + #for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) + + def __get__(self, obj, cls=None): + if obj is None: + return self + value = self.func(obj) + object.__setattr__(obj, self.func.__name__, value) + #obj.__dict__[self.func.__name__] = value = self.func(obj) + return value + +def convert_path(pathname): + """Return 'pathname' as a name that will work on the native filesystem. + + The path is split on '/' and put back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. 
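+
+    For example (illustrative): convert_path('pkg/data/file.txt') returns
+    'pkg\data\file.txt' on Windows and the input unchanged on POSIX.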
+ """ + if os.sep == '/': + return pathname + if not pathname: + return pathname + if pathname[0] == '/': + raise ValueError("path '%s' cannot be absolute" % pathname) + if pathname[-1] == '/': + raise ValueError("path '%s' cannot end with '/'" % pathname) + + paths = pathname.split('/') + while os.curdir in paths: + paths.remove(os.curdir) + if not paths: + return os.curdir + return os.path.join(*paths) + + +class FileOperator(object): + def __init__(self, dry_run=False): + self.dry_run = dry_run + self.ensured = set() + self._init_record() + + def _init_record(self): + self.record = False + self.files_written = set() + self.dirs_created = set() + + def record_as_written(self, path): + if self.record: + self.files_written.add(path) + + def newer(self, source, target): + """Tell if the target is newer than the source. + + Returns true if 'source' exists and is more recently modified than + 'target', or if 'source' exists and 'target' doesn't. + + Returns false if both exist and 'target' is the same age or younger + than 'source'. Raise PackagingFileError if 'source' does not exist. + + Note that this test is not very accurate: files created in the same + second will have the same "age". + """ + if not os.path.exists(source): + raise DistlibException("file '%r' does not exist" % + os.path.abspath(source)) + if not os.path.exists(target): + return True + + return os.stat(source).st_mtime > os.stat(target).st_mtime + + def copy_file(self, infile, outfile, check=True): + """Copy a file respecting dry-run and force flags. + """ + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying %s to %s', infile, outfile) + if not self.dry_run: + msg = None + if check: + if os.path.islink(outfile): + msg = '%s is a symlink' % outfile + elif os.path.exists(outfile) and not os.path.isfile(outfile): + msg = '%s is a non-regular file' % outfile + if msg: + raise ValueError(msg + ' which would be overwritten') + shutil.copyfile(infile, outfile) + self.record_as_written(outfile) + + def copy_stream(self, instream, outfile, encoding=None): + assert not os.path.isdir(outfile) + self.ensure_dir(os.path.dirname(outfile)) + logger.info('Copying stream %s to %s', instream, outfile) + if not self.dry_run: + if encoding is None: + outstream = open(outfile, 'wb') + else: + outstream = codecs.open(outfile, 'w', encoding=encoding) + try: + shutil.copyfileobj(instream, outstream) + finally: + outstream.close() + self.record_as_written(outfile) + + def write_binary_file(self, path, data): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data) + self.record_as_written(path) + + def write_text_file(self, path, data, encoding): + self.ensure_dir(os.path.dirname(path)) + if not self.dry_run: + with open(path, 'wb') as f: + f.write(data.encode(encoding)) + self.record_as_written(path) + + def set_mode(self, bits, mask, files): + if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): + # Set the executable bits (owner, group, and world) on + # all the files specified. 
+ for f in files: + if self.dry_run: + logger.info("changing mode of %s", f) + else: + mode = (os.stat(f).st_mode | bits) & mask + logger.info("changing mode of %s to %o", f, mode) + os.chmod(f, mode) + + set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) + + def ensure_dir(self, path): + path = os.path.abspath(path) + if path not in self.ensured and not os.path.exists(path): + self.ensured.add(path) + d, f = os.path.split(path) + self.ensure_dir(d) + logger.info('Creating %s' % path) + if not self.dry_run: + os.mkdir(path) + if self.record: + self.dirs_created.add(path) + + def byte_compile(self, path, optimize=False, force=False, prefix=None): + dpath = cache_from_source(path, not optimize) + logger.info('Byte-compiling %s to %s', path, dpath) + if not self.dry_run: + if force or self.newer(path, dpath): + if not prefix: + diagpath = None + else: + assert path.startswith(prefix) + diagpath = path[len(prefix):] + py_compile.compile(path, dpath, diagpath, True) # raise error + self.record_as_written(dpath) + return dpath + + def ensure_removed(self, path): + if os.path.exists(path): + if os.path.isdir(path) and not os.path.islink(path): + logger.debug('Removing directory tree at %s', path) + if not self.dry_run: + shutil.rmtree(path) + if self.record: + if path in self.dirs_created: + self.dirs_created.remove(path) + else: + if os.path.islink(path): + s = 'link' + else: + s = 'file' + logger.debug('Removing %s %s', s, path) + if not self.dry_run: + os.remove(path) + if self.record: + if path in self.files_written: + self.files_written.remove(path) + + def is_writable(self, path): + result = False + while not result: + if os.path.exists(path): + result = os.access(path, os.W_OK) + break + parent = os.path.dirname(path) + if parent == path: + break + path = parent + return result + + def commit(self): + """ + Commit recorded changes, turn off recording, return + changes. 
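+
+        An illustrative recording session (paths hypothetical):
+
+            fop = FileOperator()
+            fop.record = True
+            fop.write_text_file('/tmp/note.txt', u'hello', 'utf-8')
+            files, dirs = fop.commit()   # ({'/tmp/note.txt'}, set())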
+ """ + assert self.record + result = self.files_written, self.dirs_created + self._init_record() + return result + + def rollback(self): + if not self.dry_run: + for f in list(self.files_written): + if os.path.exists(f): + os.remove(f) + # dirs should all be empty now, except perhaps for + # __pycache__ subdirs + # reverse so that subdirs appear before their parents + dirs = sorted(self.dirs_created, reverse=True) + for d in dirs: + flist = os.listdir(d) + if flist: + assert flist == ['__pycache__'] + sd = os.path.join(d, flist[0]) + os.rmdir(sd) + os.rmdir(d) # should fail if non-empty + self._init_record() + +def resolve(module_name, dotted_path): + if module_name in sys.modules: + mod = sys.modules[module_name] + else: + mod = __import__(module_name) + if dotted_path is None: + result = mod + else: + parts = dotted_path.split('.') + result = getattr(mod, parts.pop(0)) + for p in parts: + result = getattr(result, p) + return result + + +class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): + self.name = name + self.prefix = prefix + self.suffix = suffix + self.flags = flags + + @cached_property + def value(self): + return resolve(self.prefix, self.suffix) + + def __repr__(self): + return '' % (self.name, self.prefix, + self.suffix, self.flags) + + def __eq__(self, other): + if not isinstance(other, ExportEntry): + result = False + else: + result = (self.name == other.name and + self.prefix == other.prefix and + self.suffix == other.suffix and + self.flags == other.flags) + return result + + __hash__ = object.__hash__ + + +ENTRY_RE = re.compile(r'''(?P(\w|[-.+])+) + \s*=\s*(?P(\w+)([:\.]\w+)*) + \s*(\[\s*(?P\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? + ''', re.VERBOSE) + +def get_export_entry(specification): + m = ENTRY_RE.search(specification) + if not m: + result = None + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + else: + d = m.groupdict() + name = d['name'] + path = d['callable'] + colons = path.count(':') + if colons == 0: + prefix, suffix = path, None + else: + if colons != 1: + raise DistlibException('Invalid specification ' + '%r' % specification) + prefix, suffix = path.split(':') + flags = d['flags'] + if flags is None: + if '[' in specification or ']' in specification: + raise DistlibException('Invalid specification ' + '%r' % specification) + flags = [] + else: + flags = [f.strip() for f in flags.split(',')] + result = ExportEntry(name, prefix, suffix, flags) + return result + + +def get_cache_base(suffix=None): + """ + Return the default base location for distlib caches. If the directory does + not exist, it is created. Use the suffix provided for the base directory, + and default to '.distlib' if it isn't provided. + + On Windows, if LOCALAPPDATA is defined in the environment, then it is + assumed to be a directory, and will be the parent directory of the result. + On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home + directory - using os.expanduser('~') - will be the parent directory of + the result. + + The result is just the directory '.distlib' in the parent directory as + determined above, or with the name specified with ``suffix``. 
+ """ + if suffix is None: + suffix = '.distlib' + if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: + result = os.path.expandvars('$localappdata') + else: + # Assume posix, or old Windows + result = os.path.expanduser('~') + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if os.path.isdir(result): + usable = os.access(result, os.W_OK) + if not usable: + logger.warning('Directory exists but is not writable: %s', result) + else: + try: + os.makedirs(result) + usable = True + except OSError: + logger.warning('Unable to create %s', result, exc_info=True) + usable = False + if not usable: + result = tempfile.mkdtemp() + logger.warning('Default location unusable, using %s', result) + return os.path.join(result, suffix) + + +def path_to_cache_dir(path): + """ + Convert an absolute path to a directory name for use in a cache. + + The algorithm used is: + + #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. + #. Any occurrence of ``os.sep`` is replaced with ``'--'``. + #. ``'.cache'`` is appended. + """ + d, p = os.path.splitdrive(os.path.abspath(path)) + if d: + d = d.replace(':', '---') + p = p.replace(os.sep, '--') + return d + p + '.cache' + + +def ensure_slash(s): + if not s.endswith('/'): + return s + '/' + return s + + +def parse_credentials(netloc): + username = password = None + if '@' in netloc: + prefix, netloc = netloc.split('@', 1) + if ':' not in prefix: + username = prefix + else: + username, password = prefix.split(':', 1) + return username, password, netloc + + +def get_process_umask(): + result = os.umask(0o22) + os.umask(result) + return result + +def is_string_sequence(seq): + result = True + i = None + for i, s in enumerate(seq): + if not isinstance(s, string_types): + result = False + break + assert i is not None + return result + +PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' + '([a-z0-9_.+-]+)', re.I) +PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') + + +def split_filename(filename, project_name=None): + """ + Extract name, version, python version from a filename (no extension) + + Return name, version, pyver or None + """ + result = None + pyver = None + m = PYTHON_VERSION.search(filename) + if m: + pyver = m.group(1) + filename = filename[:m.start()] + if project_name and len(filename) > len(project_name) + 1: + m = re.match(re.escape(project_name) + r'\b', filename) + if m: + n = m.end() + result = filename[:n], filename[n + 1:], pyver + if result is None: + m = PROJECT_NAME_AND_VERSION.match(filename) + if m: + result = m.group(1), m.group(3), pyver + return result + +# Allow spaces in name because of legacy dists like "Twisted Core" +NAME_VERSION_RE = re.compile(r'(?P[\w .-]+)\s*' + r'\(\s*(?P[^\s)]+)\)$') + +def parse_name_and_version(p): + """ + A utility method used to get name and version from a string. + + From e.g. a Provides-Dist value. + + :param p: A value in a form 'foo (1.0)' + :return: The name and version as a tuple. 
+ """ + m = NAME_VERSION_RE.match(p) + if not m: + raise DistlibException('Ill-formed name/version string: \'%s\'' % p) + d = m.groupdict() + return d['name'].strip().lower(), d['ver'] + +def get_extras(requested, available): + result = set() + requested = set(requested or []) + available = set(available or []) + if '*' in requested: + requested.remove('*') + result |= available + for r in requested: + if r == '-': + result.add(r) + elif r.startswith('-'): + unwanted = r[1:] + if unwanted not in available: + logger.warning('undeclared extra: %s' % unwanted) + if unwanted in result: + result.remove(unwanted) + else: + if r not in available: + logger.warning('undeclared extra: %s' % r) + result.add(r) + return result +# +# Extended metadata functionality +# + +def _get_external_data(url): + result = {} + try: + # urlopen might fail if it runs into redirections, + # because of Python issue #13696. Fixed in locators + # using a custom redirect handler. + resp = urlopen(url) + headers = resp.info() + ct = headers.get('Content-Type') + if not ct.startswith('application/json'): + logger.debug('Unexpected response for JSON request: %s', ct) + else: + reader = codecs.getreader('utf-8')(resp) + #data = reader.read().decode('utf-8') + #result = json.loads(data) + result = json.load(reader) + except Exception as e: + logger.exception('Failed to get external data for %s: %s', url, e) + return result + +_external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + +def get_project_data(name): + url = '%s/%s/project.json' % (name[0].upper(), name) + url = urljoin(_external_data_base_url, url) + result = _get_external_data(url) + return result + +def get_package_data(name, version): + url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) + url = urljoin(_external_data_base_url, url) + return _get_external_data(url) + + +class Cache(object): + """ + A class implementing a cache for resources that need to live in the file system + e.g. shared libraries. This class was moved from resources to here because it + could be used by other modules, e.g. the wheel module. + """ + + def __init__(self, base): + """ + Initialise an instance. + + :param base: The base directory where the cache should be located. + """ + # we use 'isdir' instead of 'exists', because we want to + # fail if there's a file with that name + if not os.path.isdir(base): + os.makedirs(base) + if (os.stat(base).st_mode & 0o77) != 0: + logger.warning('Directory \'%s\' is not private', base) + self.base = os.path.abspath(os.path.normpath(base)) + + def prefix_to_dir(self, prefix): + """ + Converts a resource prefix to a directory name in the cache. + """ + return path_to_cache_dir(prefix) + + def clear(self): + """ + Clear the cache. + """ + not_removed = [] + for fn in os.listdir(self.base): + fn = os.path.join(self.base, fn) + try: + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + except Exception: + not_removed.append(fn) + return not_removed + + +class EventMixin(object): + """ + A very simple publish/subscribe system. + """ + def __init__(self): + self._subscribers = {} + + def add(self, event, subscriber, append=True): + """ + Add a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be added (and called when the + event is published). + :param append: Whether to append or prepend the subscriber to an + existing subscriber list for the event. 
+ """ + subs = self._subscribers + if event not in subs: + subs[event] = deque([subscriber]) + else: + sq = subs[event] + if append: + sq.append(subscriber) + else: + sq.appendleft(subscriber) + + def remove(self, event, subscriber): + """ + Remove a subscriber for an event. + + :param event: The name of an event. + :param subscriber: The subscriber to be removed. + """ + subs = self._subscribers + if event not in subs: + raise ValueError('No subscribers: %r' % event) + subs[event].remove(subscriber) + + def get_subscribers(self, event): + """ + Return an iterator for the subscribers for an event. + :param event: The event to return subscribers for. + """ + return iter(self._subscribers.get(event, ())) + + def publish(self, event, *args, **kwargs): + """ + Publish a event and return a list of values returned by its + subscribers. + + :param event: The event to publish. + :param args: The positional arguments to pass to the event's + subscribers. + :param kwargs: The keyword arguments to pass to the event's + subscribers. + """ + result = [] + for subscriber in self.get_subscribers(event): + try: + value = subscriber(event, *args, **kwargs) + except Exception: + logger.exception('Exception during event publication') + value = None + result.append(value) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', + event, args, kwargs, result) + return result + +# +# Simple sequencing +# +class Sequencer(object): + def __init__(self): + self._preds = {} + self._succs = {} + self._nodes = set() # nodes with no preds/succs + + def add_node(self, node): + self._nodes.add(node) + + def remove_node(self, node, edges=False): + if node in self._nodes: + self._nodes.remove(node) + if edges: + for p in set(self._preds.get(node, ())): + self.remove(p, node) + for s in set(self._succs.get(node, ())): + self.remove(node, s) + # Remove empties + for k, v in list(self._preds.items()): + if not v: + del self._preds[k] + for k, v in list(self._succs.items()): + if not v: + del self._succs[k] + + def add(self, pred, succ): + assert pred != succ + self._preds.setdefault(succ, set()).add(pred) + self._succs.setdefault(pred, set()).add(succ) + + def remove(self, pred, succ): + assert pred != succ + try: + preds = self._preds[succ] + succs = self._succs[pred] + except KeyError: + raise ValueError('%r not a successor of anything' % succ) + try: + preds.remove(pred) + succs.remove(succ) + except KeyError: + raise ValueError('%r not a successor of %r' % (succ, pred)) + + def is_step(self, step): + return (step in self._preds or step in self._succs or + step in self._nodes) + + def get_steps(self, final): + if not self.is_step(final): + raise ValueError('Unknown: %r' % final) + result = [] + todo = [] + seen = set() + todo.append(final) + while todo: + step = todo.pop(0) + if step in seen: + # if a step was already seen, + # move it to the end (so it will appear earlier + # when reversed on return) ... 
but not for the + # final step, as that would be confusing for + # users + if step != final: + result.remove(step) + result.append(step) + else: + seen.add(step) + result.append(step) + preds = self._preds.get(step, ()) + todo.extend(preds) + return reversed(result) + + @property + def strong_connections(self): + #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + index_counter = [0] + stack = [] + lowlinks = {} + index = {} + result = [] + + graph = self._succs + + def strongconnect(node): + # set the depth index for this node to the smallest unused index + index[node] = index_counter[0] + lowlinks[node] = index_counter[0] + index_counter[0] += 1 + stack.append(node) + + # Consider successors + try: + successors = graph[node] + except Exception: + successors = [] + for successor in successors: + if successor not in lowlinks: + # Successor has not yet been visited + strongconnect(successor) + lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + elif successor in stack: + # the successor is in the stack and hence in the current + # strongly connected component (SCC) + lowlinks[node] = min(lowlinks[node],index[successor]) + + # If `node` is a root node, pop the stack and generate an SCC + if lowlinks[node] == index[node]: + connected_component = [] + + while True: + successor = stack.pop() + connected_component.append(successor) + if successor == node: break + component = tuple(connected_component) + # storing the result + result.append(component) + + for node in graph: + if node not in lowlinks: + strongconnect(node) + + return result + + @property + def dot(self): + result = ['digraph G {'] + for succ in self._preds: + preds = self._preds[succ] + for pred in preds: + result.append(' %s -> %s;' % (pred, succ)) + for node in self._nodes: + result.append(' %s;' % node) + result.append('}') + return '\n'.join(result) + +# +# Unarchiving functionality for zip, tar, tgz, tbz, whl +# + +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', + '.tgz', '.tbz', '.whl') + +def unarchive(archive_filename, dest_dir, format=None, check=True): + + def check_path(path): + if not isinstance(path, text_type): + path = path.decode('utf-8') + p = os.path.abspath(os.path.join(dest_dir, path)) + if not p.startswith(dest_dir) or p[plen] != os.sep: + raise ValueError('path outside destination: %r' % p) + + dest_dir = os.path.abspath(dest_dir) + plen = len(dest_dir) + archive = None + if format is None: + if archive_filename.endswith(('.zip', '.whl')): + format = 'zip' + elif archive_filename.endswith(('.tar.gz', '.tgz')): + format = 'tgz' + mode = 'r:gz' + elif archive_filename.endswith(('.tar.bz2', '.tbz')): + format = 'tbz' + mode = 'r:bz2' + elif archive_filename.endswith('.tar'): + format = 'tar' + mode = 'r' + else: + raise ValueError('Unknown format for %r' % archive_filename) + try: + if format == 'zip': + archive = ZipFile(archive_filename, 'r') + if check: + names = archive.namelist() + for name in names: + check_path(name) + else: + archive = tarfile.open(archive_filename, mode) + if check: + names = archive.getnames() + for name in names: + check_path(name) + if format != 'zip' and sys.version_info[0] < 3: + # See Python issue 17153. If the dest path contains Unicode, + # tarfile extraction fails on Python 2.x if a member path name + # contains non-ASCII characters - it leads to an implicit + # bytes -> unicode conversion using ASCII to decode. 
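+            # For example (illustrative), a member named b'caf\xc3\xa9.txt'
+            # is decoded to u'caf\xe9.txt' here before extraction.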
+ for tarinfo in archive.getmembers(): + if not isinstance(tarinfo.name, text_type): + tarinfo.name = tarinfo.name.decode('utf-8') + archive.extractall(dest_dir) + + finally: + if archive: + archive.close() + + +def zip_dir(directory): + """zip a directory tree into a BytesIO object""" + result = io.BytesIO() + dlen = len(directory) + with ZipFile(result, "w") as zf: + for root, dirs, files in os.walk(directory): + for name in files: + full = os.path.join(root, name) + rel = root[dlen:] + dest = os.path.join(rel, name) + zf.write(full, dest) + return result + +# +# Simple progress bar +# + +UNITS = ('', 'K', 'M', 'G','T','P') + + +class Progress(object): + unknown = 'UNKNOWN' + + def __init__(self, minval=0, maxval=100): + assert maxval is None or maxval >= minval + self.min = self.cur = minval + self.max = maxval + self.started = None + self.elapsed = 0 + self.done = False + + def update(self, curval): + assert self.min <= curval + assert self.max is None or curval <= self.max + self.cur = curval + now = time.time() + if self.started is None: + self.started = now + else: + self.elapsed = now - self.started + + def increment(self, incr): + assert incr >= 0 + self.update(self.cur + incr) + + def start(self): + self.update(self.min) + return self + + def stop(self): + if self.max is not None: + self.update(self.max) + self.done = True + + @property + def maximum(self): + return self.unknown if self.max is None else self.max + + @property + def percentage(self): + if self.done: + result = '100 %' + elif self.max is None: + result = ' ?? %' + else: + v = 100.0 * (self.cur - self.min) / (self.max - self.min) + result = '%3d %%' % v + return result + + def format_duration(self, duration): + if (duration <= 0) and self.max is None or self.cur == self.min: + result = '??:??:??' 
+ #elif duration < 1: + # result = '--:--:--' + else: + result = time.strftime('%H:%M:%S', time.gmtime(duration)) + return result + + @property + def ETA(self): + if self.done: + prefix = 'Done' + t = self.elapsed + #import pdb; pdb.set_trace() + else: + prefix = 'ETA ' + if self.max is None: + t = -1 + elif self.elapsed == 0 or (self.cur == self.min): + t = 0 + else: + #import pdb; pdb.set_trace() + t = float(self.max - self.min) + t /= self.cur - self.min + t = (t - 1) * self.elapsed + return '%s: %s' % (prefix, self.format_duration(t)) + + @property + def speed(self): + if self.elapsed == 0: + result = 0.0 + else: + result = (self.cur - self.min) / self.elapsed + for unit in UNITS: + if result < 1000: + break + result /= 1000.0 + return '%d %sB/s' % (result, unit) + +# +# Glob functionality +# + +RICH_GLOB = re.compile(r'\{([^}]*)\}') +_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') +_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') + + +def iglob(path_glob): + """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" + if _CHECK_RECURSIVE_GLOB.search(path_glob): + msg = """invalid glob %r: recursive glob "**" must be used alone""" + raise ValueError(msg % path_glob) + if _CHECK_MISMATCH_SET.search(path_glob): + msg = """invalid glob %r: mismatching set marker '{' or '}'""" + raise ValueError(msg % path_glob) + return _iglob(path_glob) + + +def _iglob(path_glob): + rich_path_glob = RICH_GLOB.split(path_glob, 1) + if len(rich_path_glob) > 1: + assert len(rich_path_glob) == 3, rich_path_glob + prefix, set, suffix = rich_path_glob + for item in set.split(','): + for path in _iglob(''.join((prefix, item, suffix))): + yield path + else: + if '**' not in path_glob: + for item in std_iglob(path_glob): + yield item + else: + prefix, radical = path_glob.split('**', 1) + if prefix == '': + prefix = '.' 
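+            # For example (illustrative): 'src/**/*.py' splits into the
+            # prefix 'src/' and the radical '*.py', which is then globbed
+            # under every directory below 'src'.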
+            if radical == '':
+                radical = '*'
+            else:
+                # we support both
+                radical = radical.lstrip('/')
+                radical = radical.lstrip('\\')
+            for path, dir, files in os.walk(prefix):
+                path = os.path.normpath(path)
+                for fn in _iglob(os.path.join(path, radical)):
+                    yield fn
+
+
+#
+# HTTPSConnection which verifies certificates/matches domains
+#
+
+class HTTPSConnection(httplib.HTTPSConnection):
+    ca_certs = None     # set this to the path to the certs file (.pem)
+    check_domain = True # only used if ca_certs is not None
+
+    # noinspection PyPropertyAccess
+    def connect(self):
+        sock = socket.create_connection((self.host, self.port), self.timeout)
+        if getattr(self, '_tunnel_host', False):
+            self.sock = sock
+            self._tunnel()
+
+        if not hasattr(ssl, 'SSLContext'):
+            # For 2.x
+            if self.ca_certs:
+                cert_reqs = ssl.CERT_REQUIRED
+            else:
+                cert_reqs = ssl.CERT_NONE
+            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
+                                        cert_reqs=cert_reqs,
+                                        ssl_version=ssl.PROTOCOL_SSLv23,
+                                        ca_certs=self.ca_certs)
+        else:
+            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+            context.options |= ssl.OP_NO_SSLv2
+            if self.cert_file:
+                context.load_cert_chain(self.cert_file, self.key_file)
+            kwargs = {}
+            if self.ca_certs:
+                context.verify_mode = ssl.CERT_REQUIRED
+                context.load_verify_locations(cafile=self.ca_certs)
+            if getattr(ssl, 'HAS_SNI', False):
+                kwargs['server_hostname'] = self.host
+            self.sock = context.wrap_socket(sock, **kwargs)
+        if self.ca_certs and self.check_domain:
+            try:
+                match_hostname(self.sock.getpeercert(), self.host)
+                logger.debug('Host verified: %s', self.host)
+            except CertificateError:
+                self.sock.shutdown(socket.SHUT_RDWR)
+                self.sock.close()
+                raise
+
+class HTTPSHandler(BaseHTTPSHandler):
+    def __init__(self, ca_certs, check_domain=True):
+        BaseHTTPSHandler.__init__(self)
+        self.ca_certs = ca_certs
+        self.check_domain = check_domain
+
+    def _conn_maker(self, *args, **kwargs):
+        """
+        This is called to create a connection instance. Normally you'd
+        pass a connection class to do_open, but it doesn't actually check for
+        a class, and just expects a callable. As long as we behave just as a
+        constructor would have, we should be OK. If it ever changes so that
+        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
+        which just sets check_domain to False in the class definition, and
+        choose which one to pass to do_open.
+        """
+        result = HTTPSConnection(*args, **kwargs)
+        if self.ca_certs:
+            result.ca_certs = self.ca_certs
+            result.check_domain = self.check_domain
+        return result
+
+    def https_open(self, req):
+        try:
+            return self.do_open(self._conn_maker, req)
+        except URLError as e:
+            if 'certificate verify failed' in str(e.reason):
+                raise CertificateError('Unable to verify server certificate '
+                                       'for %s' % req.host)
+            else:
+                raise
+
+#
+# To prevent mixing HTTP traffic with HTTPS (examples: A Man-In-The-
+# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
+# HTML containing a http://xyz link when it should be https://xyz),
+# you can use the following handler class, which does not allow HTTP traffic.
+#
+# It works by inheriting from HTTPHandler - so build_opener won't add a
+# handler for HTTP itself.
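+#
+# An illustrative (not normative) way to wire this up, with a hypothetical
+# CA bundle path:
+#
+#   import urllib2
+#   opener = urllib2.build_opener(HTTPSOnlyHandler('/path/to/cacert.pem'))
+#   opener.open('https://pypi.python.org/simple/')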
+# +class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): + raise URLError('Unexpected HTTP request on what should be a secure ' + 'connection: %s' % req) + +# +# XML-RPC with timeouts +# + +_ver_info = sys.version_info[:2] + +if _ver_info == (2, 6): + class HTTP(httplib.HTTP): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + + class HTTPS(httplib.HTTPS): + def __init__(self, host='', port=None, **kwargs): + if port == 0: # 0 means use port 0, not the default port + port = None + self._setup(self._connection_class(host, port, **kwargs)) + + +class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.Transport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, x509 = self.get_host_info(host) + if _ver_info == (2, 6): + result = HTTP(h, timeout=self.timeout) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + result = self._connection[1] + return result + +class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): + self.timeout = timeout + xmlrpclib.SafeTransport.__init__(self, use_datetime) + + def make_connection(self, host): + h, eh, kwargs = self.get_host_info(host) + if not kwargs: + kwargs = {} + kwargs['timeout'] = self.timeout + if _ver_info == (2, 6): + result = HTTPS(host, None, **kwargs) + else: + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, + **kwargs) + result = self._connection[1] + return result + + +class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): + self.timeout = timeout = kwargs.pop('timeout', None) + # The above classes only come into play if a timeout + # is specified + if timeout is not None: + scheme, _ = splittype(uri) + use_datetime = kwargs.get('use_datetime', 0) + if scheme == 'https': + tcls = SafeTransport + else: + tcls = Transport + kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) + self.transport = t + xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + +# +# CSV functionality. This is provided because on 2.x, the csv module can't +# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. 
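+#
+# An illustrative read of a wheel RECORD file with these wrappers
+# (filename hypothetical):
+#
+#   with CSVReader(path='RECORD') as reader:
+#       for row in reader:
+#           archive_path, digest, size = row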
+# + +def _csv_open(fn, mode, **kwargs): + if sys.version_info[0] < 3: + mode += 'b' + else: + kwargs['newline'] = '' + return open(fn, mode, **kwargs) + + +class CSVBase(object): + defaults = { + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) + } + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stream.close() + + +class CSVReader(CSVBase): + def __init__(self, **kwargs): + if 'stream' in kwargs: + stream = kwargs['stream'] + if sys.version_info[0] >= 3: + # needs to be a text stream + stream = codecs.getreader('utf-8')(stream) + self.stream = stream + else: + self.stream = _csv_open(kwargs['path'], 'r') + self.reader = csv.reader(self.stream, **self.defaults) + + def __iter__(self): + return self + + def next(self): + result = next(self.reader) + if sys.version_info[0] < 3: + for i, item in enumerate(result): + if not isinstance(item, text_type): + result[i] = item.decode('utf-8') + return result + + __next__ = next + +class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): + self.stream = _csv_open(fn, 'w') + self.writer = csv.writer(self.stream, **self.defaults) + + def writerow(self, row): + if sys.version_info[0] < 3: + r = [] + for item in row: + if isinstance(item, text_type): + item = item.encode('utf-8') + r.append(item) + row = r + self.writer.writerow(row) + +# +# Configurator functionality +# + +class Configurator(BaseConfigurator): + + value_converters = dict(BaseConfigurator.value_converters) + value_converters['inc'] = 'inc_convert' + + def __init__(self, config, base=None): + super(Configurator, self).__init__(config) + self.base = base or os.getcwd() + + def configure_custom(self, config): + def convert(o): + if isinstance(o, (list, tuple)): + result = type(o)([convert(i) for i in o]) + elif isinstance(o, dict): + if '()' in o: + result = self.configure_custom(o) + else: + result = {} + for k in o: + result[k] = convert(o[k]) + else: + result = self.convert(o) + return result + + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + args = config.pop('[]', ()) + if args: + args = tuple([convert(o) for o in args]) + items = [(k, convert(config[k])) for k in config if valid_ident(k)] + kwargs = dict(items) + result = c(*args, **kwargs) + if props: + for n, v in props.items(): + setattr(result, n, convert(v)) + return result + + def __getitem__(self, key): + result = self.config[key] + if isinstance(result, dict) and '()' in result: + self.config[key] = result = self.configure_custom(result) + return result + + def inc_convert(self, value): + """Default converter for the inc:// protocol.""" + if not os.path.isabs(value): + value = os.path.join(self.base, value) + with codecs.open(value, 'r', encoding='utf-8') as f: + result = json.load(f) + return result + +# +# Mixin for running subprocesses and capturing their output +# + +class SubprocessMixin(object): + def __init__(self, verbose=False, progress=None): + self.verbose = verbose + self.progress = progress + + def reader(self, stream, context): + """ + Read lines from a subprocess' output stream and either pass to a progress + callable (if specified) or write progress information to sys.stderr. 
+ """ + progress = self.progress + verbose = self.verbose + while True: + s = stream.readline() + if not s: + break + if progress is not None: + progress(s, context) + else: + if not verbose: + sys.stderr.write('.') + else: + sys.stderr.write(s.decode('utf-8')) + sys.stderr.flush() + stream.close() + + def run_command(self, cmd, **kwargs): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs) + t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) + t1.start() + t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) + t2.start() + p.wait() + t1.join() + t2.join() + if self.progress is not None: + self.progress('done.', 'main') + elif self.verbose: + sys.stderr.write('done.\n') + return p diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py new file mode 100644 index 0000000..d3dcfa0 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/version.py @@ -0,0 +1,742 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2012-2016 The Python Software Foundation. +# See LICENSE.txt and CONTRIBUTORS.txt. +# +""" +Implementation of a flexible versioning scheme providing support for PEP-440, +setuptools-compatible and semantic versioning. +""" + +import logging +import re + +from .compat import string_types + +__all__ = ['NormalizedVersion', 'NormalizedMatcher', + 'LegacyVersion', 'LegacyMatcher', + 'SemanticVersion', 'SemanticMatcher', + 'UnsupportedVersionError', 'get_scheme'] + +logger = logging.getLogger(__name__) + + +class UnsupportedVersionError(ValueError): + """This is an unsupported version.""" + pass + + +class Version(object): + def __init__(self, s): + self._string = s = s.strip() + self._parts = parts = self.parse(s) + assert isinstance(parts, tuple) + assert len(parts) > 0 + + def parse(self, s): + raise NotImplementedError('please implement in a subclass') + + def _check_compatible(self, other): + if type(self) != type(other): + raise TypeError('cannot compare %r and %r' % (self, other)) + + def __eq__(self, other): + self._check_compatible(other) + return self._parts == other._parts + + def __ne__(self, other): + return not self.__eq__(other) + + def __lt__(self, other): + self._check_compatible(other) + return self._parts < other._parts + + def __gt__(self, other): + return not (self.__lt__(other) or self.__eq__(other)) + + def __le__(self, other): + return self.__lt__(other) or self.__eq__(other) + + def __ge__(self, other): + return self.__gt__(other) or self.__eq__(other) + + # See http://docs.python.org/reference/datamodel#object.__hash__ + def __hash__(self): + return hash(self._parts) + + def __repr__(self): + return "%s('%s')" % (self.__class__.__name__, self._string) + + def __str__(self): + return self._string + + @property + def is_prerelease(self): + raise NotImplementedError('Please implement in subclasses.') + + +class Matcher(object): + version_class = None + + dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?") + comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$') + num_re = re.compile(r'^\d+(\.\d+)*$') + + # value is either a callable or the name of a method + _operators = { + '<': lambda v, c, p: v < c, + '>': lambda v, c, p: v > c, + '<=': lambda v, c, p: v == c or v < c, + '>=': lambda v, c, p: v == c or v > c, + '==': lambda v, c, p: v == c, + '===': lambda v, c, p: v == c, + # by default, compatible => >=. 
+        '~=': lambda v, c, p: v == c or v > c,
+        '!=': lambda v, c, p: v != c,
+    }
+
+    def __init__(self, s):
+        if self.version_class is None:
+            raise ValueError('Please specify a version class')
+        self._string = s = s.strip()
+        m = self.dist_re.match(s)
+        if not m:
+            raise ValueError('Not valid: %r' % s)
+        groups = m.groups('')
+        self.name = groups[0].strip()
+        self.key = self.name.lower()    # for case-insensitive comparisons
+        clist = []
+        if groups[2]:
+            constraints = [c.strip() for c in groups[2].split(',')]
+            for c in constraints:
+                m = self.comp_re.match(c)
+                if not m:
+                    raise ValueError('Invalid %r in %r' % (c, s))
+                groups = m.groups()
+                op = groups[0] or '~='
+                s = groups[1]
+                if s.endswith('.*'):
+                    if op not in ('==', '!='):
+                        raise ValueError('\'.*\' not allowed for '
+                                         '%r constraints' % op)
+                    # Could be a partial version (e.g. for '2.*') which
+                    # won't parse as a version, so keep it as a string
+                    vn, prefix = s[:-2], True
+                    if not self.num_re.match(vn):
+                        # Just to check that vn is a valid version
+                        self.version_class(vn)
+                else:
+                    # Should parse as a version, so we can create an
+                    # instance for the comparison
+                    vn, prefix = self.version_class(s), False
+                clist.append((op, vn, prefix))
+        self._parts = tuple(clist)
+
+    def match(self, version):
+        """
+        Check if the provided version matches the constraints.
+
+        :param version: The version to match against this instance.
+        :type version: String or :class:`Version` instance.
+        """
+        if isinstance(version, string_types):
+            version = self.version_class(version)
+        for operator, constraint, prefix in self._parts:
+            f = self._operators.get(operator)
+            if isinstance(f, string_types):
+                f = getattr(self, f)
+            if not f:
+                msg = ('%r not implemented '
+                       'for %s' % (operator, self.__class__.__name__))
+                raise NotImplementedError(msg)
+            if not f(version, constraint, prefix):
+                return False
+        return True
+
+    @property
+    def exact_version(self):
+        result = None
+        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
+            result = self._parts[0][1]
+        return result
+
+    def _check_compatible(self, other):
+        if type(self) != type(other) or self.name != other.name:
+            raise TypeError('cannot compare %s and %s' % (self, other))
+
+    def __eq__(self, other):
+        self._check_compatible(other)
+        return self.key == other.key and self._parts == other._parts
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    # See http://docs.python.org/reference/datamodel#object.__hash__
+    def __hash__(self):
+        return hash(self.key) + hash(self._parts)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self._string)
+
+    def __str__(self):
+        return self._string
+
+
+PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
+                               r'(\.(post)(\d+))?(\.(dev)(\d+))?'
+                               r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
+
+
+def _pep_440_key(s):
+    s = s.strip()
+    m = PEP440_VERSION_RE.match(s)
+    if not m:
+        raise UnsupportedVersionError('Not a valid version: %s' % s)
+    groups = m.groups()
+    nums = tuple(int(v) for v in groups[1].split('.'))
+    while len(nums) > 1 and nums[-1] == 0:
+        nums = nums[:-1]
+
+    if not groups[0]:
+        epoch = 0
+    else:
+        epoch = int(groups[0][:-1])     # slice off the trailing '!'
+    pre = groups[4:6]
+    post = groups[7:9]
+    dev = groups[10:12]
+    local = groups[13]
+    if pre == (None, None):
+        pre = ()
+    else:
+        pre = pre[0], int(pre[1])
+    if post == (None, None):
+        post = ()
+    else:
+        post = post[0], int(post[1])
+    if dev == (None, None):
+        dev = ()
+    else:
+        dev = dev[0], int(dev[1])
+    if local is None:
+        local = ()
+    else:
+        parts = []
+        for part in local.split('.'):
+            # to ensure that numeric compares as > lexicographic, avoid
+            # comparing them directly, but encode a tuple which ensures
+            # correct sorting
+            if part.isdigit():
+                part = (1, int(part))
+            else:
+                part = (0, part)
+            parts.append(part)
+        local = tuple(parts)
+    if not pre:
+        # either before pre-release, or final release and after
+        if not post and dev:
+            # before pre-release
+            pre = ('a', -1)     # to sort before a0
+        else:
+            pre = ('z',)        # to sort after all pre-releases
+    # now look at the state of post and dev.
+    if not post:
+        post = ('_',)   # sort before 'a'
+    if not dev:
+        dev = ('final',)
+
+    #print('%s -> %s' % (s, m.groups()))
+    return epoch, nums, pre, post, dev, local
+
+
+_normalized_key = _pep_440_key
+
+
+class NormalizedVersion(Version):
+    """A rational version.
+
+    Good:
+        1.2         # equivalent to "1.2.0"
+        1.2.0
+        1.2a1
+        1.2.3a2
+        1.2.3b1
+        1.2.3c1
+        1.2.3.4
+        TODO: fill this out
+
+    Bad:
+        1           # minimum two numbers
+        1.2a        # release level must have a release serial
+        1.2.3b
+    """
+    def parse(self, s):
+        result = _normalized_key(s)
+        # _normalized_key loses trailing zeroes in the release
+        # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
+        # However, PEP 440 prefix matching needs it: for example,
+        # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
+        m = PEP440_VERSION_RE.match(s)      # must succeed
+        groups = m.groups()
+        self._release_clause = tuple(int(v) for v in groups[1].split('.'))
+        return result
+
+    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
+
+    @property
+    def is_prerelease(self):
+        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
+
+
+def _match_prefix(x, y):
+    x = str(x)
+    y = str(y)
+    if x == y:
+        return True
+    if not x.startswith(y):
+        return False
+    n = len(y)
+    return x[n] == '.'
+
+
+class NormalizedMatcher(Matcher):
+    version_class = NormalizedVersion
+
+    # value is either a callable or the name of a method
+    _operators = {
+        '~=': '_match_compatible',
+        '<': '_match_lt',
+        '>': '_match_gt',
+        '<=': '_match_le',
+        '>=': '_match_ge',
+        '==': '_match_eq',
+        '===': '_match_arbitrary',
+        '!=': '_match_ne',
+    }
+
+    def _adjust_local(self, version, constraint, prefix):
+        if prefix:
+            strip_local = '+' not in constraint and version._parts[-1]
+        else:
+            # both constraint and version are
+            # NormalizedVersion instances.
+            # If constraint does not have a local component,
+            # ensure the version doesn't, either.
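+            # For example, the constraint '== 1.0' (no local component)
+            # matches candidate '1.0+local.1', because the candidate's
+            # local part is stripped before the comparison.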
+            strip_local = not constraint._parts[-1] and version._parts[-1]
+        if strip_local:
+            s = version._string.split('+', 1)[0]
+            version = self.version_class(s)
+        return version, constraint
+
+    def _match_lt(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version >= constraint:
+            return False
+        release_clause = constraint._release_clause
+        pfx = '.'.join([str(i) for i in release_clause])
+        return not _match_prefix(version, pfx)
+
+    def _match_gt(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version <= constraint:
+            return False
+        release_clause = constraint._release_clause
+        pfx = '.'.join([str(i) for i in release_clause])
+        return not _match_prefix(version, pfx)
+
+    def _match_le(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        return version <= constraint
+
+    def _match_ge(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        return version >= constraint
+
+    def _match_eq(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if not prefix:
+            result = (version == constraint)
+        else:
+            result = _match_prefix(version, constraint)
+        return result
+
+    def _match_arbitrary(self, version, constraint, prefix):
+        return str(version) == str(constraint)
+
+    def _match_ne(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if not prefix:
+            result = (version != constraint)
+        else:
+            result = not _match_prefix(version, constraint)
+        return result
+
+    def _match_compatible(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version == constraint:
+            return True
+        if version < constraint:
+            return False
+#        if not prefix:
+#            return True
+        release_clause = constraint._release_clause
+        if len(release_clause) > 1:
+            release_clause = release_clause[:-1]
+        pfx = '.'.join([str(i) for i in release_clause])
+        return _match_prefix(version, pfx)
+
+_REPLACEMENTS = (
+    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
+    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
+    (re.compile('^[.-]'), ''),                      # remove leading puncts
+    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
+    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
+    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading r(ev)
+    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
+    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
+    (re.compile(r'\b(pre-alpha|prealpha)\b'),
+                'pre.alpha'),                       # standardise
+    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
+)
+
+_SUFFIX_REPLACEMENTS = (
+    (re.compile('^[:~._+-]+'), ''),                 # remove leading puncts
+    (re.compile('[,*")([\]]'), ''),                 # remove unwanted chars
+    (re.compile('[~:+_ -]'), '.'),                  # replace illegal chars
+    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
+    (re.compile(r'\.$'), ''),                       # trailing '.'
+)
+
+_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
+
+
+def _suggest_semantic_version(s):
+    """
+    Try to suggest a semantic form for a version for which
+    _suggest_normalized_version couldn't come up with anything.
+    """
+    result = s.strip().lower()
+    for pat, repl in _REPLACEMENTS:
+        result = pat.sub(repl, result)
+    if not result:
+        result = '0.0.0'
+
+    # Now look for numeric prefix, and separate it out from
+    # the rest.
+    #import pdb; pdb.set_trace()
+    m = _NUMERIC_PREFIX.match(result)
+    if not m:
+        prefix = '0.0.0'
+        suffix = result
+    else:
+        prefix = m.groups()[0].split('.')
+        prefix = [int(i) for i in prefix]
+        while len(prefix) < 3:
+            prefix.append(0)
+        if len(prefix) == 3:
+            suffix = result[m.end():]
+        else:
+            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
+            prefix = prefix[:3]
+        prefix = '.'.join([str(i) for i in prefix])
+        suffix = suffix.strip()
+    if suffix:
+        #import pdb; pdb.set_trace()
+        # massage the suffix.
+        for pat, repl in _SUFFIX_REPLACEMENTS:
+            suffix = pat.sub(repl, suffix)
+
+    if not suffix:
+        result = prefix
+    else:
+        sep = '-' if 'dev' in suffix else '+'
+        result = prefix + sep + suffix
+    if not is_semver(result):
+        result = None
+    return result
+
+
+def _suggest_normalized_version(s):
+    """Suggest a normalized version close to the given version string.
+
+    If you have a version string that isn't rational (i.e. NormalizedVersion
+    doesn't like it) then you might be able to get an equivalent (or close)
+    rational version from this function.
+
+    This does a number of simple normalizations to the given string, based
+    on observation of versions currently in use on PyPI. Given a dump of
+    those versions during PyCon 2009, 4287 of them:
+    - 2312 (53.93%) match NormalizedVersion without change
+      with the automatic suggestion
+    - 3474 (81.04%) match when using this suggestion method
+
+    @param s {str} An irrational version string.
+    @returns A rational version string, or None, if couldn't determine one.
+    """
+    try:
+        _normalized_key(s)
+        return s   # already rational
+    except UnsupportedVersionError:
+        pass
+
+    rs = s.lower()
+
+    # part of this could use maketrans
+    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
+                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
+                       ('-pre', 'c'),
+                       ('-release', ''), ('.release', ''), ('-stable', ''),
+                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
+                       ('final', '')):
+        rs = rs.replace(orig, repl)
+
+    # if something ends with dev or pre, we add a 0
+    rs = re.sub(r"pre$", r"pre0", rs)
+    rs = re.sub(r"dev$", r"dev0", rs)
+
+    # if we have something like "b-2" or "a.2" at the end of the
+    # version, that is probably beta, alpha, etc
+    # let's remove the dash or dot
+    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
+
+    # 1.0-dev-r371 -> 1.0.dev371
+    # 0.1-dev-r79 -> 0.1.dev79
+    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
+
+    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
+    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
+
+    # Clean: v0.3, v1.0
+    if rs.startswith('v'):
+        rs = rs[1:]
+
+    # Clean leading '0's on numbers.
+    #TODO: unintended side-effect on, e.g., "2003.05.09"
+    # PyPI stats: 77 (~2%) better
+    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
+
+    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
+    # zero.
+ # PyPI stats: 245 (7.56%) better + rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) + + # the 'dev-rNNN' tag is a dev tag + rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) + + # clean the - when used as a pre delimiter + rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) + + # a terminal "dev" or "devel" can be changed into ".dev0" + rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) + + # a terminal "dev" can be changed into ".dev0" + rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) + + # a terminal "final" or "stable" can be removed + rs = re.sub(r"(final|stable)$", "", rs) + + # The 'r' and the '-' tags are post release tags + # 0.4a1.r10 -> 0.4a1.post10 + # 0.9.33-17222 -> 0.9.33.post17222 + # 0.9.33-r17222 -> 0.9.33.post17222 + rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) + + # Clean 'r' instead of 'dev' usage: + # 0.9.33+r17222 -> 0.9.33.dev17222 + # 1.0dev123 -> 1.0.dev123 + # 1.0.git123 -> 1.0.dev123 + # 1.0.bzr123 -> 1.0.dev123 + # 0.1a0dev.123 -> 0.1a0.dev123 + # PyPI stats: ~150 (~4%) better + rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) + + # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: + # 0.2.pre1 -> 0.2c1 + # 0.2-c1 -> 0.2c1 + # 1.0preview123 -> 1.0c123 + # PyPI stats: ~21 (0.62%) better + rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) + + # Tcl/Tk uses "px" for their post release markers + rs = re.sub(r"p(\d+)$", r".post\1", rs) + + try: + _normalized_key(rs) + except UnsupportedVersionError: + rs = None + return rs + +# +# Legacy version processing (distribute-compatible) +# + +_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) +_VERSION_REPLACE = { + 'pre': 'c', + 'preview': 'c', + '-': 'final-', + 'rc': 'c', + 'dev': '@', + '': None, + '.': None, +} + + +def _legacy_key(s): + def get_parts(s): + result = [] + for p in _VERSION_PART.split(s.lower()): + p = _VERSION_REPLACE.get(p, p) + if p: + if '0' <= p[:1] <= '9': + p = p.zfill(8) + else: + p = '*' + p + result.append(p) + result.append('*final') + return result + + result = [] + for p in get_parts(s): + if p.startswith('*'): + if p < '*final': + while result and result[-1] == '*final-': + result.pop() + while result and result[-1] == '00000000': + result.pop() + result.append(p) + return tuple(result) + + +class LegacyVersion(Version): + def parse(self, s): + return _legacy_key(s) + + @property + def is_prerelease(self): + result = False + for x in self._parts: + if (isinstance(x, string_types) and x.startswith('*') and + x < '*final'): + result = True + break + return result + + +class LegacyMatcher(Matcher): + version_class = LegacyVersion + + _operators = dict(Matcher._operators) + _operators['~='] = '_match_compatible' + + numeric_re = re.compile('^(\d+(\.\d+)*)') + + def _match_compatible(self, version, constraint, prefix): + if version < constraint: + return False + m = self.numeric_re.match(str(constraint)) + if not m: + logger.warning('Cannot compute compatible match for version %s ' + ' and constraint %s', version, constraint) + return True + s = m.groups()[0] + if '.' in s: + s = s.rsplit('.', 1)[0] + return _match_prefix(version, s) + +# +# Semantic versioning +# + +_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' + r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
+                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+
+def is_semver(s):
+    return _SEMVER_RE.match(s)
+
+
+def _semantic_key(s):
+    def make_tuple(s, absent):
+        if s is None:
+            result = (absent,)
+        else:
+            parts = s[1:].split('.')
+            # We can't compare ints and strings on Python 3, so fudge it
+            # by zero-filling numeric values to simulate a numeric comparison
+            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+        return result
+
+    m = is_semver(s)
+    if not m:
+        raise UnsupportedVersionError(s)
+    groups = m.groups()
+    major, minor, patch = [int(i) for i in groups[:3]]
+    # choose the '|' and '*' so that versions sort correctly
+    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+    return (major, minor, patch), pre, build
+
+
+class SemanticVersion(Version):
+    def parse(self, s):
+        return _semantic_key(s)
+
+    @property
+    def is_prerelease(self):
+        return self._parts[1][0] != '|'
+
+
+class SemanticMatcher(Matcher):
+    version_class = SemanticVersion
+
+
+class VersionScheme(object):
+    def __init__(self, key, matcher, suggester=None):
+        self.key = key
+        self.matcher = matcher
+        self.suggester = suggester
+
+    def is_valid_version(self, s):
+        try:
+            self.matcher.version_class(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_matcher(self, s):
+        try:
+            self.matcher(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_constraint_list(self, s):
+        """
+        Used for processing some metadata fields
+        """
+        return self.is_valid_matcher('dummy_name (%s)' % s)
+
+    def suggest(self, s):
+        if self.suggester is None:
+            result = None
+        else:
+            result = self.suggester(s)
+        return result
+
+_SCHEMES = {
+    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
+                                _suggest_normalized_version),
+    'legacy': VersionScheme(_legacy_key, LegacyMatcher,
+                            lambda s: s),  # suggester is called with one argument
+    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
+                              _suggest_semantic_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['normalized']
+
+
+def get_scheme(name):
+    if name not in _SCHEMES:
+        raise ValueError('unknown scheme name: %r' % name)
+    return _SCHEMES[name]
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe
new file mode 100644
index 0000000..f27573a
Binary files /dev/null and b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w32.exe differ
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe
new file mode 100644
index 0000000..fd37d9e
Binary files /dev/null and b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/w64.exe differ
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py
new file mode 100644
index 0000000..2952b8e
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/distlib/wheel.py
@@ -0,0 +1,978 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2016 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import datetime
+import distutils.util
+from email import message_from_file
+import hashlib
+import imp
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+from . import __version__, DistlibException
+from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
+from .database import InstalledDistribution
+from .metadata import Metadata, METADATA_FILENAME
+from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
+                   cached_property, get_cache_base, read_exports, tempdir)
+from .version import NormalizedVersion, UnsupportedVersionError
+
+logger = logging.getLogger(__name__)
+
+cache = None    # created when needed
+
+if hasattr(sys, 'pypy_version_info'):
+    IMP_PREFIX = 'pp'
+elif sys.platform.startswith('java'):
+    IMP_PREFIX = 'jy'
+elif sys.platform == 'cli':
+    IMP_PREFIX = 'ip'
+else:
+    IMP_PREFIX = 'cp'
+
+VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
+if not VER_SUFFIX:   # pragma: no cover
+    VER_SUFFIX = '%s%s' % sys.version_info[:2]
+PYVER = 'py' + VER_SUFFIX
+IMPVER = IMP_PREFIX + VER_SUFFIX
+
+ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
+
+ABI = sysconfig.get_config_var('SOABI')
+if ABI and ABI.startswith('cpython-'):
+    ABI = ABI.replace('cpython-', 'cp')
+else:
+    def _derive_abi():
+        parts = ['cp', VER_SUFFIX]
+        if sysconfig.get_config_var('Py_DEBUG'):
+            parts.append('d')
+        if sysconfig.get_config_var('WITH_PYMALLOC'):
+            parts.append('m')
+        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
+            parts.append('u')
+        return ''.join(parts)
+    ABI = _derive_abi()
+    del _derive_abi
+
+FILENAME_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?
+-(?P<py>\w+\d+(\.\w+\d+)*)
+-(?P<bi>\w+)
+-(?P<ar>\w+(\.\w+)*)
+\.whl$
+''', re.IGNORECASE | re.VERBOSE)
+
+NAME_VERSION_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?$
+''', re.IGNORECASE | re.VERBOSE)
+
+SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
+SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
+SHEBANG_PYTHON = b'#!python'
+SHEBANG_PYTHONW = b'#!pythonw'
+
+if os.sep == '/':
+    to_posix = lambda o: o
+else:
+    to_posix = lambda o: o.replace(os.sep, '/')
+
+
+class Mounter(object):
+    def __init__(self):
+        self.impure_wheels = {}
+        self.libs = {}
+
+    def add(self, pathname, extensions):
+        self.impure_wheels[pathname] = extensions
+        self.libs.update(extensions)
+
+    def remove(self, pathname):
+        extensions = self.impure_wheels.pop(pathname)
+        for k, v in extensions:
+            if k in self.libs:
+                del self.libs[k]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.libs:
+            result = self
+        else:
+            result = None
+        return result
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            result = sys.modules[fullname]
+        else:
+            if fullname not in self.libs:
+                raise ImportError('unable to find extension for %s' % fullname)
+            result = imp.load_dynamic(fullname, self.libs[fullname])
+            result.__loader__ = self
+            parts = fullname.rsplit('.', 1)
+            if len(parts) > 1:
+                result.__package__ = parts[0]
+        return result
+
+_hook = Mounter()
+
+
+class Wheel(object):
+    """
+    Class to build and install from Wheel files (PEP 427).
+    """
+
+    wheel_version = (1, 1)
+    hash_kind = 'sha256'
+
+    def __init__(self, filename=None, sign=False, verify=False):
+        """
+        Initialise an instance using a (valid) filename.
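+
+        For example (with illustrative names): ``Wheel('foo-1.0-py27-none-any.whl')``
+        takes its tags from the full filename, while ``Wheel('foo-1.0')`` supplies
+        just a name and version and keeps the default tags ([PYVER], ['none'],
+        ['any']).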
+ """ + self.sign = sign + self.should_verify = verify + self.buildver = '' + self.pyver = [PYVER] + self.abi = ['none'] + self.arch = ['any'] + self.dirname = os.getcwd() + if filename is None: + self.name = 'dummy' + self.version = '0.1' + self._filename = self.filename + else: + m = NAME_VERSION_RE.match(filename) + if m: + info = m.groupdict('') + self.name = info['nm'] + # Reinstate the local version separator + self.version = info['vn'].replace('_', '-') + self.buildver = info['bn'] + self._filename = self.filename + else: + dirname, filename = os.path.split(filename) + m = FILENAME_RE.match(filename) + if not m: + raise DistlibException('Invalid name or ' + 'filename: %r' % filename) + if dirname: + self.dirname = os.path.abspath(dirname) + self._filename = filename + info = m.groupdict('') + self.name = info['nm'] + self.version = info['vn'] + self.buildver = info['bn'] + self.pyver = info['py'].split('.') + self.abi = info['bi'].split('.') + self.arch = info['ar'].split('.') + + @property + def filename(self): + """ + Build and return a filename from the various components. + """ + if self.buildver: + buildver = '-' + self.buildver + else: + buildver = '' + pyver = '.'.join(self.pyver) + abi = '.'.join(self.abi) + arch = '.'.join(self.arch) + # replace - with _ as a local version separator + version = self.version.replace('-', '_') + return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, + pyver, abi, arch) + + @property + def exists(self): + path = os.path.join(self.dirname, self.filename) + return os.path.isfile(path) + + @property + def tags(self): + for pyver in self.pyver: + for abi in self.abi: + for arch in self.arch: + yield pyver, abi, arch + + @cached_property + def metadata(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + wrapper = codecs.getreader('utf-8') + with ZipFile(pathname, 'r') as zf: + wheel_metadata = self.get_wheel_metadata(zf) + wv = wheel_metadata['Wheel-Version'].split('.', 1) + file_version = tuple([int(i) for i in wv]) + if file_version < (1, 1): + fn = 'METADATA' + else: + fn = METADATA_FILENAME + try: + metadata_filename = posixpath.join(info_dir, fn) + with zf.open(metadata_filename) as bf: + wf = wrapper(bf) + result = Metadata(fileobj=wf) + except KeyError: + raise ValueError('Invalid wheel, because %s is ' + 'missing' % fn) + return result + + def get_wheel_metadata(self, zf): + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + metadata_filename = posixpath.join(info_dir, 'WHEEL') + with zf.open(metadata_filename) as bf: + wf = codecs.getreader('utf-8')(bf) + message = message_from_file(wf) + return dict(message) + + @cached_property + def info(self): + pathname = os.path.join(self.dirname, self.filename) + with ZipFile(pathname, 'r') as zf: + result = self.get_wheel_metadata(zf) + return result + + def process_shebang(self, data): + m = SHEBANG_RE.match(data) + if m: + end = m.end() + shebang, data_after_shebang = data[:end], data[end:] + # Preserve any arguments after the interpreter + if b'pythonw' in shebang.lower(): + shebang_python = SHEBANG_PYTHONW + else: + shebang_python = SHEBANG_PYTHON + m = SHEBANG_DETAIL_RE.match(shebang) + if m: + args = b' ' + m.groups()[-1] + else: + args = b'' + shebang = shebang_python + args + data = shebang + data_after_shebang + else: + cr = data.find(b'\r') + lf = data.find(b'\n') + if cr < 0 or cr > lf: + term = b'\n' + else: + if data[cr:cr + 2] == b'\r\n': + 
term = b'\r\n' + else: + term = b'\r' + data = SHEBANG_PYTHON + term + data + return data + + def get_hash(self, data, hash_kind=None): + if hash_kind is None: + hash_kind = self.hash_kind + try: + hasher = getattr(hashlib, hash_kind) + except AttributeError: + raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) + result = hasher(data).digest() + result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') + return hash_kind, result + + def write_record(self, records, record_path, base): + records = list(records) # make a copy for sorting + p = to_posix(os.path.relpath(record_path, base)) + records.append((p, '', '')) + records.sort() + with CSVWriter(record_path) as writer: + for row in records: + writer.writerow(row) + + def write_records(self, info, libdir, archive_paths): + records = [] + distinfo, info_dir = info + hasher = getattr(hashlib, self.hash_kind) + for ap, p in archive_paths: + with open(p, 'rb') as f: + data = f.read() + digest = '%s=%s' % self.get_hash(data) + size = os.path.getsize(p) + records.append((ap, digest, size)) + + p = os.path.join(distinfo, 'RECORD') + self.write_record(records, p, libdir) + ap = to_posix(os.path.join(info_dir, 'RECORD')) + archive_paths.append((ap, p)) + + def build_zip(self, pathname, archive_paths): + with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: + for ap, p in archive_paths: + logger.debug('Wrote %s to %s in wheel', p, ap) + zf.write(p, ap) + + def build(self, paths, tags=None, wheel_version=None): + """ + Build a wheel from files in specified paths, and use any specified tags + when determining the name of the wheel. + """ + if tags is None: + tags = {} + + libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] + if libkey == 'platlib': + is_pure = 'false' + default_pyver = [IMPVER] + default_abi = [ABI] + default_arch = [ARCH] + else: + is_pure = 'true' + default_pyver = [PYVER] + default_abi = ['none'] + default_arch = ['any'] + + self.pyver = tags.get('pyver', default_pyver) + self.abi = tags.get('abi', default_abi) + self.arch = tags.get('arch', default_arch) + + libdir = paths[libkey] + + name_ver = '%s-%s' % (self.name, self.version) + data_dir = '%s.data' % name_ver + info_dir = '%s.dist-info' % name_ver + + archive_paths = [] + + # First, stuff which is not in site-packages + for key in ('data', 'headers', 'scripts'): + if key not in paths: + continue + path = paths[key] + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + for fn in files: + p = fsdecode(os.path.join(root, fn)) + rp = os.path.relpath(p, path) + ap = to_posix(os.path.join(data_dir, key, rp)) + archive_paths.append((ap, p)) + if key == 'scripts' and not p.endswith('.exe'): + with open(p, 'rb') as f: + data = f.read() + data = self.process_shebang(data) + with open(p, 'wb') as f: + f.write(data) + + # Now, stuff which is in site-packages, other than the + # distinfo stuff. + path = libdir + distinfo = None + for root, dirs, files in os.walk(path): + if root == path: + # At the top level only, save distinfo for later + # and skip it for now + for i, dn in enumerate(dirs): + dn = fsdecode(dn) + if dn.endswith('.dist-info'): + distinfo = os.path.join(root, dn) + del dirs[i] + break + assert distinfo, '.dist-info directory expected, not found' + + for fn in files: + # comment out next suite to leave .pyc files in + if fsdecode(fn).endswith(('.pyc', '.pyo')): + continue + p = os.path.join(root, fn) + rp = to_posix(os.path.relpath(p, path)) + archive_paths.append((rp, p)) + + # Now distinfo. 
Assumed to be flat, i.e. os.listdir is enough.
+        files = os.listdir(distinfo)
+        for fn in files:
+            if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
+                p = fsdecode(os.path.join(distinfo, fn))
+                ap = to_posix(os.path.join(info_dir, fn))
+                archive_paths.append((ap, p))
+
+        wheel_metadata = [
+            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
+            'Generator: distlib %s' % __version__,
+            'Root-Is-Purelib: %s' % is_pure,
+        ]
+        for pyver, abi, arch in self.tags:
+            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
+        p = os.path.join(distinfo, 'WHEEL')
+        with open(p, 'w') as f:
+            f.write('\n'.join(wheel_metadata))
+        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
+        archive_paths.append((ap, p))
+
+        # Now, at last, RECORD.
+        # Paths in here are archive paths - nothing else makes sense.
+        self.write_records((distinfo, info_dir), libdir, archive_paths)
+        # Now, ready to build the zip file
+        pathname = os.path.join(self.dirname, self.filename)
+        self.build_zip(pathname, archive_paths)
+        return pathname
+
+    def install(self, paths, maker, **kwargs):
+        """
+        Install a wheel to the specified paths. If kwarg ``warner`` is
+        specified, it should be a callable, which will be called with two
+        tuples indicating the wheel version of this software and the wheel
+        version in the file, if there is a discrepancy in the versions.
+        This can be used to issue any warnings or to raise any exceptions.
+        If kwarg ``lib_only`` is True, only the purelib/platlib files are
+        installed, and the headers, scripts, data and dist-info metadata are
+        not written.
+
+        The return value is a :class:`InstalledDistribution` instance unless
+        ``lib_only`` is True, in which case the return value is ``None``.
+        """
+
+        dry_run = maker.dry_run
+        warner = kwargs.get('warner')
+        lib_only = kwargs.get('lib_only', False)
+
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if (file_version != self.wheel_version) and warner:
+                warner(self.wheel_version, file_version)
+
+            if message['Root-Is-Purelib'] == 'true':
+                libdir = paths['purelib']
+            else:
+                libdir = paths['platlib']
+
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            data_pfx = posixpath.join(data_dir, '')
+            info_pfx = posixpath.join(info_dir, '')
+            script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+            # make a new instance rather than a copy of maker's,
+            # as we mutate it
+            fileop = FileOperator(dry_run=dry_run)
+            fileop.record = True    # so we can rollback if needed
+
+            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
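+            # i.e. True unless byte-compilation is disabled for this
+            # interpreter (python -B / PYTHONDONTWRITEBYTECODE); consulted
+            # below before compiling copied .py files.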
+
+            outfiles = []   # for RECORD writing
+
+            # for script copying/shebang processing
+            workdir = tempfile.mkdtemp()
+            # set target dir later
+            # we default add_launchers to False, as the
+            # Python Launcher should be used instead
+            maker.source_dir = workdir
+            maker.target_dir = None
+            try:
+                for zinfo in zf.infolist():
+                    arcname = zinfo.filename
+                    if isinstance(arcname, text_type):
+                        u_arcname = arcname
+                    else:
+                        u_arcname = arcname.decode('utf-8')
+                    # The signature file won't be in RECORD,
+                    # and we don't currently do anything with it
+                    if u_arcname.endswith('/RECORD.jws'):
+                        continue
+                    row = records[u_arcname]
+                    if row[2] and str(zinfo.file_size) != row[2]:
+                        raise DistlibException('size mismatch for '
+                                               '%s' % u_arcname)
+                    if row[1]:
+                        kind, value = row[1].split('=', 1)
+                        with zf.open(arcname) as bf:
+                            data = bf.read()
+                        _, digest = self.get_hash(data, kind)
+                        if digest != value:
+                            raise DistlibException('digest mismatch for '
+                                                   '%s' % arcname)
+
+                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
+                        logger.debug('lib_only: skipping %s', u_arcname)
+                        continue
+                    is_script = (u_arcname.startswith(script_pfx)
+                                 and not u_arcname.endswith('.exe'))
+
+                    if u_arcname.startswith(data_pfx):
+                        _, where, rp = u_arcname.split('/', 2)
+                        outfile = os.path.join(paths[where], convert_path(rp))
+                    else:
+                        # meant for site-packages.
+                        if u_arcname in (wheel_metadata_name, record_name):
+                            continue
+                        outfile = os.path.join(libdir, convert_path(u_arcname))
+                    if not is_script:
+                        with zf.open(arcname) as bf:
+                            fileop.copy_stream(bf, outfile)
+                        outfiles.append(outfile)
+                        # Double check the digest of the written file
+                        if not dry_run and row[1]:
+                            with open(outfile, 'rb') as bf:
+                                data = bf.read()
+                                _, newdigest = self.get_hash(data, kind)
+                                if newdigest != digest:
+                                    raise DistlibException('digest mismatch '
+                                                           'on write for '
+                                                           '%s' % outfile)
+                        if bc and outfile.endswith('.py'):
+                            try:
+                                pyc = fileop.byte_compile(outfile)
+                                outfiles.append(pyc)
+                            except Exception:
+                                # Don't give up if byte-compilation fails,
+                                # but log it and perhaps warn the user
+                                logger.warning('Byte-compilation failed',
+                                               exc_info=True)
+                    else:
+                        fn = os.path.basename(convert_path(arcname))
+                        workname = os.path.join(workdir, fn)
+                        with zf.open(arcname) as bf:
+                            fileop.copy_stream(bf, workname)
+
+                        dn, fn = os.path.split(outfile)
+                        maker.target_dir = dn
+                        filenames = maker.make(fn)
+                        fileop.set_executable_mode(filenames)
+                        outfiles.extend(filenames)
+
+                if lib_only:
+                    logger.debug('lib_only: returning None')
+                    dist = None
+                else:
+                    # Generate scripts
+
+                    # Try to get pydist.json so we can see if there are
+                    # any commands to generate. If this fails (e.g. because
+                    # of a legacy wheel), log a warning but don't give up.
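+                    # Shapes handled below (a sketch, not normative): legacy
+                    # wheels carry console/gui script sections in
+                    # entry_points.txt, while newer ones carry pydist.json
+                    # whose extensions dict holds 'python.commands' with
+                    # 'wrap_console'/'wrap_gui' name -> callable maps.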
+ commands = None + file_version = self.info['Wheel-Version'] + if file_version == '1.0': + # Use legacy info + ep = posixpath.join(info_dir, 'entry_points.txt') + try: + with zf.open(ep) as bwf: + epdata = read_exports(bwf) + commands = {} + for key in ('console', 'gui'): + k = '%s_scripts' % key + if k in epdata: + commands['wrap_%s' % key] = d = {} + for v in epdata[k].values(): + s = '%s:%s' % (v.prefix, v.suffix) + if v.flags: + s += ' %s' % v.flags + d[v.name] = s + except Exception: + logger.warning('Unable to read legacy script ' + 'metadata, so cannot generate ' + 'scripts') + else: + try: + with zf.open(metadata_name) as bwf: + wf = wrapper(bwf) + commands = json.load(wf).get('extensions') + if commands: + commands = commands.get('python.commands') + except Exception: + logger.warning('Unable to read JSON metadata, so ' + 'cannot generate scripts') + if commands: + console_scripts = commands.get('wrap_console', {}) + gui_scripts = commands.get('wrap_gui', {}) + if console_scripts or gui_scripts: + script_dir = paths.get('scripts', '') + if not os.path.isdir(script_dir): + raise ValueError('Valid script path not ' + 'specified') + maker.target_dir = script_dir + for k, v in console_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script) + fileop.set_executable_mode(filenames) + + if gui_scripts: + options = {'gui': True } + for k, v in gui_scripts.items(): + script = '%s = %s' % (k, v) + filenames = maker.make(script, options) + fileop.set_executable_mode(filenames) + + p = os.path.join(libdir, info_dir) + dist = InstalledDistribution(p) + + # Write SHARED + paths = dict(paths) # don't change passed in dict + del paths['purelib'] + del paths['platlib'] + paths['lib'] = libdir + p = dist.write_shared_locations(paths, dry_run) + if p: + outfiles.append(p) + + # Write RECORD + dist.write_installed_files(outfiles, paths['prefix'], + dry_run) + return dist + except Exception: # pragma: no cover + logger.exception('installation failed.') + fileop.rollback() + raise + finally: + shutil.rmtree(workdir) + + def _get_dylib_cache(self): + global cache + if cache is None: + # Use native string to avoid issues on 2.x: see Python #20140. + base = os.path.join(get_cache_base(), str('dylib-cache'), + sys.version[:3]) + cache = Cache(base) + return cache + + def _get_extensions(self): + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + arcname = posixpath.join(info_dir, 'EXTENSIONS') + wrapper = codecs.getreader('utf-8') + result = [] + with ZipFile(pathname, 'r') as zf: + try: + with zf.open(arcname) as bf: + wf = wrapper(bf) + extensions = json.load(wf) + cache = self._get_dylib_cache() + prefix = cache.prefix_to_dir(pathname) + cache_base = os.path.join(cache.base, prefix) + if not os.path.isdir(cache_base): + os.makedirs(cache_base) + for name, relpath in extensions.items(): + dest = os.path.join(cache_base, convert_path(relpath)) + if not os.path.exists(dest): + extract = True + else: + file_time = os.stat(dest).st_mtime + file_time = datetime.datetime.fromtimestamp(file_time) + info = zf.getinfo(relpath) + wheel_time = datetime.datetime(*info.date_time) + extract = wheel_time > file_time + if extract: + zf.extract(relpath, cache_base) + result.append((name, dest)) + except KeyError: + pass + return result + + def is_compatible(self): + """ + Determine if a wheel is compatible with the running system. 
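+
+        For example (illustrative): on a CPython 2.7 interpreter,
+        ``Wheel('foo-1.0-py33-none-any.whl').is_compatible()`` is False,
+        since no 'py33' tag is generated for that Python.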
+        """
+        return is_compatible(self)
+
+    def is_mountable(self):
+        """
+        Determine if a wheel is asserted as mountable by its metadata.
+        """
+        return True  # for now - metadata details TBD
+
+    def mount(self, append=False):
+        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+        if not self.is_compatible():
+            msg = 'Wheel %s not compatible with this Python.' % pathname
+            raise DistlibException(msg)
+        if not self.is_mountable():
+            msg = 'Wheel %s is marked as not mountable.' % pathname
+            raise DistlibException(msg)
+        if pathname in sys.path:
+            logger.debug('%s already in path', pathname)
+        else:
+            if append:
+                sys.path.append(pathname)
+            else:
+                sys.path.insert(0, pathname)
+            extensions = self._get_extensions()
+            if extensions:
+                if _hook not in sys.meta_path:
+                    sys.meta_path.append(_hook)
+                _hook.add(pathname, extensions)
+
+    def unmount(self):
+        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+        if pathname not in sys.path:
+            logger.debug('%s not in path', pathname)
+        else:
+            sys.path.remove(pathname)
+            if pathname in _hook.impure_wheels:
+                _hook.remove(pathname)
+            if not _hook.impure_wheels:
+                if _hook in sys.meta_path:
+                    sys.meta_path.remove(_hook)
+
+    def verify(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            # TODO version verification
+
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            for zinfo in zf.infolist():
+                arcname = zinfo.filename
+                if isinstance(arcname, text_type):
+                    u_arcname = arcname
+                else:
+                    u_arcname = arcname.decode('utf-8')
+                if '..' in u_arcname:
+                    raise DistlibException('invalid entry in '
+                                           'wheel: %r' % u_arcname)
+
+                # The signature file won't be in RECORD,
+                # and we don't currently do anything with it
+                if u_arcname.endswith('/RECORD.jws'):
+                    continue
+                row = records[u_arcname]
+                if row[2] and str(zinfo.file_size) != row[2]:
+                    raise DistlibException('size mismatch for '
+                                           '%s' % u_arcname)
+                if row[1]:
+                    kind, value = row[1].split('=', 1)
+                    with zf.open(arcname) as bf:
+                        data = bf.read()
+                    _, digest = self.get_hash(data, kind)
+                    if digest != value:
+                        raise DistlibException('digest mismatch for '
+                                               '%s' % arcname)
+
+    def update(self, modifier, dest_dir=None, **kwargs):
+        """
+        Update the contents of a wheel in a generic way. The modifier should
+        be a callable which expects a dictionary argument: its keys are
+        archive-entry paths, and its values are absolute filesystem paths
+        where the contents of the corresponding archive entries can be found.
+        The modifier is free to change the contents of the files pointed to,
+        add new entries and remove entries, before returning. This method will
+        extract the entire contents of the wheel to a temporary location, call
+        the modifier, and then use the passed (and possibly updated)
+        dictionary to write a new wheel.
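+        (Illustratively, the modifier is invoked as
+        ``modifier(path_map, **kwargs)``.)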
If ``dest_dir`` is specified, the new + wheel is written there -- otherwise, the original wheel is overwritten. + + The modifier should return True if it updated the wheel, else False. + This method returns the same value the modifier returns. + """ + + def get_version(path_map, info_dir): + version = path = None + key = '%s/%s' % (info_dir, METADATA_FILENAME) + if key not in path_map: + key = '%s/PKG-INFO' % info_dir + if key in path_map: + path = path_map[key] + version = Metadata(path=path).version + return version, path + + def update_version(version, path): + updated = None + try: + v = NormalizedVersion(version) + i = version.find('-') + if i < 0: + updated = '%s+1' % version + else: + parts = [int(s) for s in version[i + 1:].split('.')] + parts[-1] += 1 + updated = '%s+%s' % (version[:i], + '.'.join(str(i) for i in parts)) + except UnsupportedVersionError: + logger.debug('Cannot update non-compliant (PEP-440) ' + 'version %r', version) + if updated: + md = Metadata(path=path) + md.version = updated + legacy = not path.endswith(METADATA_FILENAME) + md.write(path=path, legacy=legacy) + logger.debug('Version updated from %r to %r', version, + updated) + + pathname = os.path.join(self.dirname, self.filename) + name_ver = '%s-%s' % (self.name, self.version) + info_dir = '%s.dist-info' % name_ver + record_name = posixpath.join(info_dir, 'RECORD') + with tempdir() as workdir: + with ZipFile(pathname, 'r') as zf: + path_map = {} + for zinfo in zf.infolist(): + arcname = zinfo.filename + if isinstance(arcname, text_type): + u_arcname = arcname + else: + u_arcname = arcname.decode('utf-8') + if u_arcname == record_name: + continue + if '..' in u_arcname: + raise DistlibException('invalid entry in ' + 'wheel: %r' % u_arcname) + zf.extract(zinfo, workdir) + path = os.path.join(workdir, convert_path(u_arcname)) + path_map[u_arcname] = path + + # Remember the version. + original_version, _ = get_version(path_map, info_dir) + # Files extracted. Call the modifier. + modified = modifier(path_map, **kwargs) + if modified: + # Something changed - need to build a new wheel. + current_version, path = get_version(path_map, info_dir) + if current_version and (current_version == original_version): + # Add or update local version to signify changes. + update_version(current_version, path) + # Decide where the new wheel goes. + if dest_dir is None: + fd, newpath = tempfile.mkstemp(suffix='.whl', + prefix='wheel-update-', + dir=workdir) + os.close(fd) + else: + if not os.path.isdir(dest_dir): + raise DistlibException('Not a directory: %r' % dest_dir) + newpath = os.path.join(dest_dir, self.filename) + archive_paths = list(path_map.items()) + distinfo = os.path.join(workdir, info_dir) + info = distinfo, info_dir + self.write_records(info, workdir, archive_paths) + self.build_zip(newpath, archive_paths) + if dest_dir is None: + shutil.copyfile(newpath, pathname) + return modified + +def compatible_tags(): + """ + Return (pyver, abi, arch) tuples compatible with this Python. 
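+
+    For example (illustrative, CPython 2.7 on linux-x86_64), the result
+    includes tags from ('cp27', 'cp27mu', 'linux_x86_64') down to
+    ('py2', 'none', 'any').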
+    """
+    versions = [VER_SUFFIX]
+    major = VER_SUFFIX[0]
+    for minor in range(sys.version_info[1] - 1, -1, -1):
+        versions.append(''.join([major, str(minor)]))
+
+    abis = []
+    for suffix, _, _ in imp.get_suffixes():
+        if suffix.startswith('.abi'):
+            abis.append(suffix.split('.', 2)[1])
+    abis.sort()
+    if ABI != 'none':
+        abis.insert(0, ABI)
+    abis.append('none')
+    result = []
+
+    arches = [ARCH]
+    if sys.platform == 'darwin':
+        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
+        if m:
+            name, major, minor, arch = m.groups()
+            minor = int(minor)
+            matches = [arch]
+            if arch in ('i386', 'ppc'):
+                matches.append('fat')
+            if arch in ('i386', 'ppc', 'x86_64'):
+                matches.append('fat3')
+            if arch in ('ppc64', 'x86_64'):
+                matches.append('fat64')
+            if arch in ('i386', 'x86_64'):
+                matches.append('intel')
+            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
+                matches.append('universal')
+            while minor >= 0:
+                for match in matches:
+                    s = '%s_%s_%s_%s' % (name, major, minor, match)
+                    if s != ARCH:   # already there
+                        arches.append(s)
+                minor -= 1
+
+    # Most specific - our Python version, ABI and arch
+    for abi in abis:
+        for arch in arches:
+            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
+
+    # where no ABI / arch dependency, but IMP_PREFIX dependency
+    for i, version in enumerate(versions):
+        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
+        if i == 0:
+            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
+
+    # no IMP_PREFIX, ABI or arch dependency
+    for i, version in enumerate(versions):
+        result.append((''.join(('py', version)), 'none', 'any'))
+        if i == 0:
+            result.append((''.join(('py', version[0])), 'none', 'any'))
+    return set(result)
+
+
+COMPATIBLE_TAGS = compatible_tags()
+
+del compatible_tags
+
+
+def is_compatible(wheel, tags=None):
+    if not isinstance(wheel, Wheel):
+        wheel = Wheel(wheel)    # assume it's a filename
+    result = False
+    if tags is None:
+        tags = COMPATIBLE_TAGS
+    for ver, abi, arch in tags:
+        if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
+            result = True
+            break
+    return result
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py
new file mode 100644
index 0000000..9484fdc
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py
@@ -0,0 +1,25 @@
+"""
+HTML parsing library based on the WHATWG "HTML5"
+specification. The parser is designed to be compatible with existing
+HTML found in the wild and implements well-defined error recovery that
+is largely compatible with modern desktop web browsers.
+
+Example usage:
+
+import html5lib
+f = open("my_document.html")
+tree = html5lib.parse(f)
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .html5parser import HTMLParser, parse, parseFragment
+from .treebuilders import getTreeBuilder
+from .treewalkers import getTreeWalker
+from .serializer import serialize
+
+__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
+           "getTreeWalker", "serialize"]
+
+# this has to be at the top level, see how setup.py parses this
+__version__ = "1.0b8"
diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py
new file mode 100644
index 0000000..d938e0a
--- /dev/null
+++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py
@@ -0,0 +1,3102 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import string
+
+EOF = None
+
+E = {
+    "null-character":
+        "Null character in input stream, replaced with U+FFFD.",
+    "invalid-codepoint":
+        "Invalid codepoint in stream.",
+    "incorrectly-placed-solidus":
+        "Solidus (/) incorrectly placed in tag.",
+    "incorrect-cr-newline-entity":
+        "Incorrect CR newline entity, replaced with LF.",
+    "illegal-windows-1252-entity":
+        "Entity used with illegal number (windows-1252 reference).",
+    "cant-convert-numeric-entity":
+        "Numeric entity couldn't be converted to character "
+        "(codepoint U+%(charAsInt)08x).",
+    "illegal-codepoint-for-numeric-entity":
+        "Numeric entity represents an illegal codepoint: "
+        "U+%(charAsInt)08x.",
+    "numeric-entity-without-semicolon":
+        "Numeric entity didn't end with ';'.",
+    "expected-numeric-entity-but-got-eof":
+        "Numeric entity expected. Got end of file instead.",
+    "expected-numeric-entity":
+        "Numeric entity expected but none found.",
+    "named-entity-without-semicolon":
+        "Named entity didn't end with ';'.",
+    "expected-named-entity":
+        "Named entity expected. Got none.",
+    "attributes-in-end-tag":
+        "End tag contains unexpected attributes.",
+    'self-closing-flag-on-end-tag':
+        "End tag contains unexpected self-closing flag.",
+    "expected-tag-name-but-got-right-bracket":
+        "Expected tag name. Got '>' instead.",
+    "expected-tag-name-but-got-question-mark":
+        "Expected tag name. Got '?' instead. (HTML doesn't "
+        "support processing instructions.)",
+    "expected-tag-name":
+        "Expected tag name. Got something else instead",
+    "expected-closing-tag-but-got-right-bracket":
+        "Expected closing tag. Got '>' instead. Ignoring '</>'.",
+    "expected-closing-tag-but-got-eof":
+        "Expected closing tag. Unexpected end of file.",
+    "expected-closing-tag-but-got-char":
+        "Expected closing tag. Unexpected character '%(data)s' found.",
+    "eof-in-tag-name":
+        "Unexpected end of file in the tag name.",
+    "expected-attribute-name-but-got-eof":
+        "Unexpected end of file. Expected attribute name instead.",
+    "eof-in-attribute-name":
+        "Unexpected end of file in attribute name.",
+    "invalid-character-in-attribute-name":
+        "Invalid character in attribute name",
+    "duplicate-attribute":
+        "Dropped duplicate attribute on tag.",
+    "expected-end-of-tag-name-but-got-eof":
+        "Unexpected end of file. Expected = or end of tag.",
+    "expected-attribute-value-but-got-eof":
+        "Unexpected end of file. Expected attribute value.",
+    "expected-attribute-value-but-got-right-bracket":
+        "Expected attribute value. 
Got '>' instead.", + 'equals-in-unquoted-attribute-value': + "Unexpected = in unquoted attribute", + 'unexpected-character-in-unquoted-attribute-value': + "Unexpected character in unquoted attribute", + "invalid-character-after-attribute-name": + "Unexpected character after attribute name.", + "unexpected-character-after-attribute-value": + "Unexpected character after attribute value.", + "eof-in-attribute-value-double-quote": + "Unexpected end of file in attribute value (\").", + "eof-in-attribute-value-single-quote": + "Unexpected end of file in attribute value (').", + "eof-in-attribute-value-no-quotes": + "Unexpected end of file in attribute value.", + "unexpected-EOF-after-solidus-in-tag": + "Unexpected end of file in tag. Expected >", + "unexpected-character-after-solidus-in-tag": + "Unexpected character after / in tag. Expected >", + "expected-dashes-or-doctype": + "Expected '--' or 'DOCTYPE'. Not found.", + "unexpected-bang-after-double-dash-in-comment": + "Unexpected ! after -- in comment", + "unexpected-space-after-double-dash-in-comment": + "Unexpected space after -- in comment", + "incorrect-comment": + "Incorrect comment.", + "eof-in-comment": + "Unexpected end of file in comment.", + "eof-in-comment-end-dash": + "Unexpected end of file in comment (-)", + "unexpected-dash-after-double-dash-in-comment": + "Unexpected '-' after '--' found in comment.", + "eof-in-comment-double-dash": + "Unexpected end of file in comment (--).", + "eof-in-comment-end-space-state": + "Unexpected end of file in comment.", + "eof-in-comment-end-bang-state": + "Unexpected end of file in comment.", + "unexpected-char-in-comment": + "Unexpected character in comment found.", + "need-space-after-doctype": + "No space after literal string 'DOCTYPE'.", + "expected-doctype-name-but-got-right-bracket": + "Unexpected > character. Expected DOCTYPE name.", + "expected-doctype-name-but-got-eof": + "Unexpected end of file. Expected DOCTYPE name.", + "eof-in-doctype-name": + "Unexpected end of file in DOCTYPE name.", + "eof-in-doctype": + "Unexpected end of file in DOCTYPE.", + "expected-space-or-right-bracket-in-doctype": + "Expected space or '>'. Got '%(data)s'", + "unexpected-end-of-doctype": + "Unexpected end of DOCTYPE.", + "unexpected-char-in-doctype": + "Unexpected character in DOCTYPE.", + "eof-in-innerhtml": + "XXX innerHTML EOF", + "unexpected-doctype": + "Unexpected DOCTYPE. Ignored.", + "non-html-root": + "html needs to be the first start tag.", + "expected-doctype-but-got-eof": + "Unexpected End of file. Expected DOCTYPE.", + "unknown-doctype": + "Erroneous DOCTYPE.", + "expected-doctype-but-got-chars": + "Unexpected non-space characters. Expected DOCTYPE.", + "expected-doctype-but-got-start-tag": + "Unexpected start tag (%(name)s). Expected DOCTYPE.", + "expected-doctype-but-got-end-tag": + "Unexpected end tag (%(name)s). Expected DOCTYPE.", + "end-tag-after-implied-root": + "Unexpected end tag (%(name)s) after the (implied) root element.", + "expected-named-closing-tag-but-got-eof": + "Unexpected end of file. Expected end tag (%(name)s).", + "two-heads-are-not-better-than-one": + "Unexpected start tag head in existing head. Ignored.", + "unexpected-end-tag": + "Unexpected end tag (%(name)s). Ignored.", + "unexpected-start-tag-out-of-my-head": + "Unexpected start tag (%(name)s) that can be in head. 
Moved.",
+    "unexpected-start-tag":
+        "Unexpected start tag (%(name)s).",
+    "missing-end-tag":
+        "Missing end tag (%(name)s).",
+    "missing-end-tags":
+        "Missing end tags (%(name)s).",
+    "unexpected-start-tag-implies-end-tag":
+        "Unexpected start tag (%(startName)s) "
+        "implies end tag (%(endName)s).",
+    "unexpected-start-tag-treated-as":
+        "Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
+    "deprecated-tag":
+        "Unexpected start tag %(name)s. Don't use it!",
+    "unexpected-start-tag-ignored":
+        "Unexpected start tag %(name)s. Ignored.",
+    "expected-one-end-tag-but-got-another":
+        "Unexpected end tag (%(gotName)s). "
+        "Missing end tag (%(expectedName)s).",
+    "end-tag-too-early":
+        "End tag (%(name)s) seen too early. Expected other end tag.",
+    "end-tag-too-early-named":
+        "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
+    "end-tag-too-early-ignored":
+        "End tag (%(name)s) seen too early. Ignored.",
+    "adoption-agency-1.1":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 1 of the adoption agency algorithm.",
+    "adoption-agency-1.2":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 2 of the adoption agency algorithm.",
+    "adoption-agency-1.3":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 3 of the adoption agency algorithm.",
+    "adoption-agency-4.4":
+        "End tag (%(name)s) violates step 4, "
+        "paragraph 4 of the adoption agency algorithm.",
+    "unexpected-end-tag-treated-as":
+        "Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
+    "no-end-tag":
+        "This element (%(name)s) has no end tag.",
+    "unexpected-implied-end-tag-in-table":
+        "Unexpected implied end tag (%(name)s) in the table phase.",
+    "unexpected-implied-end-tag-in-table-body":
+        "Unexpected implied end tag (%(name)s) in the table body phase.",
+    "unexpected-char-implies-table-voodoo":
+        "Unexpected non-space characters in "
+        "table context caused voodoo mode.",
+    "unexpected-hidden-input-in-table":
+        "Unexpected input with type hidden in table context.",
+    "unexpected-form-in-table":
+        "Unexpected form in table context.",
+    "unexpected-start-tag-implies-table-voodoo":
+        "Unexpected start tag (%(name)s) in "
+        "table context caused voodoo mode.",
+    "unexpected-end-tag-implies-table-voodoo":
+        "Unexpected end tag (%(name)s) in "
+        "table context caused voodoo mode.",
+    "unexpected-cell-in-table-body":
+        "Unexpected table cell start tag (%(name)s) "
+        "in the table body phase.",
+    "unexpected-cell-end-tag":
+        "Got table cell end tag (%(name)s) "
+        "while required end tags are missing.",
+    "unexpected-end-tag-in-table-body":
+        "Unexpected end tag (%(name)s) in the table body phase. Ignored.",
+    "unexpected-implied-end-tag-in-table-row":
+        "Unexpected implied end tag (%(name)s) in the table row phase.",
+    "unexpected-end-tag-in-table-row":
+        "Unexpected end tag (%(name)s) in the table row phase. Ignored.",
+    "unexpected-select-in-select":
+        "Unexpected select start tag in the select phase "
+        "treated as select end tag.",
+    "unexpected-input-in-select":
+        "Unexpected input start tag in the select phase.",
+    "unexpected-start-tag-in-select":
+        "Unexpected start tag token (%(name)s) in the select phase. "
+        "Ignored.",
+    "unexpected-end-tag-in-select":
+        "Unexpected end tag (%(name)s) in the select phase. 
Ignored.", + "unexpected-table-element-start-tag-in-select-in-table": + "Unexpected table element start tag (%(name)s) in the select in table phase.", + "unexpected-table-element-end-tag-in-select-in-table": + "Unexpected table element end tag (%(name)s) in the select in table phase.", + "unexpected-char-after-body": + "Unexpected non-space characters in the after body phase.", + "unexpected-start-tag-after-body": + "Unexpected start tag token (%(name)s)" + " in the after body phase.", + "unexpected-end-tag-after-body": + "Unexpected end tag token (%(name)s)" + " in the after body phase.", + "unexpected-char-in-frameset": + "Unexpected characters in the frameset phase. Characters ignored.", + "unexpected-start-tag-in-frameset": + "Unexpected start tag token (%(name)s)" + " in the frameset phase. Ignored.", + "unexpected-frameset-in-frameset-innerhtml": + "Unexpected end tag token (frameset) " + "in the frameset phase (innerHTML).", + "unexpected-end-tag-in-frameset": + "Unexpected end tag token (%(name)s)" + " in the frameset phase. Ignored.", + "unexpected-char-after-frameset": + "Unexpected non-space characters in the " + "after frameset phase. Ignored.", + "unexpected-start-tag-after-frameset": + "Unexpected start tag (%(name)s)" + " in the after frameset phase. Ignored.", + "unexpected-end-tag-after-frameset": + "Unexpected end tag (%(name)s)" + " in the after frameset phase. Ignored.", + "unexpected-end-tag-after-body-innerhtml": + "Unexpected end tag after body(innerHtml)", + "expected-eof-but-got-char": + "Unexpected non-space characters. Expected end of file.", + "expected-eof-but-got-start-tag": + "Unexpected start tag (%(name)s)" + ". Expected end of file.", + "expected-eof-but-got-end-tag": + "Unexpected end tag (%(name)s)" + ". Expected end of file.", + "eof-in-table": + "Unexpected end of file. Expected table content.", + "eof-in-select": + "Unexpected end of file. Expected select content.", + "eof-in-frameset": + "Unexpected end of file. Expected frameset content.", + "eof-in-script-in-script": + "Unexpected end of file. Expected script content.", + "eof-in-foreign-lands": + "Unexpected end of file. 
Expected foreign content", + "non-void-element-with-trailing-solidus": + "Trailing solidus not allowed on element %(name)s", + "unexpected-html-element-in-foreign-content": + "Element %(name)s not allowed in a non-html context", + "unexpected-end-tag-before-html": + "Unexpected end tag (%(name)s) before html.", + "XXX-undefined-error": + "Undefined error (this sucks and should be fixed)", +} + +namespaces = { + "html": "http://www.w3.org/1999/xhtml", + "mathml": "http://www.w3.org/1998/Math/MathML", + "svg": "http://www.w3.org/2000/svg", + "xlink": "http://www.w3.org/1999/xlink", + "xml": "http://www.w3.org/XML/1998/namespace", + "xmlns": "http://www.w3.org/2000/xmlns/" +} + +scopingElements = frozenset([ + (namespaces["html"], "applet"), + (namespaces["html"], "caption"), + (namespaces["html"], "html"), + (namespaces["html"], "marquee"), + (namespaces["html"], "object"), + (namespaces["html"], "table"), + (namespaces["html"], "td"), + (namespaces["html"], "th"), + (namespaces["mathml"], "mi"), + (namespaces["mathml"], "mo"), + (namespaces["mathml"], "mn"), + (namespaces["mathml"], "ms"), + (namespaces["mathml"], "mtext"), + (namespaces["mathml"], "annotation-xml"), + (namespaces["svg"], "foreignObject"), + (namespaces["svg"], "desc"), + (namespaces["svg"], "title"), +]) + +formattingElements = frozenset([ + (namespaces["html"], "a"), + (namespaces["html"], "b"), + (namespaces["html"], "big"), + (namespaces["html"], "code"), + (namespaces["html"], "em"), + (namespaces["html"], "font"), + (namespaces["html"], "i"), + (namespaces["html"], "nobr"), + (namespaces["html"], "s"), + (namespaces["html"], "small"), + (namespaces["html"], "strike"), + (namespaces["html"], "strong"), + (namespaces["html"], "tt"), + (namespaces["html"], "u") +]) + +specialElements = frozenset([ + (namespaces["html"], "address"), + (namespaces["html"], "applet"), + (namespaces["html"], "area"), + (namespaces["html"], "article"), + (namespaces["html"], "aside"), + (namespaces["html"], "base"), + (namespaces["html"], "basefont"), + (namespaces["html"], "bgsound"), + (namespaces["html"], "blockquote"), + (namespaces["html"], "body"), + (namespaces["html"], "br"), + (namespaces["html"], "button"), + (namespaces["html"], "caption"), + (namespaces["html"], "center"), + (namespaces["html"], "col"), + (namespaces["html"], "colgroup"), + (namespaces["html"], "command"), + (namespaces["html"], "dd"), + (namespaces["html"], "details"), + (namespaces["html"], "dir"), + (namespaces["html"], "div"), + (namespaces["html"], "dl"), + (namespaces["html"], "dt"), + (namespaces["html"], "embed"), + (namespaces["html"], "fieldset"), + (namespaces["html"], "figure"), + (namespaces["html"], "footer"), + (namespaces["html"], "form"), + (namespaces["html"], "frame"), + (namespaces["html"], "frameset"), + (namespaces["html"], "h1"), + (namespaces["html"], "h2"), + (namespaces["html"], "h3"), + (namespaces["html"], "h4"), + (namespaces["html"], "h5"), + (namespaces["html"], "h6"), + (namespaces["html"], "head"), + (namespaces["html"], "header"), + (namespaces["html"], "hr"), + (namespaces["html"], "html"), + (namespaces["html"], "iframe"), + # Note that image is commented out in the spec as "this isn't an + # element that can end up on the stack, so it doesn't matter," + (namespaces["html"], "image"), + (namespaces["html"], "img"), + (namespaces["html"], "input"), + (namespaces["html"], "isindex"), + (namespaces["html"], "li"), + (namespaces["html"], "link"), + (namespaces["html"], "listing"), + (namespaces["html"], "marquee"), + 
(namespaces["html"], "menu"),
+    (namespaces["html"], "meta"),
+    (namespaces["html"], "nav"),
+    (namespaces["html"], "noembed"),
+    (namespaces["html"], "noframes"),
+    (namespaces["html"], "noscript"),
+    (namespaces["html"], "object"),
+    (namespaces["html"], "ol"),
+    (namespaces["html"], "p"),
+    (namespaces["html"], "param"),
+    (namespaces["html"], "plaintext"),
+    (namespaces["html"], "pre"),
+    (namespaces["html"], "script"),
+    (namespaces["html"], "section"),
+    (namespaces["html"], "select"),
+    (namespaces["html"], "style"),
+    (namespaces["html"], "table"),
+    (namespaces["html"], "tbody"),
+    (namespaces["html"], "td"),
+    (namespaces["html"], "textarea"),
+    (namespaces["html"], "tfoot"),
+    (namespaces["html"], "th"),
+    (namespaces["html"], "thead"),
+    (namespaces["html"], "title"),
+    (namespaces["html"], "tr"),
+    (namespaces["html"], "ul"),
+    (namespaces["html"], "wbr"),
+    (namespaces["html"], "xmp"),
+    (namespaces["svg"], "foreignObject")
+])
+
+htmlIntegrationPointElements = frozenset([
+    (namespaces["mathml"], "annotation-xml"),
+    (namespaces["svg"], "foreignObject"),
+    (namespaces["svg"], "desc"),
+    (namespaces["svg"], "title")
+])
+
+mathmlTextIntegrationPointElements = frozenset([
+    (namespaces["mathml"], "mi"),
+    (namespaces["mathml"], "mo"),
+    (namespaces["mathml"], "mn"),
+    (namespaces["mathml"], "ms"),
+    (namespaces["mathml"], "mtext")
+])
+
+adjustForeignAttributes = {
+    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
+    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
+    "xlink:href": ("xlink", "href", namespaces["xlink"]),
+    "xlink:role": ("xlink", "role", namespaces["xlink"]),
+    "xlink:show": ("xlink", "show", namespaces["xlink"]),
+    "xlink:title": ("xlink", "title", namespaces["xlink"]),
+    "xlink:type": ("xlink", "type", namespaces["xlink"]),
+    "xml:base": ("xml", "base", namespaces["xml"]),
+    "xml:lang": ("xml", "lang", namespaces["xml"]),
+    "xml:space": ("xml", "space", namespaces["xml"]),
+    "xmlns": (None, "xmlns", namespaces["xmlns"]),
+    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
+}
+
+unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
+                                  adjustForeignAttributes.items()])
+
+spaceCharacters = frozenset([
+    "\t",
+    "\n",
+    "\u000C",
+    " ",
+    "\r"
+])
+
+tableInsertModeElements = frozenset([
+    "table",
+    "tbody",
+    "tfoot",
+    "thead",
+    "tr"
+])
+
+asciiLowercase = frozenset(string.ascii_lowercase)
+asciiUppercase = frozenset(string.ascii_uppercase)
+asciiLetters = frozenset(string.ascii_letters)
+digits = frozenset(string.digits)
+hexDigits = frozenset(string.hexdigits)
+
+asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
+                         for c in string.ascii_uppercase])
+
+# Heading elements need to be ordered
+headingElements = (
+    "h1",
+    "h2",
+    "h3",
+    "h4",
+    "h5",
+    "h6"
+)
+
+voidElements = frozenset([
+    "base",
+    "command",
+    "event-source",
+    "link",
+    "meta",
+    "hr",
+    "br",
+    "img",
+    "embed",
+    "param",
+    "area",
+    "col",
+    "input",
+    "source",
+    "track"
+])
+
+cdataElements = frozenset(['title', 'textarea'])
+
+rcdataElements = frozenset([
+    'style',
+    'script',
+    'xmp',
+    'iframe',
+    'noembed',
+    'noframes',
+    'noscript'
+])
+
+booleanAttributes = {
+    "": frozenset(["irrelevant"]),
+    "style": frozenset(["scoped"]),
+    "img": frozenset(["ismap"]),
+    "audio": frozenset(["autoplay", "controls"]),
+    "video": frozenset(["autoplay", "controls"]),
+    "script": frozenset(["defer", "async"]),
+    "details": frozenset(["open"]),
+    "datagrid": frozenset(["multiple", "disabled"]),
+    "command": frozenset(["hidden", 
"disabled", "checked", "default"]), + "hr": frozenset(["noshade"]), + "menu": frozenset(["autosubmit"]), + "fieldset": frozenset(["disabled", "readonly"]), + "option": frozenset(["disabled", "readonly", "selected"]), + "optgroup": frozenset(["disabled", "readonly"]), + "button": frozenset(["disabled", "autofocus"]), + "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), + "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), + "output": frozenset(["disabled", "readonly"]), +} + +# entitiesWindows1252 has to be _ordered_ and needs to have an index. It +# therefore can't be a frozenset. +entitiesWindows1252 = ( + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS +) + +xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) + +entities = { + "AElig": "\xc6", + "AElig;": "\xc6", + "AMP": "&", + "AMP;": "&", + "Aacute": "\xc1", + "Aacute;": "\xc1", + "Abreve;": "\u0102", + "Acirc": "\xc2", + "Acirc;": "\xc2", + "Acy;": "\u0410", + "Afr;": "\U0001d504", + "Agrave": "\xc0", + "Agrave;": "\xc0", + "Alpha;": "\u0391", + "Amacr;": "\u0100", + "And;": "\u2a53", + "Aogon;": "\u0104", + "Aopf;": "\U0001d538", + "ApplyFunction;": "\u2061", + "Aring": "\xc5", + "Aring;": "\xc5", + "Ascr;": "\U0001d49c", + "Assign;": "\u2254", + "Atilde": "\xc3", + "Atilde;": "\xc3", + "Auml": "\xc4", + "Auml;": "\xc4", + "Backslash;": "\u2216", + "Barv;": "\u2ae7", + "Barwed;": "\u2306", + "Bcy;": "\u0411", + "Because;": "\u2235", + "Bernoullis;": "\u212c", + "Beta;": "\u0392", + "Bfr;": "\U0001d505", + "Bopf;": "\U0001d539", + "Breve;": "\u02d8", + "Bscr;": "\u212c", + "Bumpeq;": "\u224e", + "CHcy;": "\u0427", + "COPY": "\xa9", + "COPY;": "\xa9", + "Cacute;": "\u0106", + "Cap;": "\u22d2", + "CapitalDifferentialD;": "\u2145", + "Cayleys;": "\u212d", + "Ccaron;": "\u010c", + "Ccedil": "\xc7", + "Ccedil;": "\xc7", + "Ccirc;": "\u0108", + "Cconint;": "\u2230", + "Cdot;": "\u010a", + "Cedilla;": "\xb8", + "CenterDot;": "\xb7", + "Cfr;": "\u212d", + "Chi;": "\u03a7", + "CircleDot;": "\u2299", + "CircleMinus;": "\u2296", + "CirclePlus;": "\u2295", + "CircleTimes;": "\u2297", + "ClockwiseContourIntegral;": "\u2232", + "CloseCurlyDoubleQuote;": "\u201d", + "CloseCurlyQuote;": 
"\u2019", + "Colon;": "\u2237", + "Colone;": "\u2a74", + "Congruent;": "\u2261", + "Conint;": "\u222f", + "ContourIntegral;": "\u222e", + "Copf;": "\u2102", + "Coproduct;": "\u2210", + "CounterClockwiseContourIntegral;": "\u2233", + "Cross;": "\u2a2f", + "Cscr;": "\U0001d49e", + "Cup;": "\u22d3", + "CupCap;": "\u224d", + "DD;": "\u2145", + "DDotrahd;": "\u2911", + "DJcy;": "\u0402", + "DScy;": "\u0405", + "DZcy;": "\u040f", + "Dagger;": "\u2021", + "Darr;": "\u21a1", + "Dashv;": "\u2ae4", + "Dcaron;": "\u010e", + "Dcy;": "\u0414", + "Del;": "\u2207", + "Delta;": "\u0394", + "Dfr;": "\U0001d507", + "DiacriticalAcute;": "\xb4", + "DiacriticalDot;": "\u02d9", + "DiacriticalDoubleAcute;": "\u02dd", + "DiacriticalGrave;": "`", + "DiacriticalTilde;": "\u02dc", + "Diamond;": "\u22c4", + "DifferentialD;": "\u2146", + "Dopf;": "\U0001d53b", + "Dot;": "\xa8", + "DotDot;": "\u20dc", + "DotEqual;": "\u2250", + "DoubleContourIntegral;": "\u222f", + "DoubleDot;": "\xa8", + "DoubleDownArrow;": "\u21d3", + "DoubleLeftArrow;": "\u21d0", + "DoubleLeftRightArrow;": "\u21d4", + "DoubleLeftTee;": "\u2ae4", + "DoubleLongLeftArrow;": "\u27f8", + "DoubleLongLeftRightArrow;": "\u27fa", + "DoubleLongRightArrow;": "\u27f9", + "DoubleRightArrow;": "\u21d2", + "DoubleRightTee;": "\u22a8", + "DoubleUpArrow;": "\u21d1", + "DoubleUpDownArrow;": "\u21d5", + "DoubleVerticalBar;": "\u2225", + "DownArrow;": "\u2193", + "DownArrowBar;": "\u2913", + "DownArrowUpArrow;": "\u21f5", + "DownBreve;": "\u0311", + "DownLeftRightVector;": "\u2950", + "DownLeftTeeVector;": "\u295e", + "DownLeftVector;": "\u21bd", + "DownLeftVectorBar;": "\u2956", + "DownRightTeeVector;": "\u295f", + "DownRightVector;": "\u21c1", + "DownRightVectorBar;": "\u2957", + "DownTee;": "\u22a4", + "DownTeeArrow;": "\u21a7", + "Downarrow;": "\u21d3", + "Dscr;": "\U0001d49f", + "Dstrok;": "\u0110", + "ENG;": "\u014a", + "ETH": "\xd0", + "ETH;": "\xd0", + "Eacute": "\xc9", + "Eacute;": "\xc9", + "Ecaron;": "\u011a", + "Ecirc": "\xca", + "Ecirc;": "\xca", + "Ecy;": "\u042d", + "Edot;": "\u0116", + "Efr;": "\U0001d508", + "Egrave": "\xc8", + "Egrave;": "\xc8", + "Element;": "\u2208", + "Emacr;": "\u0112", + "EmptySmallSquare;": "\u25fb", + "EmptyVerySmallSquare;": "\u25ab", + "Eogon;": "\u0118", + "Eopf;": "\U0001d53c", + "Epsilon;": "\u0395", + "Equal;": "\u2a75", + "EqualTilde;": "\u2242", + "Equilibrium;": "\u21cc", + "Escr;": "\u2130", + "Esim;": "\u2a73", + "Eta;": "\u0397", + "Euml": "\xcb", + "Euml;": "\xcb", + "Exists;": "\u2203", + "ExponentialE;": "\u2147", + "Fcy;": "\u0424", + "Ffr;": "\U0001d509", + "FilledSmallSquare;": "\u25fc", + "FilledVerySmallSquare;": "\u25aa", + "Fopf;": "\U0001d53d", + "ForAll;": "\u2200", + "Fouriertrf;": "\u2131", + "Fscr;": "\u2131", + "GJcy;": "\u0403", + "GT": ">", + "GT;": ">", + "Gamma;": "\u0393", + "Gammad;": "\u03dc", + "Gbreve;": "\u011e", + "Gcedil;": "\u0122", + "Gcirc;": "\u011c", + "Gcy;": "\u0413", + "Gdot;": "\u0120", + "Gfr;": "\U0001d50a", + "Gg;": "\u22d9", + "Gopf;": "\U0001d53e", + "GreaterEqual;": "\u2265", + "GreaterEqualLess;": "\u22db", + "GreaterFullEqual;": "\u2267", + "GreaterGreater;": "\u2aa2", + "GreaterLess;": "\u2277", + "GreaterSlantEqual;": "\u2a7e", + "GreaterTilde;": "\u2273", + "Gscr;": "\U0001d4a2", + "Gt;": "\u226b", + "HARDcy;": "\u042a", + "Hacek;": "\u02c7", + "Hat;": "^", + "Hcirc;": "\u0124", + "Hfr;": "\u210c", + "HilbertSpace;": "\u210b", + "Hopf;": "\u210d", + "HorizontalLine;": "\u2500", + "Hscr;": "\u210b", + "Hstrok;": "\u0126", + "HumpDownHump;": "\u224e", + "HumpEqual;": 
"\u224f", + "IEcy;": "\u0415", + "IJlig;": "\u0132", + "IOcy;": "\u0401", + "Iacute": "\xcd", + "Iacute;": "\xcd", + "Icirc": "\xce", + "Icirc;": "\xce", + "Icy;": "\u0418", + "Idot;": "\u0130", + "Ifr;": "\u2111", + "Igrave": "\xcc", + "Igrave;": "\xcc", + "Im;": "\u2111", + "Imacr;": "\u012a", + "ImaginaryI;": "\u2148", + "Implies;": "\u21d2", + "Int;": "\u222c", + "Integral;": "\u222b", + "Intersection;": "\u22c2", + "InvisibleComma;": "\u2063", + "InvisibleTimes;": "\u2062", + "Iogon;": "\u012e", + "Iopf;": "\U0001d540", + "Iota;": "\u0399", + "Iscr;": "\u2110", + "Itilde;": "\u0128", + "Iukcy;": "\u0406", + "Iuml": "\xcf", + "Iuml;": "\xcf", + "Jcirc;": "\u0134", + "Jcy;": "\u0419", + "Jfr;": "\U0001d50d", + "Jopf;": "\U0001d541", + "Jscr;": "\U0001d4a5", + "Jsercy;": "\u0408", + "Jukcy;": "\u0404", + "KHcy;": "\u0425", + "KJcy;": "\u040c", + "Kappa;": "\u039a", + "Kcedil;": "\u0136", + "Kcy;": "\u041a", + "Kfr;": "\U0001d50e", + "Kopf;": "\U0001d542", + "Kscr;": "\U0001d4a6", + "LJcy;": "\u0409", + "LT": "<", + "LT;": "<", + "Lacute;": "\u0139", + "Lambda;": "\u039b", + "Lang;": "\u27ea", + "Laplacetrf;": "\u2112", + "Larr;": "\u219e", + "Lcaron;": "\u013d", + "Lcedil;": "\u013b", + "Lcy;": "\u041b", + "LeftAngleBracket;": "\u27e8", + "LeftArrow;": "\u2190", + "LeftArrowBar;": "\u21e4", + "LeftArrowRightArrow;": "\u21c6", + "LeftCeiling;": "\u2308", + "LeftDoubleBracket;": "\u27e6", + "LeftDownTeeVector;": "\u2961", + "LeftDownVector;": "\u21c3", + "LeftDownVectorBar;": "\u2959", + "LeftFloor;": "\u230a", + "LeftRightArrow;": "\u2194", + "LeftRightVector;": "\u294e", + "LeftTee;": "\u22a3", + "LeftTeeArrow;": "\u21a4", + "LeftTeeVector;": "\u295a", + "LeftTriangle;": "\u22b2", + "LeftTriangleBar;": "\u29cf", + "LeftTriangleEqual;": "\u22b4", + "LeftUpDownVector;": "\u2951", + "LeftUpTeeVector;": "\u2960", + "LeftUpVector;": "\u21bf", + "LeftUpVectorBar;": "\u2958", + "LeftVector;": "\u21bc", + "LeftVectorBar;": "\u2952", + "Leftarrow;": "\u21d0", + "Leftrightarrow;": "\u21d4", + "LessEqualGreater;": "\u22da", + "LessFullEqual;": "\u2266", + "LessGreater;": "\u2276", + "LessLess;": "\u2aa1", + "LessSlantEqual;": "\u2a7d", + "LessTilde;": "\u2272", + "Lfr;": "\U0001d50f", + "Ll;": "\u22d8", + "Lleftarrow;": "\u21da", + "Lmidot;": "\u013f", + "LongLeftArrow;": "\u27f5", + "LongLeftRightArrow;": "\u27f7", + "LongRightArrow;": "\u27f6", + "Longleftarrow;": "\u27f8", + "Longleftrightarrow;": "\u27fa", + "Longrightarrow;": "\u27f9", + "Lopf;": "\U0001d543", + "LowerLeftArrow;": "\u2199", + "LowerRightArrow;": "\u2198", + "Lscr;": "\u2112", + "Lsh;": "\u21b0", + "Lstrok;": "\u0141", + "Lt;": "\u226a", + "Map;": "\u2905", + "Mcy;": "\u041c", + "MediumSpace;": "\u205f", + "Mellintrf;": "\u2133", + "Mfr;": "\U0001d510", + "MinusPlus;": "\u2213", + "Mopf;": "\U0001d544", + "Mscr;": "\u2133", + "Mu;": "\u039c", + "NJcy;": "\u040a", + "Nacute;": "\u0143", + "Ncaron;": "\u0147", + "Ncedil;": "\u0145", + "Ncy;": "\u041d", + "NegativeMediumSpace;": "\u200b", + "NegativeThickSpace;": "\u200b", + "NegativeThinSpace;": "\u200b", + "NegativeVeryThinSpace;": "\u200b", + "NestedGreaterGreater;": "\u226b", + "NestedLessLess;": "\u226a", + "NewLine;": "\n", + "Nfr;": "\U0001d511", + "NoBreak;": "\u2060", + "NonBreakingSpace;": "\xa0", + "Nopf;": "\u2115", + "Not;": "\u2aec", + "NotCongruent;": "\u2262", + "NotCupCap;": "\u226d", + "NotDoubleVerticalBar;": "\u2226", + "NotElement;": "\u2209", + "NotEqual;": "\u2260", + "NotEqualTilde;": "\u2242\u0338", + "NotExists;": "\u2204", + "NotGreater;": "\u226f", + 
"NotGreaterEqual;": "\u2271", + "NotGreaterFullEqual;": "\u2267\u0338", + "NotGreaterGreater;": "\u226b\u0338", + "NotGreaterLess;": "\u2279", + "NotGreaterSlantEqual;": "\u2a7e\u0338", + "NotGreaterTilde;": "\u2275", + "NotHumpDownHump;": "\u224e\u0338", + "NotHumpEqual;": "\u224f\u0338", + "NotLeftTriangle;": "\u22ea", + "NotLeftTriangleBar;": "\u29cf\u0338", + "NotLeftTriangleEqual;": "\u22ec", + "NotLess;": "\u226e", + "NotLessEqual;": "\u2270", + "NotLessGreater;": "\u2278", + "NotLessLess;": "\u226a\u0338", + "NotLessSlantEqual;": "\u2a7d\u0338", + "NotLessTilde;": "\u2274", + "NotNestedGreaterGreater;": "\u2aa2\u0338", + "NotNestedLessLess;": "\u2aa1\u0338", + "NotPrecedes;": "\u2280", + "NotPrecedesEqual;": "\u2aaf\u0338", + "NotPrecedesSlantEqual;": "\u22e0", + "NotReverseElement;": "\u220c", + "NotRightTriangle;": "\u22eb", + "NotRightTriangleBar;": "\u29d0\u0338", + "NotRightTriangleEqual;": "\u22ed", + "NotSquareSubset;": "\u228f\u0338", + "NotSquareSubsetEqual;": "\u22e2", + "NotSquareSuperset;": "\u2290\u0338", + "NotSquareSupersetEqual;": "\u22e3", + "NotSubset;": "\u2282\u20d2", + "NotSubsetEqual;": "\u2288", + "NotSucceeds;": "\u2281", + "NotSucceedsEqual;": "\u2ab0\u0338", + "NotSucceedsSlantEqual;": "\u22e1", + "NotSucceedsTilde;": "\u227f\u0338", + "NotSuperset;": "\u2283\u20d2", + "NotSupersetEqual;": "\u2289", + "NotTilde;": "\u2241", + "NotTildeEqual;": "\u2244", + "NotTildeFullEqual;": "\u2247", + "NotTildeTilde;": "\u2249", + "NotVerticalBar;": "\u2224", + "Nscr;": "\U0001d4a9", + "Ntilde": "\xd1", + "Ntilde;": "\xd1", + "Nu;": "\u039d", + "OElig;": "\u0152", + "Oacute": "\xd3", + "Oacute;": "\xd3", + "Ocirc": "\xd4", + "Ocirc;": "\xd4", + "Ocy;": "\u041e", + "Odblac;": "\u0150", + "Ofr;": "\U0001d512", + "Ograve": "\xd2", + "Ograve;": "\xd2", + "Omacr;": "\u014c", + "Omega;": "\u03a9", + "Omicron;": "\u039f", + "Oopf;": "\U0001d546", + "OpenCurlyDoubleQuote;": "\u201c", + "OpenCurlyQuote;": "\u2018", + "Or;": "\u2a54", + "Oscr;": "\U0001d4aa", + "Oslash": "\xd8", + "Oslash;": "\xd8", + "Otilde": "\xd5", + "Otilde;": "\xd5", + "Otimes;": "\u2a37", + "Ouml": "\xd6", + "Ouml;": "\xd6", + "OverBar;": "\u203e", + "OverBrace;": "\u23de", + "OverBracket;": "\u23b4", + "OverParenthesis;": "\u23dc", + "PartialD;": "\u2202", + "Pcy;": "\u041f", + "Pfr;": "\U0001d513", + "Phi;": "\u03a6", + "Pi;": "\u03a0", + "PlusMinus;": "\xb1", + "Poincareplane;": "\u210c", + "Popf;": "\u2119", + "Pr;": "\u2abb", + "Precedes;": "\u227a", + "PrecedesEqual;": "\u2aaf", + "PrecedesSlantEqual;": "\u227c", + "PrecedesTilde;": "\u227e", + "Prime;": "\u2033", + "Product;": "\u220f", + "Proportion;": "\u2237", + "Proportional;": "\u221d", + "Pscr;": "\U0001d4ab", + "Psi;": "\u03a8", + "QUOT": "\"", + "QUOT;": "\"", + "Qfr;": "\U0001d514", + "Qopf;": "\u211a", + "Qscr;": "\U0001d4ac", + "RBarr;": "\u2910", + "REG": "\xae", + "REG;": "\xae", + "Racute;": "\u0154", + "Rang;": "\u27eb", + "Rarr;": "\u21a0", + "Rarrtl;": "\u2916", + "Rcaron;": "\u0158", + "Rcedil;": "\u0156", + "Rcy;": "\u0420", + "Re;": "\u211c", + "ReverseElement;": "\u220b", + "ReverseEquilibrium;": "\u21cb", + "ReverseUpEquilibrium;": "\u296f", + "Rfr;": "\u211c", + "Rho;": "\u03a1", + "RightAngleBracket;": "\u27e9", + "RightArrow;": "\u2192", + "RightArrowBar;": "\u21e5", + "RightArrowLeftArrow;": "\u21c4", + "RightCeiling;": "\u2309", + "RightDoubleBracket;": "\u27e7", + "RightDownTeeVector;": "\u295d", + "RightDownVector;": "\u21c2", + "RightDownVectorBar;": "\u2955", + "RightFloor;": "\u230b", + "RightTee;": "\u22a2", + 
"RightTeeArrow;": "\u21a6", + "RightTeeVector;": "\u295b", + "RightTriangle;": "\u22b3", + "RightTriangleBar;": "\u29d0", + "RightTriangleEqual;": "\u22b5", + "RightUpDownVector;": "\u294f", + "RightUpTeeVector;": "\u295c", + "RightUpVector;": "\u21be", + "RightUpVectorBar;": "\u2954", + "RightVector;": "\u21c0", + "RightVectorBar;": "\u2953", + "Rightarrow;": "\u21d2", + "Ropf;": "\u211d", + "RoundImplies;": "\u2970", + "Rrightarrow;": "\u21db", + "Rscr;": "\u211b", + "Rsh;": "\u21b1", + "RuleDelayed;": "\u29f4", + "SHCHcy;": "\u0429", + "SHcy;": "\u0428", + "SOFTcy;": "\u042c", + "Sacute;": "\u015a", + "Sc;": "\u2abc", + "Scaron;": "\u0160", + "Scedil;": "\u015e", + "Scirc;": "\u015c", + "Scy;": "\u0421", + "Sfr;": "\U0001d516", + "ShortDownArrow;": "\u2193", + "ShortLeftArrow;": "\u2190", + "ShortRightArrow;": "\u2192", + "ShortUpArrow;": "\u2191", + "Sigma;": "\u03a3", + "SmallCircle;": "\u2218", + "Sopf;": "\U0001d54a", + "Sqrt;": "\u221a", + "Square;": "\u25a1", + "SquareIntersection;": "\u2293", + "SquareSubset;": "\u228f", + "SquareSubsetEqual;": "\u2291", + "SquareSuperset;": "\u2290", + "SquareSupersetEqual;": "\u2292", + "SquareUnion;": "\u2294", + "Sscr;": "\U0001d4ae", + "Star;": "\u22c6", + "Sub;": "\u22d0", + "Subset;": "\u22d0", + "SubsetEqual;": "\u2286", + "Succeeds;": "\u227b", + "SucceedsEqual;": "\u2ab0", + "SucceedsSlantEqual;": "\u227d", + "SucceedsTilde;": "\u227f", + "SuchThat;": "\u220b", + "Sum;": "\u2211", + "Sup;": "\u22d1", + "Superset;": "\u2283", + "SupersetEqual;": "\u2287", + "Supset;": "\u22d1", + "THORN": "\xde", + "THORN;": "\xde", + "TRADE;": "\u2122", + "TSHcy;": "\u040b", + "TScy;": "\u0426", + "Tab;": "\t", + "Tau;": "\u03a4", + "Tcaron;": "\u0164", + "Tcedil;": "\u0162", + "Tcy;": "\u0422", + "Tfr;": "\U0001d517", + "Therefore;": "\u2234", + "Theta;": "\u0398", + "ThickSpace;": "\u205f\u200a", + "ThinSpace;": "\u2009", + "Tilde;": "\u223c", + "TildeEqual;": "\u2243", + "TildeFullEqual;": "\u2245", + "TildeTilde;": "\u2248", + "Topf;": "\U0001d54b", + "TripleDot;": "\u20db", + "Tscr;": "\U0001d4af", + "Tstrok;": "\u0166", + "Uacute": "\xda", + "Uacute;": "\xda", + "Uarr;": "\u219f", + "Uarrocir;": "\u2949", + "Ubrcy;": "\u040e", + "Ubreve;": "\u016c", + "Ucirc": "\xdb", + "Ucirc;": "\xdb", + "Ucy;": "\u0423", + "Udblac;": "\u0170", + "Ufr;": "\U0001d518", + "Ugrave": "\xd9", + "Ugrave;": "\xd9", + "Umacr;": "\u016a", + "UnderBar;": "_", + "UnderBrace;": "\u23df", + "UnderBracket;": "\u23b5", + "UnderParenthesis;": "\u23dd", + "Union;": "\u22c3", + "UnionPlus;": "\u228e", + "Uogon;": "\u0172", + "Uopf;": "\U0001d54c", + "UpArrow;": "\u2191", + "UpArrowBar;": "\u2912", + "UpArrowDownArrow;": "\u21c5", + "UpDownArrow;": "\u2195", + "UpEquilibrium;": "\u296e", + "UpTee;": "\u22a5", + "UpTeeArrow;": "\u21a5", + "Uparrow;": "\u21d1", + "Updownarrow;": "\u21d5", + "UpperLeftArrow;": "\u2196", + "UpperRightArrow;": "\u2197", + "Upsi;": "\u03d2", + "Upsilon;": "\u03a5", + "Uring;": "\u016e", + "Uscr;": "\U0001d4b0", + "Utilde;": "\u0168", + "Uuml": "\xdc", + "Uuml;": "\xdc", + "VDash;": "\u22ab", + "Vbar;": "\u2aeb", + "Vcy;": "\u0412", + "Vdash;": "\u22a9", + "Vdashl;": "\u2ae6", + "Vee;": "\u22c1", + "Verbar;": "\u2016", + "Vert;": "\u2016", + "VerticalBar;": "\u2223", + "VerticalLine;": "|", + "VerticalSeparator;": "\u2758", + "VerticalTilde;": "\u2240", + "VeryThinSpace;": "\u200a", + "Vfr;": "\U0001d519", + "Vopf;": "\U0001d54d", + "Vscr;": "\U0001d4b1", + "Vvdash;": "\u22aa", + "Wcirc;": "\u0174", + "Wedge;": "\u22c0", + "Wfr;": "\U0001d51a", + 
"Wopf;": "\U0001d54e", + "Wscr;": "\U0001d4b2", + "Xfr;": "\U0001d51b", + "Xi;": "\u039e", + "Xopf;": "\U0001d54f", + "Xscr;": "\U0001d4b3", + "YAcy;": "\u042f", + "YIcy;": "\u0407", + "YUcy;": "\u042e", + "Yacute": "\xdd", + "Yacute;": "\xdd", + "Ycirc;": "\u0176", + "Ycy;": "\u042b", + "Yfr;": "\U0001d51c", + "Yopf;": "\U0001d550", + "Yscr;": "\U0001d4b4", + "Yuml;": "\u0178", + "ZHcy;": "\u0416", + "Zacute;": "\u0179", + "Zcaron;": "\u017d", + "Zcy;": "\u0417", + "Zdot;": "\u017b", + "ZeroWidthSpace;": "\u200b", + "Zeta;": "\u0396", + "Zfr;": "\u2128", + "Zopf;": "\u2124", + "Zscr;": "\U0001d4b5", + "aacute": "\xe1", + "aacute;": "\xe1", + "abreve;": "\u0103", + "ac;": "\u223e", + "acE;": "\u223e\u0333", + "acd;": "\u223f", + "acirc": "\xe2", + "acirc;": "\xe2", + "acute": "\xb4", + "acute;": "\xb4", + "acy;": "\u0430", + "aelig": "\xe6", + "aelig;": "\xe6", + "af;": "\u2061", + "afr;": "\U0001d51e", + "agrave": "\xe0", + "agrave;": "\xe0", + "alefsym;": "\u2135", + "aleph;": "\u2135", + "alpha;": "\u03b1", + "amacr;": "\u0101", + "amalg;": "\u2a3f", + "amp": "&", + "amp;": "&", + "and;": "\u2227", + "andand;": "\u2a55", + "andd;": "\u2a5c", + "andslope;": "\u2a58", + "andv;": "\u2a5a", + "ang;": "\u2220", + "ange;": "\u29a4", + "angle;": "\u2220", + "angmsd;": "\u2221", + "angmsdaa;": "\u29a8", + "angmsdab;": "\u29a9", + "angmsdac;": "\u29aa", + "angmsdad;": "\u29ab", + "angmsdae;": "\u29ac", + "angmsdaf;": "\u29ad", + "angmsdag;": "\u29ae", + "angmsdah;": "\u29af", + "angrt;": "\u221f", + "angrtvb;": "\u22be", + "angrtvbd;": "\u299d", + "angsph;": "\u2222", + "angst;": "\xc5", + "angzarr;": "\u237c", + "aogon;": "\u0105", + "aopf;": "\U0001d552", + "ap;": "\u2248", + "apE;": "\u2a70", + "apacir;": "\u2a6f", + "ape;": "\u224a", + "apid;": "\u224b", + "apos;": "'", + "approx;": "\u2248", + "approxeq;": "\u224a", + "aring": "\xe5", + "aring;": "\xe5", + "ascr;": "\U0001d4b6", + "ast;": "*", + "asymp;": "\u2248", + "asympeq;": "\u224d", + "atilde": "\xe3", + "atilde;": "\xe3", + "auml": "\xe4", + "auml;": "\xe4", + "awconint;": "\u2233", + "awint;": "\u2a11", + "bNot;": "\u2aed", + "backcong;": "\u224c", + "backepsilon;": "\u03f6", + "backprime;": "\u2035", + "backsim;": "\u223d", + "backsimeq;": "\u22cd", + "barvee;": "\u22bd", + "barwed;": "\u2305", + "barwedge;": "\u2305", + "bbrk;": "\u23b5", + "bbrktbrk;": "\u23b6", + "bcong;": "\u224c", + "bcy;": "\u0431", + "bdquo;": "\u201e", + "becaus;": "\u2235", + "because;": "\u2235", + "bemptyv;": "\u29b0", + "bepsi;": "\u03f6", + "bernou;": "\u212c", + "beta;": "\u03b2", + "beth;": "\u2136", + "between;": "\u226c", + "bfr;": "\U0001d51f", + "bigcap;": "\u22c2", + "bigcirc;": "\u25ef", + "bigcup;": "\u22c3", + "bigodot;": "\u2a00", + "bigoplus;": "\u2a01", + "bigotimes;": "\u2a02", + "bigsqcup;": "\u2a06", + "bigstar;": "\u2605", + "bigtriangledown;": "\u25bd", + "bigtriangleup;": "\u25b3", + "biguplus;": "\u2a04", + "bigvee;": "\u22c1", + "bigwedge;": "\u22c0", + "bkarow;": "\u290d", + "blacklozenge;": "\u29eb", + "blacksquare;": "\u25aa", + "blacktriangle;": "\u25b4", + "blacktriangledown;": "\u25be", + "blacktriangleleft;": "\u25c2", + "blacktriangleright;": "\u25b8", + "blank;": "\u2423", + "blk12;": "\u2592", + "blk14;": "\u2591", + "blk34;": "\u2593", + "block;": "\u2588", + "bne;": "=\u20e5", + "bnequiv;": "\u2261\u20e5", + "bnot;": "\u2310", + "bopf;": "\U0001d553", + "bot;": "\u22a5", + "bottom;": "\u22a5", + "bowtie;": "\u22c8", + "boxDL;": "\u2557", + "boxDR;": "\u2554", + "boxDl;": "\u2556", + "boxDr;": "\u2553", + "boxH;": 
"\u2550", + "boxHD;": "\u2566", + "boxHU;": "\u2569", + "boxHd;": "\u2564", + "boxHu;": "\u2567", + "boxUL;": "\u255d", + "boxUR;": "\u255a", + "boxUl;": "\u255c", + "boxUr;": "\u2559", + "boxV;": "\u2551", + "boxVH;": "\u256c", + "boxVL;": "\u2563", + "boxVR;": "\u2560", + "boxVh;": "\u256b", + "boxVl;": "\u2562", + "boxVr;": "\u255f", + "boxbox;": "\u29c9", + "boxdL;": "\u2555", + "boxdR;": "\u2552", + "boxdl;": "\u2510", + "boxdr;": "\u250c", + "boxh;": "\u2500", + "boxhD;": "\u2565", + "boxhU;": "\u2568", + "boxhd;": "\u252c", + "boxhu;": "\u2534", + "boxminus;": "\u229f", + "boxplus;": "\u229e", + "boxtimes;": "\u22a0", + "boxuL;": "\u255b", + "boxuR;": "\u2558", + "boxul;": "\u2518", + "boxur;": "\u2514", + "boxv;": "\u2502", + "boxvH;": "\u256a", + "boxvL;": "\u2561", + "boxvR;": "\u255e", + "boxvh;": "\u253c", + "boxvl;": "\u2524", + "boxvr;": "\u251c", + "bprime;": "\u2035", + "breve;": "\u02d8", + "brvbar": "\xa6", + "brvbar;": "\xa6", + "bscr;": "\U0001d4b7", + "bsemi;": "\u204f", + "bsim;": "\u223d", + "bsime;": "\u22cd", + "bsol;": "\\", + "bsolb;": "\u29c5", + "bsolhsub;": "\u27c8", + "bull;": "\u2022", + "bullet;": "\u2022", + "bump;": "\u224e", + "bumpE;": "\u2aae", + "bumpe;": "\u224f", + "bumpeq;": "\u224f", + "cacute;": "\u0107", + "cap;": "\u2229", + "capand;": "\u2a44", + "capbrcup;": "\u2a49", + "capcap;": "\u2a4b", + "capcup;": "\u2a47", + "capdot;": "\u2a40", + "caps;": "\u2229\ufe00", + "caret;": "\u2041", + "caron;": "\u02c7", + "ccaps;": "\u2a4d", + "ccaron;": "\u010d", + "ccedil": "\xe7", + "ccedil;": "\xe7", + "ccirc;": "\u0109", + "ccups;": "\u2a4c", + "ccupssm;": "\u2a50", + "cdot;": "\u010b", + "cedil": "\xb8", + "cedil;": "\xb8", + "cemptyv;": "\u29b2", + "cent": "\xa2", + "cent;": "\xa2", + "centerdot;": "\xb7", + "cfr;": "\U0001d520", + "chcy;": "\u0447", + "check;": "\u2713", + "checkmark;": "\u2713", + "chi;": "\u03c7", + "cir;": "\u25cb", + "cirE;": "\u29c3", + "circ;": "\u02c6", + "circeq;": "\u2257", + "circlearrowleft;": "\u21ba", + "circlearrowright;": "\u21bb", + "circledR;": "\xae", + "circledS;": "\u24c8", + "circledast;": "\u229b", + "circledcirc;": "\u229a", + "circleddash;": "\u229d", + "cire;": "\u2257", + "cirfnint;": "\u2a10", + "cirmid;": "\u2aef", + "cirscir;": "\u29c2", + "clubs;": "\u2663", + "clubsuit;": "\u2663", + "colon;": ":", + "colone;": "\u2254", + "coloneq;": "\u2254", + "comma;": ",", + "commat;": "@", + "comp;": "\u2201", + "compfn;": "\u2218", + "complement;": "\u2201", + "complexes;": "\u2102", + "cong;": "\u2245", + "congdot;": "\u2a6d", + "conint;": "\u222e", + "copf;": "\U0001d554", + "coprod;": "\u2210", + "copy": "\xa9", + "copy;": "\xa9", + "copysr;": "\u2117", + "crarr;": "\u21b5", + "cross;": "\u2717", + "cscr;": "\U0001d4b8", + "csub;": "\u2acf", + "csube;": "\u2ad1", + "csup;": "\u2ad0", + "csupe;": "\u2ad2", + "ctdot;": "\u22ef", + "cudarrl;": "\u2938", + "cudarrr;": "\u2935", + "cuepr;": "\u22de", + "cuesc;": "\u22df", + "cularr;": "\u21b6", + "cularrp;": "\u293d", + "cup;": "\u222a", + "cupbrcap;": "\u2a48", + "cupcap;": "\u2a46", + "cupcup;": "\u2a4a", + "cupdot;": "\u228d", + "cupor;": "\u2a45", + "cups;": "\u222a\ufe00", + "curarr;": "\u21b7", + "curarrm;": "\u293c", + "curlyeqprec;": "\u22de", + "curlyeqsucc;": "\u22df", + "curlyvee;": "\u22ce", + "curlywedge;": "\u22cf", + "curren": "\xa4", + "curren;": "\xa4", + "curvearrowleft;": "\u21b6", + "curvearrowright;": "\u21b7", + "cuvee;": "\u22ce", + "cuwed;": "\u22cf", + "cwconint;": "\u2232", + "cwint;": "\u2231", + "cylcty;": "\u232d", + "dArr;": "\u21d3", 
+ "dHar;": "\u2965", + "dagger;": "\u2020", + "daleth;": "\u2138", + "darr;": "\u2193", + "dash;": "\u2010", + "dashv;": "\u22a3", + "dbkarow;": "\u290f", + "dblac;": "\u02dd", + "dcaron;": "\u010f", + "dcy;": "\u0434", + "dd;": "\u2146", + "ddagger;": "\u2021", + "ddarr;": "\u21ca", + "ddotseq;": "\u2a77", + "deg": "\xb0", + "deg;": "\xb0", + "delta;": "\u03b4", + "demptyv;": "\u29b1", + "dfisht;": "\u297f", + "dfr;": "\U0001d521", + "dharl;": "\u21c3", + "dharr;": "\u21c2", + "diam;": "\u22c4", + "diamond;": "\u22c4", + "diamondsuit;": "\u2666", + "diams;": "\u2666", + "die;": "\xa8", + "digamma;": "\u03dd", + "disin;": "\u22f2", + "div;": "\xf7", + "divide": "\xf7", + "divide;": "\xf7", + "divideontimes;": "\u22c7", + "divonx;": "\u22c7", + "djcy;": "\u0452", + "dlcorn;": "\u231e", + "dlcrop;": "\u230d", + "dollar;": "$", + "dopf;": "\U0001d555", + "dot;": "\u02d9", + "doteq;": "\u2250", + "doteqdot;": "\u2251", + "dotminus;": "\u2238", + "dotplus;": "\u2214", + "dotsquare;": "\u22a1", + "doublebarwedge;": "\u2306", + "downarrow;": "\u2193", + "downdownarrows;": "\u21ca", + "downharpoonleft;": "\u21c3", + "downharpoonright;": "\u21c2", + "drbkarow;": "\u2910", + "drcorn;": "\u231f", + "drcrop;": "\u230c", + "dscr;": "\U0001d4b9", + "dscy;": "\u0455", + "dsol;": "\u29f6", + "dstrok;": "\u0111", + "dtdot;": "\u22f1", + "dtri;": "\u25bf", + "dtrif;": "\u25be", + "duarr;": "\u21f5", + "duhar;": "\u296f", + "dwangle;": "\u29a6", + "dzcy;": "\u045f", + "dzigrarr;": "\u27ff", + "eDDot;": "\u2a77", + "eDot;": "\u2251", + "eacute": "\xe9", + "eacute;": "\xe9", + "easter;": "\u2a6e", + "ecaron;": "\u011b", + "ecir;": "\u2256", + "ecirc": "\xea", + "ecirc;": "\xea", + "ecolon;": "\u2255", + "ecy;": "\u044d", + "edot;": "\u0117", + "ee;": "\u2147", + "efDot;": "\u2252", + "efr;": "\U0001d522", + "eg;": "\u2a9a", + "egrave": "\xe8", + "egrave;": "\xe8", + "egs;": "\u2a96", + "egsdot;": "\u2a98", + "el;": "\u2a99", + "elinters;": "\u23e7", + "ell;": "\u2113", + "els;": "\u2a95", + "elsdot;": "\u2a97", + "emacr;": "\u0113", + "empty;": "\u2205", + "emptyset;": "\u2205", + "emptyv;": "\u2205", + "emsp13;": "\u2004", + "emsp14;": "\u2005", + "emsp;": "\u2003", + "eng;": "\u014b", + "ensp;": "\u2002", + "eogon;": "\u0119", + "eopf;": "\U0001d556", + "epar;": "\u22d5", + "eparsl;": "\u29e3", + "eplus;": "\u2a71", + "epsi;": "\u03b5", + "epsilon;": "\u03b5", + "epsiv;": "\u03f5", + "eqcirc;": "\u2256", + "eqcolon;": "\u2255", + "eqsim;": "\u2242", + "eqslantgtr;": "\u2a96", + "eqslantless;": "\u2a95", + "equals;": "=", + "equest;": "\u225f", + "equiv;": "\u2261", + "equivDD;": "\u2a78", + "eqvparsl;": "\u29e5", + "erDot;": "\u2253", + "erarr;": "\u2971", + "escr;": "\u212f", + "esdot;": "\u2250", + "esim;": "\u2242", + "eta;": "\u03b7", + "eth": "\xf0", + "eth;": "\xf0", + "euml": "\xeb", + "euml;": "\xeb", + "euro;": "\u20ac", + "excl;": "!", + "exist;": "\u2203", + "expectation;": "\u2130", + "exponentiale;": "\u2147", + "fallingdotseq;": "\u2252", + "fcy;": "\u0444", + "female;": "\u2640", + "ffilig;": "\ufb03", + "fflig;": "\ufb00", + "ffllig;": "\ufb04", + "ffr;": "\U0001d523", + "filig;": "\ufb01", + "fjlig;": "fj", + "flat;": "\u266d", + "fllig;": "\ufb02", + "fltns;": "\u25b1", + "fnof;": "\u0192", + "fopf;": "\U0001d557", + "forall;": "\u2200", + "fork;": "\u22d4", + "forkv;": "\u2ad9", + "fpartint;": "\u2a0d", + "frac12": "\xbd", + "frac12;": "\xbd", + "frac13;": "\u2153", + "frac14": "\xbc", + "frac14;": "\xbc", + "frac15;": "\u2155", + "frac16;": "\u2159", + "frac18;": "\u215b", + "frac23;": 
"\u2154", + "frac25;": "\u2156", + "frac34": "\xbe", + "frac34;": "\xbe", + "frac35;": "\u2157", + "frac38;": "\u215c", + "frac45;": "\u2158", + "frac56;": "\u215a", + "frac58;": "\u215d", + "frac78;": "\u215e", + "frasl;": "\u2044", + "frown;": "\u2322", + "fscr;": "\U0001d4bb", + "gE;": "\u2267", + "gEl;": "\u2a8c", + "gacute;": "\u01f5", + "gamma;": "\u03b3", + "gammad;": "\u03dd", + "gap;": "\u2a86", + "gbreve;": "\u011f", + "gcirc;": "\u011d", + "gcy;": "\u0433", + "gdot;": "\u0121", + "ge;": "\u2265", + "gel;": "\u22db", + "geq;": "\u2265", + "geqq;": "\u2267", + "geqslant;": "\u2a7e", + "ges;": "\u2a7e", + "gescc;": "\u2aa9", + "gesdot;": "\u2a80", + "gesdoto;": "\u2a82", + "gesdotol;": "\u2a84", + "gesl;": "\u22db\ufe00", + "gesles;": "\u2a94", + "gfr;": "\U0001d524", + "gg;": "\u226b", + "ggg;": "\u22d9", + "gimel;": "\u2137", + "gjcy;": "\u0453", + "gl;": "\u2277", + "glE;": "\u2a92", + "gla;": "\u2aa5", + "glj;": "\u2aa4", + "gnE;": "\u2269", + "gnap;": "\u2a8a", + "gnapprox;": "\u2a8a", + "gne;": "\u2a88", + "gneq;": "\u2a88", + "gneqq;": "\u2269", + "gnsim;": "\u22e7", + "gopf;": "\U0001d558", + "grave;": "`", + "gscr;": "\u210a", + "gsim;": "\u2273", + "gsime;": "\u2a8e", + "gsiml;": "\u2a90", + "gt": ">", + "gt;": ">", + "gtcc;": "\u2aa7", + "gtcir;": "\u2a7a", + "gtdot;": "\u22d7", + "gtlPar;": "\u2995", + "gtquest;": "\u2a7c", + "gtrapprox;": "\u2a86", + "gtrarr;": "\u2978", + "gtrdot;": "\u22d7", + "gtreqless;": "\u22db", + "gtreqqless;": "\u2a8c", + "gtrless;": "\u2277", + "gtrsim;": "\u2273", + "gvertneqq;": "\u2269\ufe00", + "gvnE;": "\u2269\ufe00", + "hArr;": "\u21d4", + "hairsp;": "\u200a", + "half;": "\xbd", + "hamilt;": "\u210b", + "hardcy;": "\u044a", + "harr;": "\u2194", + "harrcir;": "\u2948", + "harrw;": "\u21ad", + "hbar;": "\u210f", + "hcirc;": "\u0125", + "hearts;": "\u2665", + "heartsuit;": "\u2665", + "hellip;": "\u2026", + "hercon;": "\u22b9", + "hfr;": "\U0001d525", + "hksearow;": "\u2925", + "hkswarow;": "\u2926", + "hoarr;": "\u21ff", + "homtht;": "\u223b", + "hookleftarrow;": "\u21a9", + "hookrightarrow;": "\u21aa", + "hopf;": "\U0001d559", + "horbar;": "\u2015", + "hscr;": "\U0001d4bd", + "hslash;": "\u210f", + "hstrok;": "\u0127", + "hybull;": "\u2043", + "hyphen;": "\u2010", + "iacute": "\xed", + "iacute;": "\xed", + "ic;": "\u2063", + "icirc": "\xee", + "icirc;": "\xee", + "icy;": "\u0438", + "iecy;": "\u0435", + "iexcl": "\xa1", + "iexcl;": "\xa1", + "iff;": "\u21d4", + "ifr;": "\U0001d526", + "igrave": "\xec", + "igrave;": "\xec", + "ii;": "\u2148", + "iiiint;": "\u2a0c", + "iiint;": "\u222d", + "iinfin;": "\u29dc", + "iiota;": "\u2129", + "ijlig;": "\u0133", + "imacr;": "\u012b", + "image;": "\u2111", + "imagline;": "\u2110", + "imagpart;": "\u2111", + "imath;": "\u0131", + "imof;": "\u22b7", + "imped;": "\u01b5", + "in;": "\u2208", + "incare;": "\u2105", + "infin;": "\u221e", + "infintie;": "\u29dd", + "inodot;": "\u0131", + "int;": "\u222b", + "intcal;": "\u22ba", + "integers;": "\u2124", + "intercal;": "\u22ba", + "intlarhk;": "\u2a17", + "intprod;": "\u2a3c", + "iocy;": "\u0451", + "iogon;": "\u012f", + "iopf;": "\U0001d55a", + "iota;": "\u03b9", + "iprod;": "\u2a3c", + "iquest": "\xbf", + "iquest;": "\xbf", + "iscr;": "\U0001d4be", + "isin;": "\u2208", + "isinE;": "\u22f9", + "isindot;": "\u22f5", + "isins;": "\u22f4", + "isinsv;": "\u22f3", + "isinv;": "\u2208", + "it;": "\u2062", + "itilde;": "\u0129", + "iukcy;": "\u0456", + "iuml": "\xef", + "iuml;": "\xef", + "jcirc;": "\u0135", + "jcy;": "\u0439", + "jfr;": "\U0001d527", + "jmath;": 
"\u0237", + "jopf;": "\U0001d55b", + "jscr;": "\U0001d4bf", + "jsercy;": "\u0458", + "jukcy;": "\u0454", + "kappa;": "\u03ba", + "kappav;": "\u03f0", + "kcedil;": "\u0137", + "kcy;": "\u043a", + "kfr;": "\U0001d528", + "kgreen;": "\u0138", + "khcy;": "\u0445", + "kjcy;": "\u045c", + "kopf;": "\U0001d55c", + "kscr;": "\U0001d4c0", + "lAarr;": "\u21da", + "lArr;": "\u21d0", + "lAtail;": "\u291b", + "lBarr;": "\u290e", + "lE;": "\u2266", + "lEg;": "\u2a8b", + "lHar;": "\u2962", + "lacute;": "\u013a", + "laemptyv;": "\u29b4", + "lagran;": "\u2112", + "lambda;": "\u03bb", + "lang;": "\u27e8", + "langd;": "\u2991", + "langle;": "\u27e8", + "lap;": "\u2a85", + "laquo": "\xab", + "laquo;": "\xab", + "larr;": "\u2190", + "larrb;": "\u21e4", + "larrbfs;": "\u291f", + "larrfs;": "\u291d", + "larrhk;": "\u21a9", + "larrlp;": "\u21ab", + "larrpl;": "\u2939", + "larrsim;": "\u2973", + "larrtl;": "\u21a2", + "lat;": "\u2aab", + "latail;": "\u2919", + "late;": "\u2aad", + "lates;": "\u2aad\ufe00", + "lbarr;": "\u290c", + "lbbrk;": "\u2772", + "lbrace;": "{", + "lbrack;": "[", + "lbrke;": "\u298b", + "lbrksld;": "\u298f", + "lbrkslu;": "\u298d", + "lcaron;": "\u013e", + "lcedil;": "\u013c", + "lceil;": "\u2308", + "lcub;": "{", + "lcy;": "\u043b", + "ldca;": "\u2936", + "ldquo;": "\u201c", + "ldquor;": "\u201e", + "ldrdhar;": "\u2967", + "ldrushar;": "\u294b", + "ldsh;": "\u21b2", + "le;": "\u2264", + "leftarrow;": "\u2190", + "leftarrowtail;": "\u21a2", + "leftharpoondown;": "\u21bd", + "leftharpoonup;": "\u21bc", + "leftleftarrows;": "\u21c7", + "leftrightarrow;": "\u2194", + "leftrightarrows;": "\u21c6", + "leftrightharpoons;": "\u21cb", + "leftrightsquigarrow;": "\u21ad", + "leftthreetimes;": "\u22cb", + "leg;": "\u22da", + "leq;": "\u2264", + "leqq;": "\u2266", + "leqslant;": "\u2a7d", + "les;": "\u2a7d", + "lescc;": "\u2aa8", + "lesdot;": "\u2a7f", + "lesdoto;": "\u2a81", + "lesdotor;": "\u2a83", + "lesg;": "\u22da\ufe00", + "lesges;": "\u2a93", + "lessapprox;": "\u2a85", + "lessdot;": "\u22d6", + "lesseqgtr;": "\u22da", + "lesseqqgtr;": "\u2a8b", + "lessgtr;": "\u2276", + "lesssim;": "\u2272", + "lfisht;": "\u297c", + "lfloor;": "\u230a", + "lfr;": "\U0001d529", + "lg;": "\u2276", + "lgE;": "\u2a91", + "lhard;": "\u21bd", + "lharu;": "\u21bc", + "lharul;": "\u296a", + "lhblk;": "\u2584", + "ljcy;": "\u0459", + "ll;": "\u226a", + "llarr;": "\u21c7", + "llcorner;": "\u231e", + "llhard;": "\u296b", + "lltri;": "\u25fa", + "lmidot;": "\u0140", + "lmoust;": "\u23b0", + "lmoustache;": "\u23b0", + "lnE;": "\u2268", + "lnap;": "\u2a89", + "lnapprox;": "\u2a89", + "lne;": "\u2a87", + "lneq;": "\u2a87", + "lneqq;": "\u2268", + "lnsim;": "\u22e6", + "loang;": "\u27ec", + "loarr;": "\u21fd", + "lobrk;": "\u27e6", + "longleftarrow;": "\u27f5", + "longleftrightarrow;": "\u27f7", + "longmapsto;": "\u27fc", + "longrightarrow;": "\u27f6", + "looparrowleft;": "\u21ab", + "looparrowright;": "\u21ac", + "lopar;": "\u2985", + "lopf;": "\U0001d55d", + "loplus;": "\u2a2d", + "lotimes;": "\u2a34", + "lowast;": "\u2217", + "lowbar;": "_", + "loz;": "\u25ca", + "lozenge;": "\u25ca", + "lozf;": "\u29eb", + "lpar;": "(", + "lparlt;": "\u2993", + "lrarr;": "\u21c6", + "lrcorner;": "\u231f", + "lrhar;": "\u21cb", + "lrhard;": "\u296d", + "lrm;": "\u200e", + "lrtri;": "\u22bf", + "lsaquo;": "\u2039", + "lscr;": "\U0001d4c1", + "lsh;": "\u21b0", + "lsim;": "\u2272", + "lsime;": "\u2a8d", + "lsimg;": "\u2a8f", + "lsqb;": "[", + "lsquo;": "\u2018", + "lsquor;": "\u201a", + "lstrok;": "\u0142", + "lt": "<", + "lt;": "<", + "ltcc;": 
"\u2aa6", + "ltcir;": "\u2a79", + "ltdot;": "\u22d6", + "lthree;": "\u22cb", + "ltimes;": "\u22c9", + "ltlarr;": "\u2976", + "ltquest;": "\u2a7b", + "ltrPar;": "\u2996", + "ltri;": "\u25c3", + "ltrie;": "\u22b4", + "ltrif;": "\u25c2", + "lurdshar;": "\u294a", + "luruhar;": "\u2966", + "lvertneqq;": "\u2268\ufe00", + "lvnE;": "\u2268\ufe00", + "mDDot;": "\u223a", + "macr": "\xaf", + "macr;": "\xaf", + "male;": "\u2642", + "malt;": "\u2720", + "maltese;": "\u2720", + "map;": "\u21a6", + "mapsto;": "\u21a6", + "mapstodown;": "\u21a7", + "mapstoleft;": "\u21a4", + "mapstoup;": "\u21a5", + "marker;": "\u25ae", + "mcomma;": "\u2a29", + "mcy;": "\u043c", + "mdash;": "\u2014", + "measuredangle;": "\u2221", + "mfr;": "\U0001d52a", + "mho;": "\u2127", + "micro": "\xb5", + "micro;": "\xb5", + "mid;": "\u2223", + "midast;": "*", + "midcir;": "\u2af0", + "middot": "\xb7", + "middot;": "\xb7", + "minus;": "\u2212", + "minusb;": "\u229f", + "minusd;": "\u2238", + "minusdu;": "\u2a2a", + "mlcp;": "\u2adb", + "mldr;": "\u2026", + "mnplus;": "\u2213", + "models;": "\u22a7", + "mopf;": "\U0001d55e", + "mp;": "\u2213", + "mscr;": "\U0001d4c2", + "mstpos;": "\u223e", + "mu;": "\u03bc", + "multimap;": "\u22b8", + "mumap;": "\u22b8", + "nGg;": "\u22d9\u0338", + "nGt;": "\u226b\u20d2", + "nGtv;": "\u226b\u0338", + "nLeftarrow;": "\u21cd", + "nLeftrightarrow;": "\u21ce", + "nLl;": "\u22d8\u0338", + "nLt;": "\u226a\u20d2", + "nLtv;": "\u226a\u0338", + "nRightarrow;": "\u21cf", + "nVDash;": "\u22af", + "nVdash;": "\u22ae", + "nabla;": "\u2207", + "nacute;": "\u0144", + "nang;": "\u2220\u20d2", + "nap;": "\u2249", + "napE;": "\u2a70\u0338", + "napid;": "\u224b\u0338", + "napos;": "\u0149", + "napprox;": "\u2249", + "natur;": "\u266e", + "natural;": "\u266e", + "naturals;": "\u2115", + "nbsp": "\xa0", + "nbsp;": "\xa0", + "nbump;": "\u224e\u0338", + "nbumpe;": "\u224f\u0338", + "ncap;": "\u2a43", + "ncaron;": "\u0148", + "ncedil;": "\u0146", + "ncong;": "\u2247", + "ncongdot;": "\u2a6d\u0338", + "ncup;": "\u2a42", + "ncy;": "\u043d", + "ndash;": "\u2013", + "ne;": "\u2260", + "neArr;": "\u21d7", + "nearhk;": "\u2924", + "nearr;": "\u2197", + "nearrow;": "\u2197", + "nedot;": "\u2250\u0338", + "nequiv;": "\u2262", + "nesear;": "\u2928", + "nesim;": "\u2242\u0338", + "nexist;": "\u2204", + "nexists;": "\u2204", + "nfr;": "\U0001d52b", + "ngE;": "\u2267\u0338", + "nge;": "\u2271", + "ngeq;": "\u2271", + "ngeqq;": "\u2267\u0338", + "ngeqslant;": "\u2a7e\u0338", + "nges;": "\u2a7e\u0338", + "ngsim;": "\u2275", + "ngt;": "\u226f", + "ngtr;": "\u226f", + "nhArr;": "\u21ce", + "nharr;": "\u21ae", + "nhpar;": "\u2af2", + "ni;": "\u220b", + "nis;": "\u22fc", + "nisd;": "\u22fa", + "niv;": "\u220b", + "njcy;": "\u045a", + "nlArr;": "\u21cd", + "nlE;": "\u2266\u0338", + "nlarr;": "\u219a", + "nldr;": "\u2025", + "nle;": "\u2270", + "nleftarrow;": "\u219a", + "nleftrightarrow;": "\u21ae", + "nleq;": "\u2270", + "nleqq;": "\u2266\u0338", + "nleqslant;": "\u2a7d\u0338", + "nles;": "\u2a7d\u0338", + "nless;": "\u226e", + "nlsim;": "\u2274", + "nlt;": "\u226e", + "nltri;": "\u22ea", + "nltrie;": "\u22ec", + "nmid;": "\u2224", + "nopf;": "\U0001d55f", + "not": "\xac", + "not;": "\xac", + "notin;": "\u2209", + "notinE;": "\u22f9\u0338", + "notindot;": "\u22f5\u0338", + "notinva;": "\u2209", + "notinvb;": "\u22f7", + "notinvc;": "\u22f6", + "notni;": "\u220c", + "notniva;": "\u220c", + "notnivb;": "\u22fe", + "notnivc;": "\u22fd", + "npar;": "\u2226", + "nparallel;": "\u2226", + "nparsl;": "\u2afd\u20e5", + "npart;": "\u2202\u0338", + 
"npolint;": "\u2a14", + "npr;": "\u2280", + "nprcue;": "\u22e0", + "npre;": "\u2aaf\u0338", + "nprec;": "\u2280", + "npreceq;": "\u2aaf\u0338", + "nrArr;": "\u21cf", + "nrarr;": "\u219b", + "nrarrc;": "\u2933\u0338", + "nrarrw;": "\u219d\u0338", + "nrightarrow;": "\u219b", + "nrtri;": "\u22eb", + "nrtrie;": "\u22ed", + "nsc;": "\u2281", + "nsccue;": "\u22e1", + "nsce;": "\u2ab0\u0338", + "nscr;": "\U0001d4c3", + "nshortmid;": "\u2224", + "nshortparallel;": "\u2226", + "nsim;": "\u2241", + "nsime;": "\u2244", + "nsimeq;": "\u2244", + "nsmid;": "\u2224", + "nspar;": "\u2226", + "nsqsube;": "\u22e2", + "nsqsupe;": "\u22e3", + "nsub;": "\u2284", + "nsubE;": "\u2ac5\u0338", + "nsube;": "\u2288", + "nsubset;": "\u2282\u20d2", + "nsubseteq;": "\u2288", + "nsubseteqq;": "\u2ac5\u0338", + "nsucc;": "\u2281", + "nsucceq;": "\u2ab0\u0338", + "nsup;": "\u2285", + "nsupE;": "\u2ac6\u0338", + "nsupe;": "\u2289", + "nsupset;": "\u2283\u20d2", + "nsupseteq;": "\u2289", + "nsupseteqq;": "\u2ac6\u0338", + "ntgl;": "\u2279", + "ntilde": "\xf1", + "ntilde;": "\xf1", + "ntlg;": "\u2278", + "ntriangleleft;": "\u22ea", + "ntrianglelefteq;": "\u22ec", + "ntriangleright;": "\u22eb", + "ntrianglerighteq;": "\u22ed", + "nu;": "\u03bd", + "num;": "#", + "numero;": "\u2116", + "numsp;": "\u2007", + "nvDash;": "\u22ad", + "nvHarr;": "\u2904", + "nvap;": "\u224d\u20d2", + "nvdash;": "\u22ac", + "nvge;": "\u2265\u20d2", + "nvgt;": ">\u20d2", + "nvinfin;": "\u29de", + "nvlArr;": "\u2902", + "nvle;": "\u2264\u20d2", + "nvlt;": "<\u20d2", + "nvltrie;": "\u22b4\u20d2", + "nvrArr;": "\u2903", + "nvrtrie;": "\u22b5\u20d2", + "nvsim;": "\u223c\u20d2", + "nwArr;": "\u21d6", + "nwarhk;": "\u2923", + "nwarr;": "\u2196", + "nwarrow;": "\u2196", + "nwnear;": "\u2927", + "oS;": "\u24c8", + "oacute": "\xf3", + "oacute;": "\xf3", + "oast;": "\u229b", + "ocir;": "\u229a", + "ocirc": "\xf4", + "ocirc;": "\xf4", + "ocy;": "\u043e", + "odash;": "\u229d", + "odblac;": "\u0151", + "odiv;": "\u2a38", + "odot;": "\u2299", + "odsold;": "\u29bc", + "oelig;": "\u0153", + "ofcir;": "\u29bf", + "ofr;": "\U0001d52c", + "ogon;": "\u02db", + "ograve": "\xf2", + "ograve;": "\xf2", + "ogt;": "\u29c1", + "ohbar;": "\u29b5", + "ohm;": "\u03a9", + "oint;": "\u222e", + "olarr;": "\u21ba", + "olcir;": "\u29be", + "olcross;": "\u29bb", + "oline;": "\u203e", + "olt;": "\u29c0", + "omacr;": "\u014d", + "omega;": "\u03c9", + "omicron;": "\u03bf", + "omid;": "\u29b6", + "ominus;": "\u2296", + "oopf;": "\U0001d560", + "opar;": "\u29b7", + "operp;": "\u29b9", + "oplus;": "\u2295", + "or;": "\u2228", + "orarr;": "\u21bb", + "ord;": "\u2a5d", + "order;": "\u2134", + "orderof;": "\u2134", + "ordf": "\xaa", + "ordf;": "\xaa", + "ordm": "\xba", + "ordm;": "\xba", + "origof;": "\u22b6", + "oror;": "\u2a56", + "orslope;": "\u2a57", + "orv;": "\u2a5b", + "oscr;": "\u2134", + "oslash": "\xf8", + "oslash;": "\xf8", + "osol;": "\u2298", + "otilde": "\xf5", + "otilde;": "\xf5", + "otimes;": "\u2297", + "otimesas;": "\u2a36", + "ouml": "\xf6", + "ouml;": "\xf6", + "ovbar;": "\u233d", + "par;": "\u2225", + "para": "\xb6", + "para;": "\xb6", + "parallel;": "\u2225", + "parsim;": "\u2af3", + "parsl;": "\u2afd", + "part;": "\u2202", + "pcy;": "\u043f", + "percnt;": "%", + "period;": ".", + "permil;": "\u2030", + "perp;": "\u22a5", + "pertenk;": "\u2031", + "pfr;": "\U0001d52d", + "phi;": "\u03c6", + "phiv;": "\u03d5", + "phmmat;": "\u2133", + "phone;": "\u260e", + "pi;": "\u03c0", + "pitchfork;": "\u22d4", + "piv;": "\u03d6", + "planck;": "\u210f", + "planckh;": "\u210e", + 
"plankv;": "\u210f", + "plus;": "+", + "plusacir;": "\u2a23", + "plusb;": "\u229e", + "pluscir;": "\u2a22", + "plusdo;": "\u2214", + "plusdu;": "\u2a25", + "pluse;": "\u2a72", + "plusmn": "\xb1", + "plusmn;": "\xb1", + "plussim;": "\u2a26", + "plustwo;": "\u2a27", + "pm;": "\xb1", + "pointint;": "\u2a15", + "popf;": "\U0001d561", + "pound": "\xa3", + "pound;": "\xa3", + "pr;": "\u227a", + "prE;": "\u2ab3", + "prap;": "\u2ab7", + "prcue;": "\u227c", + "pre;": "\u2aaf", + "prec;": "\u227a", + "precapprox;": "\u2ab7", + "preccurlyeq;": "\u227c", + "preceq;": "\u2aaf", + "precnapprox;": "\u2ab9", + "precneqq;": "\u2ab5", + "precnsim;": "\u22e8", + "precsim;": "\u227e", + "prime;": "\u2032", + "primes;": "\u2119", + "prnE;": "\u2ab5", + "prnap;": "\u2ab9", + "prnsim;": "\u22e8", + "prod;": "\u220f", + "profalar;": "\u232e", + "profline;": "\u2312", + "profsurf;": "\u2313", + "prop;": "\u221d", + "propto;": "\u221d", + "prsim;": "\u227e", + "prurel;": "\u22b0", + "pscr;": "\U0001d4c5", + "psi;": "\u03c8", + "puncsp;": "\u2008", + "qfr;": "\U0001d52e", + "qint;": "\u2a0c", + "qopf;": "\U0001d562", + "qprime;": "\u2057", + "qscr;": "\U0001d4c6", + "quaternions;": "\u210d", + "quatint;": "\u2a16", + "quest;": "?", + "questeq;": "\u225f", + "quot": "\"", + "quot;": "\"", + "rAarr;": "\u21db", + "rArr;": "\u21d2", + "rAtail;": "\u291c", + "rBarr;": "\u290f", + "rHar;": "\u2964", + "race;": "\u223d\u0331", + "racute;": "\u0155", + "radic;": "\u221a", + "raemptyv;": "\u29b3", + "rang;": "\u27e9", + "rangd;": "\u2992", + "range;": "\u29a5", + "rangle;": "\u27e9", + "raquo": "\xbb", + "raquo;": "\xbb", + "rarr;": "\u2192", + "rarrap;": "\u2975", + "rarrb;": "\u21e5", + "rarrbfs;": "\u2920", + "rarrc;": "\u2933", + "rarrfs;": "\u291e", + "rarrhk;": "\u21aa", + "rarrlp;": "\u21ac", + "rarrpl;": "\u2945", + "rarrsim;": "\u2974", + "rarrtl;": "\u21a3", + "rarrw;": "\u219d", + "ratail;": "\u291a", + "ratio;": "\u2236", + "rationals;": "\u211a", + "rbarr;": "\u290d", + "rbbrk;": "\u2773", + "rbrace;": "}", + "rbrack;": "]", + "rbrke;": "\u298c", + "rbrksld;": "\u298e", + "rbrkslu;": "\u2990", + "rcaron;": "\u0159", + "rcedil;": "\u0157", + "rceil;": "\u2309", + "rcub;": "}", + "rcy;": "\u0440", + "rdca;": "\u2937", + "rdldhar;": "\u2969", + "rdquo;": "\u201d", + "rdquor;": "\u201d", + "rdsh;": "\u21b3", + "real;": "\u211c", + "realine;": "\u211b", + "realpart;": "\u211c", + "reals;": "\u211d", + "rect;": "\u25ad", + "reg": "\xae", + "reg;": "\xae", + "rfisht;": "\u297d", + "rfloor;": "\u230b", + "rfr;": "\U0001d52f", + "rhard;": "\u21c1", + "rharu;": "\u21c0", + "rharul;": "\u296c", + "rho;": "\u03c1", + "rhov;": "\u03f1", + "rightarrow;": "\u2192", + "rightarrowtail;": "\u21a3", + "rightharpoondown;": "\u21c1", + "rightharpoonup;": "\u21c0", + "rightleftarrows;": "\u21c4", + "rightleftharpoons;": "\u21cc", + "rightrightarrows;": "\u21c9", + "rightsquigarrow;": "\u219d", + "rightthreetimes;": "\u22cc", + "ring;": "\u02da", + "risingdotseq;": "\u2253", + "rlarr;": "\u21c4", + "rlhar;": "\u21cc", + "rlm;": "\u200f", + "rmoust;": "\u23b1", + "rmoustache;": "\u23b1", + "rnmid;": "\u2aee", + "roang;": "\u27ed", + "roarr;": "\u21fe", + "robrk;": "\u27e7", + "ropar;": "\u2986", + "ropf;": "\U0001d563", + "roplus;": "\u2a2e", + "rotimes;": "\u2a35", + "rpar;": ")", + "rpargt;": "\u2994", + "rppolint;": "\u2a12", + "rrarr;": "\u21c9", + "rsaquo;": "\u203a", + "rscr;": "\U0001d4c7", + "rsh;": "\u21b1", + "rsqb;": "]", + "rsquo;": "\u2019", + "rsquor;": "\u2019", + "rthree;": "\u22cc", + "rtimes;": "\u22ca", + "rtri;": 
"\u25b9", + "rtrie;": "\u22b5", + "rtrif;": "\u25b8", + "rtriltri;": "\u29ce", + "ruluhar;": "\u2968", + "rx;": "\u211e", + "sacute;": "\u015b", + "sbquo;": "\u201a", + "sc;": "\u227b", + "scE;": "\u2ab4", + "scap;": "\u2ab8", + "scaron;": "\u0161", + "sccue;": "\u227d", + "sce;": "\u2ab0", + "scedil;": "\u015f", + "scirc;": "\u015d", + "scnE;": "\u2ab6", + "scnap;": "\u2aba", + "scnsim;": "\u22e9", + "scpolint;": "\u2a13", + "scsim;": "\u227f", + "scy;": "\u0441", + "sdot;": "\u22c5", + "sdotb;": "\u22a1", + "sdote;": "\u2a66", + "seArr;": "\u21d8", + "searhk;": "\u2925", + "searr;": "\u2198", + "searrow;": "\u2198", + "sect": "\xa7", + "sect;": "\xa7", + "semi;": ";", + "seswar;": "\u2929", + "setminus;": "\u2216", + "setmn;": "\u2216", + "sext;": "\u2736", + "sfr;": "\U0001d530", + "sfrown;": "\u2322", + "sharp;": "\u266f", + "shchcy;": "\u0449", + "shcy;": "\u0448", + "shortmid;": "\u2223", + "shortparallel;": "\u2225", + "shy": "\xad", + "shy;": "\xad", + "sigma;": "\u03c3", + "sigmaf;": "\u03c2", + "sigmav;": "\u03c2", + "sim;": "\u223c", + "simdot;": "\u2a6a", + "sime;": "\u2243", + "simeq;": "\u2243", + "simg;": "\u2a9e", + "simgE;": "\u2aa0", + "siml;": "\u2a9d", + "simlE;": "\u2a9f", + "simne;": "\u2246", + "simplus;": "\u2a24", + "simrarr;": "\u2972", + "slarr;": "\u2190", + "smallsetminus;": "\u2216", + "smashp;": "\u2a33", + "smeparsl;": "\u29e4", + "smid;": "\u2223", + "smile;": "\u2323", + "smt;": "\u2aaa", + "smte;": "\u2aac", + "smtes;": "\u2aac\ufe00", + "softcy;": "\u044c", + "sol;": "/", + "solb;": "\u29c4", + "solbar;": "\u233f", + "sopf;": "\U0001d564", + "spades;": "\u2660", + "spadesuit;": "\u2660", + "spar;": "\u2225", + "sqcap;": "\u2293", + "sqcaps;": "\u2293\ufe00", + "sqcup;": "\u2294", + "sqcups;": "\u2294\ufe00", + "sqsub;": "\u228f", + "sqsube;": "\u2291", + "sqsubset;": "\u228f", + "sqsubseteq;": "\u2291", + "sqsup;": "\u2290", + "sqsupe;": "\u2292", + "sqsupset;": "\u2290", + "sqsupseteq;": "\u2292", + "squ;": "\u25a1", + "square;": "\u25a1", + "squarf;": "\u25aa", + "squf;": "\u25aa", + "srarr;": "\u2192", + "sscr;": "\U0001d4c8", + "ssetmn;": "\u2216", + "ssmile;": "\u2323", + "sstarf;": "\u22c6", + "star;": "\u2606", + "starf;": "\u2605", + "straightepsilon;": "\u03f5", + "straightphi;": "\u03d5", + "strns;": "\xaf", + "sub;": "\u2282", + "subE;": "\u2ac5", + "subdot;": "\u2abd", + "sube;": "\u2286", + "subedot;": "\u2ac3", + "submult;": "\u2ac1", + "subnE;": "\u2acb", + "subne;": "\u228a", + "subplus;": "\u2abf", + "subrarr;": "\u2979", + "subset;": "\u2282", + "subseteq;": "\u2286", + "subseteqq;": "\u2ac5", + "subsetneq;": "\u228a", + "subsetneqq;": "\u2acb", + "subsim;": "\u2ac7", + "subsub;": "\u2ad5", + "subsup;": "\u2ad3", + "succ;": "\u227b", + "succapprox;": "\u2ab8", + "succcurlyeq;": "\u227d", + "succeq;": "\u2ab0", + "succnapprox;": "\u2aba", + "succneqq;": "\u2ab6", + "succnsim;": "\u22e9", + "succsim;": "\u227f", + "sum;": "\u2211", + "sung;": "\u266a", + "sup1": "\xb9", + "sup1;": "\xb9", + "sup2": "\xb2", + "sup2;": "\xb2", + "sup3": "\xb3", + "sup3;": "\xb3", + "sup;": "\u2283", + "supE;": "\u2ac6", + "supdot;": "\u2abe", + "supdsub;": "\u2ad8", + "supe;": "\u2287", + "supedot;": "\u2ac4", + "suphsol;": "\u27c9", + "suphsub;": "\u2ad7", + "suplarr;": "\u297b", + "supmult;": "\u2ac2", + "supnE;": "\u2acc", + "supne;": "\u228b", + "supplus;": "\u2ac0", + "supset;": "\u2283", + "supseteq;": "\u2287", + "supseteqq;": "\u2ac6", + "supsetneq;": "\u228b", + "supsetneqq;": "\u2acc", + "supsim;": "\u2ac8", + "supsub;": "\u2ad4", + "supsup;": 
"\u2ad6", + "swArr;": "\u21d9", + "swarhk;": "\u2926", + "swarr;": "\u2199", + "swarrow;": "\u2199", + "swnwar;": "\u292a", + "szlig": "\xdf", + "szlig;": "\xdf", + "target;": "\u2316", + "tau;": "\u03c4", + "tbrk;": "\u23b4", + "tcaron;": "\u0165", + "tcedil;": "\u0163", + "tcy;": "\u0442", + "tdot;": "\u20db", + "telrec;": "\u2315", + "tfr;": "\U0001d531", + "there4;": "\u2234", + "therefore;": "\u2234", + "theta;": "\u03b8", + "thetasym;": "\u03d1", + "thetav;": "\u03d1", + "thickapprox;": "\u2248", + "thicksim;": "\u223c", + "thinsp;": "\u2009", + "thkap;": "\u2248", + "thksim;": "\u223c", + "thorn": "\xfe", + "thorn;": "\xfe", + "tilde;": "\u02dc", + "times": "\xd7", + "times;": "\xd7", + "timesb;": "\u22a0", + "timesbar;": "\u2a31", + "timesd;": "\u2a30", + "tint;": "\u222d", + "toea;": "\u2928", + "top;": "\u22a4", + "topbot;": "\u2336", + "topcir;": "\u2af1", + "topf;": "\U0001d565", + "topfork;": "\u2ada", + "tosa;": "\u2929", + "tprime;": "\u2034", + "trade;": "\u2122", + "triangle;": "\u25b5", + "triangledown;": "\u25bf", + "triangleleft;": "\u25c3", + "trianglelefteq;": "\u22b4", + "triangleq;": "\u225c", + "triangleright;": "\u25b9", + "trianglerighteq;": "\u22b5", + "tridot;": "\u25ec", + "trie;": "\u225c", + "triminus;": "\u2a3a", + "triplus;": "\u2a39", + "trisb;": "\u29cd", + "tritime;": "\u2a3b", + "trpezium;": "\u23e2", + "tscr;": "\U0001d4c9", + "tscy;": "\u0446", + "tshcy;": "\u045b", + "tstrok;": "\u0167", + "twixt;": "\u226c", + "twoheadleftarrow;": "\u219e", + "twoheadrightarrow;": "\u21a0", + "uArr;": "\u21d1", + "uHar;": "\u2963", + "uacute": "\xfa", + "uacute;": "\xfa", + "uarr;": "\u2191", + "ubrcy;": "\u045e", + "ubreve;": "\u016d", + "ucirc": "\xfb", + "ucirc;": "\xfb", + "ucy;": "\u0443", + "udarr;": "\u21c5", + "udblac;": "\u0171", + "udhar;": "\u296e", + "ufisht;": "\u297e", + "ufr;": "\U0001d532", + "ugrave": "\xf9", + "ugrave;": "\xf9", + "uharl;": "\u21bf", + "uharr;": "\u21be", + "uhblk;": "\u2580", + "ulcorn;": "\u231c", + "ulcorner;": "\u231c", + "ulcrop;": "\u230f", + "ultri;": "\u25f8", + "umacr;": "\u016b", + "uml": "\xa8", + "uml;": "\xa8", + "uogon;": "\u0173", + "uopf;": "\U0001d566", + "uparrow;": "\u2191", + "updownarrow;": "\u2195", + "upharpoonleft;": "\u21bf", + "upharpoonright;": "\u21be", + "uplus;": "\u228e", + "upsi;": "\u03c5", + "upsih;": "\u03d2", + "upsilon;": "\u03c5", + "upuparrows;": "\u21c8", + "urcorn;": "\u231d", + "urcorner;": "\u231d", + "urcrop;": "\u230e", + "uring;": "\u016f", + "urtri;": "\u25f9", + "uscr;": "\U0001d4ca", + "utdot;": "\u22f0", + "utilde;": "\u0169", + "utri;": "\u25b5", + "utrif;": "\u25b4", + "uuarr;": "\u21c8", + "uuml": "\xfc", + "uuml;": "\xfc", + "uwangle;": "\u29a7", + "vArr;": "\u21d5", + "vBar;": "\u2ae8", + "vBarv;": "\u2ae9", + "vDash;": "\u22a8", + "vangrt;": "\u299c", + "varepsilon;": "\u03f5", + "varkappa;": "\u03f0", + "varnothing;": "\u2205", + "varphi;": "\u03d5", + "varpi;": "\u03d6", + "varpropto;": "\u221d", + "varr;": "\u2195", + "varrho;": "\u03f1", + "varsigma;": "\u03c2", + "varsubsetneq;": "\u228a\ufe00", + "varsubsetneqq;": "\u2acb\ufe00", + "varsupsetneq;": "\u228b\ufe00", + "varsupsetneqq;": "\u2acc\ufe00", + "vartheta;": "\u03d1", + "vartriangleleft;": "\u22b2", + "vartriangleright;": "\u22b3", + "vcy;": "\u0432", + "vdash;": "\u22a2", + "vee;": "\u2228", + "veebar;": "\u22bb", + "veeeq;": "\u225a", + "vellip;": "\u22ee", + "verbar;": "|", + "vert;": "|", + "vfr;": "\U0001d533", + "vltri;": "\u22b2", + "vnsub;": "\u2282\u20d2", + "vnsup;": "\u2283\u20d2", + "vopf;": 
"\U0001d567", + "vprop;": "\u221d", + "vrtri;": "\u22b3", + "vscr;": "\U0001d4cb", + "vsubnE;": "\u2acb\ufe00", + "vsubne;": "\u228a\ufe00", + "vsupnE;": "\u2acc\ufe00", + "vsupne;": "\u228b\ufe00", + "vzigzag;": "\u299a", + "wcirc;": "\u0175", + "wedbar;": "\u2a5f", + "wedge;": "\u2227", + "wedgeq;": "\u2259", + "weierp;": "\u2118", + "wfr;": "\U0001d534", + "wopf;": "\U0001d568", + "wp;": "\u2118", + "wr;": "\u2240", + "wreath;": "\u2240", + "wscr;": "\U0001d4cc", + "xcap;": "\u22c2", + "xcirc;": "\u25ef", + "xcup;": "\u22c3", + "xdtri;": "\u25bd", + "xfr;": "\U0001d535", + "xhArr;": "\u27fa", + "xharr;": "\u27f7", + "xi;": "\u03be", + "xlArr;": "\u27f8", + "xlarr;": "\u27f5", + "xmap;": "\u27fc", + "xnis;": "\u22fb", + "xodot;": "\u2a00", + "xopf;": "\U0001d569", + "xoplus;": "\u2a01", + "xotime;": "\u2a02", + "xrArr;": "\u27f9", + "xrarr;": "\u27f6", + "xscr;": "\U0001d4cd", + "xsqcup;": "\u2a06", + "xuplus;": "\u2a04", + "xutri;": "\u25b3", + "xvee;": "\u22c1", + "xwedge;": "\u22c0", + "yacute": "\xfd", + "yacute;": "\xfd", + "yacy;": "\u044f", + "ycirc;": "\u0177", + "ycy;": "\u044b", + "yen": "\xa5", + "yen;": "\xa5", + "yfr;": "\U0001d536", + "yicy;": "\u0457", + "yopf;": "\U0001d56a", + "yscr;": "\U0001d4ce", + "yucy;": "\u044e", + "yuml": "\xff", + "yuml;": "\xff", + "zacute;": "\u017a", + "zcaron;": "\u017e", + "zcy;": "\u0437", + "zdot;": "\u017c", + "zeetrf;": "\u2128", + "zeta;": "\u03b6", + "zfr;": "\U0001d537", + "zhcy;": "\u0436", + "zigrarr;": "\u21dd", + "zopf;": "\U0001d56b", + "zscr;": "\U0001d4cf", + "zwj;": "\u200d", + "zwnj;": "\u200c", +} + +replacementCharacters = { + 0x0: "\uFFFD", + 0x0d: "\u000D", + 0x80: "\u20AC", + 0x81: "\u0081", + 0x81: "\u0081", + 0x82: "\u201A", + 0x83: "\u0192", + 0x84: "\u201E", + 0x85: "\u2026", + 0x86: "\u2020", + 0x87: "\u2021", + 0x88: "\u02C6", + 0x89: "\u2030", + 0x8A: "\u0160", + 0x8B: "\u2039", + 0x8C: "\u0152", + 0x8D: "\u008D", + 0x8E: "\u017D", + 0x8F: "\u008F", + 0x90: "\u0090", + 0x91: "\u2018", + 0x92: "\u2019", + 0x93: "\u201C", + 0x94: "\u201D", + 0x95: "\u2022", + 0x96: "\u2013", + 0x97: "\u2014", + 0x98: "\u02DC", + 0x99: "\u2122", + 0x9A: "\u0161", + 0x9B: "\u203A", + 0x9C: "\u0153", + 0x9D: "\u009D", + 0x9E: "\u017E", + 0x9F: "\u0178", +} + +encodings = { + '437': 'cp437', + '850': 'cp850', + '852': 'cp852', + '855': 'cp855', + '857': 'cp857', + '860': 'cp860', + '861': 'cp861', + '862': 'cp862', + '863': 'cp863', + '865': 'cp865', + '866': 'cp866', + '869': 'cp869', + 'ansix341968': 'ascii', + 'ansix341986': 'ascii', + 'arabic': 'iso8859-6', + 'ascii': 'ascii', + 'asmo708': 'iso8859-6', + 'big5': 'big5', + 'big5hkscs': 'big5hkscs', + 'chinese': 'gbk', + 'cp037': 'cp037', + 'cp1026': 'cp1026', + 'cp154': 'ptcp154', + 'cp367': 'ascii', + 'cp424': 'cp424', + 'cp437': 'cp437', + 'cp500': 'cp500', + 'cp775': 'cp775', + 'cp819': 'windows-1252', + 'cp850': 'cp850', + 'cp852': 'cp852', + 'cp855': 'cp855', + 'cp857': 'cp857', + 'cp860': 'cp860', + 'cp861': 'cp861', + 'cp862': 'cp862', + 'cp863': 'cp863', + 'cp864': 'cp864', + 'cp865': 'cp865', + 'cp866': 'cp866', + 'cp869': 'cp869', + 'cp936': 'gbk', + 'cpgr': 'cp869', + 'cpis': 'cp861', + 'csascii': 'ascii', + 'csbig5': 'big5', + 'cseuckr': 'cp949', + 'cseucpkdfmtjapanese': 'euc_jp', + 'csgb2312': 'gbk', + 'cshproman8': 'hp-roman8', + 'csibm037': 'cp037', + 'csibm1026': 'cp1026', + 'csibm424': 'cp424', + 'csibm500': 'cp500', + 'csibm855': 'cp855', + 'csibm857': 'cp857', + 'csibm860': 'cp860', + 'csibm861': 'cp861', + 'csibm863': 'cp863', + 'csibm864': 'cp864', + 'csibm865': 
'cp865', + 'csibm866': 'cp866', + 'csibm869': 'cp869', + 'csiso2022jp': 'iso2022_jp', + 'csiso2022jp2': 'iso2022_jp_2', + 'csiso2022kr': 'iso2022_kr', + 'csiso58gb231280': 'gbk', + 'csisolatin1': 'windows-1252', + 'csisolatin2': 'iso8859-2', + 'csisolatin3': 'iso8859-3', + 'csisolatin4': 'iso8859-4', + 'csisolatin5': 'windows-1254', + 'csisolatin6': 'iso8859-10', + 'csisolatinarabic': 'iso8859-6', + 'csisolatincyrillic': 'iso8859-5', + 'csisolatingreek': 'iso8859-7', + 'csisolatinhebrew': 'iso8859-8', + 'cskoi8r': 'koi8-r', + 'csksc56011987': 'cp949', + 'cspc775baltic': 'cp775', + 'cspc850multilingual': 'cp850', + 'cspc862latinhebrew': 'cp862', + 'cspc8codepage437': 'cp437', + 'cspcp852': 'cp852', + 'csptcp154': 'ptcp154', + 'csshiftjis': 'shift_jis', + 'csunicode11utf7': 'utf-7', + 'cyrillic': 'iso8859-5', + 'cyrillicasian': 'ptcp154', + 'ebcdiccpbe': 'cp500', + 'ebcdiccpca': 'cp037', + 'ebcdiccpch': 'cp500', + 'ebcdiccphe': 'cp424', + 'ebcdiccpnl': 'cp037', + 'ebcdiccpus': 'cp037', + 'ebcdiccpwt': 'cp037', + 'ecma114': 'iso8859-6', + 'ecma118': 'iso8859-7', + 'elot928': 'iso8859-7', + 'eucjp': 'euc_jp', + 'euckr': 'cp949', + 'extendedunixcodepackedformatforjapanese': 'euc_jp', + 'gb18030': 'gb18030', + 'gb2312': 'gbk', + 'gb231280': 'gbk', + 'gbk': 'gbk', + 'greek': 'iso8859-7', + 'greek8': 'iso8859-7', + 'hebrew': 'iso8859-8', + 'hproman8': 'hp-roman8', + 'hzgb2312': 'hz', + 'ibm037': 'cp037', + 'ibm1026': 'cp1026', + 'ibm367': 'ascii', + 'ibm424': 'cp424', + 'ibm437': 'cp437', + 'ibm500': 'cp500', + 'ibm775': 'cp775', + 'ibm819': 'windows-1252', + 'ibm850': 'cp850', + 'ibm852': 'cp852', + 'ibm855': 'cp855', + 'ibm857': 'cp857', + 'ibm860': 'cp860', + 'ibm861': 'cp861', + 'ibm862': 'cp862', + 'ibm863': 'cp863', + 'ibm864': 'cp864', + 'ibm865': 'cp865', + 'ibm866': 'cp866', + 'ibm869': 'cp869', + 'iso2022jp': 'iso2022_jp', + 'iso2022jp2': 'iso2022_jp_2', + 'iso2022kr': 'iso2022_kr', + 'iso646irv1991': 'ascii', + 'iso646us': 'ascii', + 'iso88591': 'windows-1252', + 'iso885910': 'iso8859-10', + 'iso8859101992': 'iso8859-10', + 'iso885911987': 'windows-1252', + 'iso885913': 'iso8859-13', + 'iso885914': 'iso8859-14', + 'iso8859141998': 'iso8859-14', + 'iso885915': 'iso8859-15', + 'iso885916': 'iso8859-16', + 'iso8859162001': 'iso8859-16', + 'iso88592': 'iso8859-2', + 'iso885921987': 'iso8859-2', + 'iso88593': 'iso8859-3', + 'iso885931988': 'iso8859-3', + 'iso88594': 'iso8859-4', + 'iso885941988': 'iso8859-4', + 'iso88595': 'iso8859-5', + 'iso885951988': 'iso8859-5', + 'iso88596': 'iso8859-6', + 'iso885961987': 'iso8859-6', + 'iso88597': 'iso8859-7', + 'iso885971987': 'iso8859-7', + 'iso88598': 'iso8859-8', + 'iso885981988': 'iso8859-8', + 'iso88599': 'windows-1254', + 'iso885991989': 'windows-1254', + 'isoceltic': 'iso8859-14', + 'isoir100': 'windows-1252', + 'isoir101': 'iso8859-2', + 'isoir109': 'iso8859-3', + 'isoir110': 'iso8859-4', + 'isoir126': 'iso8859-7', + 'isoir127': 'iso8859-6', + 'isoir138': 'iso8859-8', + 'isoir144': 'iso8859-5', + 'isoir148': 'windows-1254', + 'isoir149': 'cp949', + 'isoir157': 'iso8859-10', + 'isoir199': 'iso8859-14', + 'isoir226': 'iso8859-16', + 'isoir58': 'gbk', + 'isoir6': 'ascii', + 'koi8r': 'koi8-r', + 'koi8u': 'koi8-u', + 'korean': 'cp949', + 'ksc5601': 'cp949', + 'ksc56011987': 'cp949', + 'ksc56011989': 'cp949', + 'l1': 'windows-1252', + 'l10': 'iso8859-16', + 'l2': 'iso8859-2', + 'l3': 'iso8859-3', + 'l4': 'iso8859-4', + 'l5': 'windows-1254', + 'l6': 'iso8859-10', + 'l8': 'iso8859-14', + 'latin1': 'windows-1252', + 'latin10': 'iso8859-16', + 'latin2': 
'iso8859-2', + 'latin3': 'iso8859-3', + 'latin4': 'iso8859-4', + 'latin5': 'windows-1254', + 'latin6': 'iso8859-10', + 'latin8': 'iso8859-14', + 'latin9': 'iso8859-15', + 'ms936': 'gbk', + 'mskanji': 'shift_jis', + 'pt154': 'ptcp154', + 'ptcp154': 'ptcp154', + 'r8': 'hp-roman8', + 'roman8': 'hp-roman8', + 'shiftjis': 'shift_jis', + 'tis620': 'cp874', + 'unicode11utf7': 'utf-7', + 'us': 'ascii', + 'usascii': 'ascii', + 'utf16': 'utf-16', + 'utf16be': 'utf-16-be', + 'utf16le': 'utf-16-le', + 'utf8': 'utf-8', + 'windows1250': 'cp1250', + 'windows1251': 'cp1251', + 'windows1252': 'cp1252', + 'windows1253': 'cp1253', + 'windows1254': 'cp1254', + 'windows1255': 'cp1255', + 'windows1256': 'cp1256', + 'windows1257': 'cp1257', + 'windows1258': 'cp1258', + 'windows936': 'gbk', + 'x-x-big5': 'big5'} + +tokenTypes = { + "Doctype": 0, + "Characters": 1, + "SpaceCharacters": 2, + "StartTag": 3, + "EndTag": 4, + "EmptyTag": 5, + "Comment": 6, + "ParseError": 7 +} + +tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], + tokenTypes["EmptyTag"]]) + + +prefixes = dict([(v, k) for k, v in namespaces.items()]) +prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + + +class DataLossWarning(UserWarning): + pass + + +class ReparseException(Exception): + pass diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/_base.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/_base.py new file mode 100644 index 0000000..c7dbaed --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/_base.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + + +class Filter(object): + def __init__(self, source): + self.source = source + + def __iter__(self): + return iter(self.source) + + def __getattr__(self, name): + return getattr(self.source, name) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py new file mode 100644 index 0000000..fed6996 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + + +class Filter(_base.Filter): + def __iter__(self): + for token in _base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() + for name, value in sorted(token["data"].items(), + key=lambda x: x[0]): + attrs[name] = value + token["data"] = attrs + yield token diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py new file mode 100644 index 0000000..ca33b70 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py @@ -0,0 +1,65 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
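(The alphabeticalattributes filter above only reorders each StartTag/EmptyTag attribute mapping; tokens are otherwise passed through untouched. A small hand-built illustration, assuming Python 3 and the Filter class from that module:

token = {"type": "StartTag", "name": "img",
         "data": {(None, "src"): "logo.png", (None, "alt"): "logo"}}
sorted_token = next(iter(Filter([token])))
# Attributes now iterate alphabetically: alt before src.
assert list(sorted_token["data"]) == [(None, "alt"), (None, "src")]
)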
import _base + + +class Filter(_base.Filter): + def __init__(self, source, encoding): + _base.Filter.__init__(self, source) + self.encoding = encoding + + def __iter__(self): + state = "pre_head" + meta_found = (self.encoding is None) + pending = [] + + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag": + if token["name"].lower() == "head": + state = "in_head" + + elif type == "EmptyTag": + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False + for (namespace, name), value in token["data"].items(): + if namespace is not None: + continue + elif name.lower() == 'charset': + token["data"][(namespace, name)] = self.encoding + meta_found = True + break + elif name == 'http-equiv' and value.lower() == 'content-type': + has_http_equiv_content_type = True + else: + if has_http_equiv_content_type and (None, "content") in token["data"]: + token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding + meta_found = True + + elif token["name"].lower() == "head" and not meta_found: + # insert meta into empty head + yield {"type": "StartTag", "name": "head", + "data": token["data"]} + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + yield {"type": "EndTag", "name": "head"} + meta_found = True + continue + + elif type == "EndTag": + if token["name"].lower() == "head" and pending: + # insert meta into head (if necessary) and flush pending queue + yield pending.pop(0) + if not meta_found: + yield {"type": "EmptyTag", "name": "meta", + "data": {(None, "charset"): self.encoding}} + while pending: + yield pending.pop(0) + meta_found = True + state = "post_head" + + if state == "in_head": + pending.append(token) + else: + yield token diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py new file mode 100644 index 0000000..8884696 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py @@ -0,0 +1,90 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base +from ..constants import cdataElements, rcdataElements, voidElements + +from ..constants import spaceCharacters +spaceCharacters = "".join(spaceCharacters) + + +class LintError(Exception): + pass + + +class Filter(_base.Filter): + def __iter__(self): + open_elements = [] + contentModelFlag = "PCDATA" + for token in _base.Filter.__iter__(self): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError("StartTag not in PCDATA content model flag: %(tag)s" % {"tag": name}) + if not isinstance(name, str): + raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) + if not name: + raise LintError("Empty tag name") + if type == "StartTag" and name in voidElements: + raise LintError("Void element reported as StartTag token: %(tag)s" % {"tag": name}) + elif type == "EmptyTag" and name not in voidElements: + raise LintError("Non-void element reported as EmptyTag token: %(tag)s" % {"tag": token["name"]}) + if type == "StartTag": + open_elements.append(name) + for name, value in token["data"]: + if not isinstance(name, str): + raise LintError("Attribute name is not a string: %(name)r" % {"name": name}) + if not name: + raise LintError("Empty attribute name") + if not isinstance(value, str): + raise LintError("Attribute value is not a string: %(value)r" % {"value": value}) + if name in cdataElements: + contentModelFlag = "CDATA" + elif name in rcdataElements: + contentModelFlag = "RCDATA" + elif name == "plaintext": + contentModelFlag = "PLAINTEXT" + + elif type == "EndTag": + name = token["name"] + if not isinstance(name, str): + raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) + if not name: + raise LintError("Empty tag name") + if name in voidElements: + raise LintError("Void element reported as EndTag token: %(tag)s" % {"tag": name}) + start_name = open_elements.pop() + if start_name != name: + raise LintError("EndTag (%(end)s) does not match StartTag (%(start)s)" % {"end": name, "start": start_name}) + contentModelFlag = "PCDATA" + + elif type == "Comment": + if contentModelFlag != "PCDATA": + raise LintError("Comment not in PCDATA content model flag") + + elif type in ("Characters", "SpaceCharacters"): + data = token["data"] + if not isinstance(data, str): + raise LintError("Attribute name is not a string: %(name)r" % {"name": data}) + if not data: + raise LintError("%(type)s token with empty data" % {"type": type}) + if type == "SpaceCharacters": + data = data.strip(spaceCharacters) + if data: + raise LintError("Non-space character(s) found in SpaceCharacters token: %(token)r" % {"token": data}) + + elif type == "Doctype": + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError("Doctype not in PCDATA content model flag: %(name)s" % {"name": name}) + if not isinstance(name, str): + raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) + # XXX: what to do with token["data"] ? + + elif type in ("ParseError", "SerializeError"): + pass + + else: + raise LintError("Unknown token type: %(type)s" % {"type": type}) + + yield token diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py new file mode 100644 index 0000000..fefe0b3 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py @@ -0,0 +1,205 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + + +class Filter(_base.Filter): + def slider(self): + previous1 = previous2 = None + for token in self.source: + if previous1 is not None: + yield previous2, previous1, token + previous2 = previous1 + previous1 = token + yield previous2, previous1, None + + def __iter__(self): + for previous, token, next in self.slider(): + type = token["type"] + if type == "StartTag": + if (token["data"] or + not self.is_optional_start(token["name"], previous, next)): + yield token + elif type == "EndTag": + if not self.is_optional_end(token["name"], next): + yield token + else: + yield token + + def is_optional_start(self, tagname, previous, next): + type = next and next["type"] or None + if tagname in 'html': + # An html element's start tag may be omitted if the first thing + # inside the html element is not a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname == 'head': + # A head element's start tag may be omitted if the first thing + # inside the head element is an element. + # XXX: we also omit the start tag if the head element is empty + if type in ("StartTag", "EmptyTag"): + return True + elif type == "EndTag": + return next["name"] == "head" + elif tagname == 'body': + # A body element's start tag may be omitted if the first thing + # inside the body element is not a space character or a comment, + # except if the first thing inside the body element is a script + # or style element and the node immediately preceding the body + # element is a head element whose end tag has been omitted. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we do not look at the preceding event, so we never omit + # the body element's start tag if it's followed by a script or + # a style element. + return next["name"] not in ('script', 'style') + else: + return True + elif tagname == 'colgroup': + # A colgroup element's start tag may be omitted if the first thing + # inside the colgroup element is a col element, and if the element + # is not immediately preceeded by another colgroup element whose + # end tag has been omitted. + if type in ("StartTag", "EmptyTag"): + # XXX: we do not look at the preceding event, so instead we never + # omit the colgroup element's end tag when it is immediately + # followed by another colgroup element. See is_optional_end. + return next["name"] == "col" + else: + return False + elif tagname == 'tbody': + # A tbody element's start tag may be omitted if the first thing + # inside the tbody element is a tr element, and if the element is + # not immediately preceeded by a tbody, thead, or tfoot element + # whose end tag has been omitted. + if type == "StartTag": + # omit the thead and tfoot elements' end tag when they are + # immediately followed by a tbody element. See is_optional_end. + if previous and previous['type'] == 'EndTag' and \ + previous['name'] in ('tbody', 'thead', 'tfoot'): + return False + return next["name"] == 'tr' + else: + return False + return False + + def is_optional_end(self, tagname, next): + type = next and next["type"] or None + if tagname in ('html', 'head', 'body'): + # An html element's end tag may be omitted if the html element + # is not immediately followed by a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname in ('li', 'optgroup', 'tr'): + # A li element's end tag may be omitted if the li element is + # immediately followed by another li element or if there is + # no more content in the parent element. 
+ # An optgroup element's end tag may be omitted if the optgroup + # element is immediately followed by another optgroup element, + # or if there is no more content in the parent element. + # A tr element's end tag may be omitted if the tr element is + # immediately followed by another tr element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] == tagname + else: + return type == "EndTag" or type is None + elif tagname in ('dt', 'dd'): + # A dt element's end tag may be omitted if the dt element is + # immediately followed by another dt element or a dd element. + # A dd element's end tag may be omitted if the dd element is + # immediately followed by another dd element or a dt element, + # or if there is no more content in the parent element. + if type == "StartTag": + return next["name"] in ('dt', 'dd') + elif tagname == 'dd': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'p': + # A p element's end tag may be omitted if the p element is + # immediately followed by an address, article, aside, + # blockquote, datagrid, dialog, dir, div, dl, fieldset, + # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, + # nav, ol, p, pre, section, table, or ul, element, or if + # there is no more content in the parent element. + if type in ("StartTag", "EmptyTag"): + return next["name"] in ('address', 'article', 'aside', + 'blockquote', 'datagrid', 'dialog', + 'dir', 'div', 'dl', 'fieldset', 'footer', + 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', + 'header', 'hr', 'menu', 'nav', 'ol', + 'p', 'pre', 'section', 'table', 'ul') + else: + return type == "EndTag" or type is None + elif tagname == 'option': + # An option element's end tag may be omitted if the option + # element is immediately followed by another option element, + # or if it is immediately followed by an optgroup + # element, or if there is no more content in the parent + # element. + if type == "StartTag": + return next["name"] in ('option', 'optgroup') + else: + return type == "EndTag" or type is None + elif tagname in ('rt', 'rp'): + # An rt element's end tag may be omitted if the rt element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + # An rp element's end tag may be omitted if the rp element is + # immediately followed by an rt or rp element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('rt', 'rp') + else: + return type == "EndTag" or type is None + elif tagname == 'colgroup': + # A colgroup element's end tag may be omitted if the colgroup + # element is not immediately followed by a space character or + # a comment. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we also look for an immediately following colgroup + # element. See is_optional_start. + return next["name"] != 'colgroup' + else: + return True + elif tagname in ('thead', 'tbody'): + # A thead element's end tag may be omitted if the thead element + # is immediately followed by a tbody or tfoot element. + # A tbody element's end tag may be omitted if the tbody element + # is immediately followed by a tbody or tfoot element, or if + # there is no more content in the parent element. + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. 
+ # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] in ['tbody', 'tfoot'] + elif tagname == 'tbody': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'tfoot': + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] == 'tbody' + else: + return type == "EndTag" or type is None + elif tagname in ('td', 'th'): + # A td element's end tag may be omitted if the td element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + # A th element's end tag may be omitted if the th element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] in ('td', 'th') + else: + return type == "EndTag" or type is None + return False diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py new file mode 100644 index 0000000..b206b54 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import, division, unicode_literals + +from . import _base +from ..sanitizer import HTMLSanitizerMixin + + +class Filter(_base.Filter, HTMLSanitizerMixin): + def __iter__(self): + for token in _base.Filter.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py new file mode 100644 index 0000000..dfc60ee --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import, division, unicode_literals + +import re + +from . 
import _base +from ..constants import rcdataElements, spaceCharacters +spaceCharacters = "".join(spaceCharacters) + +SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) + + +class Filter(_base.Filter): + + spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) + + def __iter__(self): + preserve = 0 + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag" \ + and (preserve or token["name"] in self.spacePreserveElements): + preserve += 1 + + elif type == "EndTag" and preserve: + preserve -= 1 + + elif not preserve and type == "SpaceCharacters" and token["data"]: + # Test on token["data"] above to not introduce spaces where there were not + token["data"] = " " + + elif not preserve and type == "Characters": + token["data"] = collapse_spaces(token["data"]) + + yield token + + +def collapse_spaces(text): + return SPACES_REGEX.sub(' ', text) diff --git a/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py new file mode 100644 index 0000000..40f3d09 --- /dev/null +++ b/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py @@ -0,0 +1,2724 @@ +from __future__ import absolute_import, division, unicode_literals +from pip._vendor.six import with_metaclass + +import types + +from . import inputstream +from . import tokenizer + +from . import treebuilders +from .treebuilders._base import Marker + +from . import utils +from . import constants +from .constants import spaceCharacters, asciiUpper2Lower +from .constants import specialElements +from .constants import headingElements +from .constants import cdataElements, rcdataElements +from .constants import tokenTypes, ReparseException, namespaces +from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements +from .constants import adjustForeignAttributes as adjustForeignAttributesMap +from .constants import E + + +def parse(doc, treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + """Parse a string or file-like object into a tree""" + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parse(doc, encoding=encoding) + + +def parseFragment(doc, container="div", treebuilder="etree", encoding=None, + namespaceHTMLElements=True): + tb = treebuilders.getTreeBuilder(treebuilder) + p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) + return p.parseFragment(doc, container=container, encoding=encoding) + + +def method_decorator_metaclass(function): + class Decorated(type): + def __new__(meta, classname, bases, classDict): + for attributeName, attribute in classDict.items(): + if isinstance(attribute, types.FunctionType): + attribute = function(attribute) + + classDict[attributeName] = attribute + return type.__new__(meta, classname, bases, classDict) + return Decorated + + +class HTMLParser(object): + """HTML parser. Generates a tree structure from a stream of (possibly + malformed) HTML""" + + def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, + strict=False, namespaceHTMLElements=True, debug=False): + """ + strict - raise an exception when a parse error is encountered + + tree - a treebuilder class controlling the type of tree that will be + returned. Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + + tokenizer - a class that provides a stream of tokens to the treebuilder. + This may be replaced for e.g. 
a sanitizer which converts some tags to + text + """ + + # Raise an exception on the first error encountered + self.strict = strict + + if tree is None: + tree = treebuilders.getTreeBuilder("etree") + self.tree = tree(namespaceHTMLElements) + self.tokenizer_class = tokenizer + self.errors = [] + + self.phases = dict([(name, cls(self, self.tree)) for name, cls in + getPhases(debug).items()]) + + def _parse(self, stream, innerHTML=False, container="div", + encoding=None, parseMeta=True, useChardet=True, **kwargs): + + self.innerHTMLMode = innerHTML + self.container = container + self.tokenizer = self.tokenizer_class(stream, encoding=encoding, + parseMeta=parseMeta, + useChardet=useChardet, + parser=self, **kwargs) + self.reset() + + while True: + try: + self.mainLoop() + break + except ReparseException: + self.reset() + + def reset(self): + self.tree.reset() + self.firstStartTag = False + self.errors = [] + self.log = [] # only used with debug mode + # "quirks" / "limited quirks" / "no quirks" + self.compatMode = "no quirks" + + if self.innerHTMLMode: + self.innerHTML = self.container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.state = self.tokenizer.rcdataState + elif self.innerHTML in rcdataElements: + self.tokenizer.state = self.tokenizer.rawtextState + elif self.innerHTML == 'plaintext': + self.tokenizer.state = self.tokenizer.plaintextState + else: + # state already is data state + # self.tokenizer.state = self.tokenizer.dataState + pass + self.phase = self.phases["beforeHtml"] + self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False + self.phase = self.phases["initial"] + + self.lastPhase = None + + self.beforeRCDataPhase = None + + self.framesetOK = True + + @property + def documentEncoding(self): + """The name of the character encoding + that was used to decode the input stream, + or :obj:`None` if that is not determined yet. 
+ + """ + if not hasattr(self, 'tokenizer'): + return None + return self.tokenizer.stream.charEncoding[0] + + def isHTMLIntegrationPoint(self, element): + if (element.name == "annotation-xml" and + element.namespace == namespaces["mathml"]): + return ("encoding" in element.attributes and + element.attributes["encoding"].translate( + asciiUpper2Lower) in + ("text/html", "application/xhtml+xml")) + else: + return (element.namespace, element.name) in htmlIntegrationPointElements + + def isMathMLTextIntegrationPoint(self, element): + return (element.namespace, element.name) in mathmlTextIntegrationPointElements + + def mainLoop(self): + CharactersToken = tokenTypes["Characters"] + SpaceCharactersToken = tokenTypes["SpaceCharacters"] + StartTagToken = tokenTypes["StartTag"] + EndTagToken = tokenTypes["EndTag"] + CommentToken = tokenTypes["Comment"] + DoctypeToken = tokenTypes["Doctype"] + ParseErrorToken = tokenTypes["ParseError"] + + for token in self.normalizedTokens(): + new_token = token + while new_token is not None: + currentNode = self.tree.openElements[-1] if self.tree.openElements else None + currentNodeNamespace = currentNode.namespace if currentNode else None + currentNodeName = currentNode.name if currentNode else None + + type = new_token["type"] + + if type == ParseErrorToken: + self.parseError(new_token["data"], new_token.get("datavars", {})) + new_token = None + else: + if (len(self.tree.openElements) == 0 or + currentNodeNamespace == self.tree.defaultNamespace or + (self.isMathMLTextIntegrationPoint(currentNode) and + ((type == StartTagToken and + token["name"] not in frozenset(["mglyph", "malignmark"])) or + type in (CharactersToken, SpaceCharactersToken))) or + (currentNodeNamespace == namespaces["mathml"] and + currentNodeName == "annotation-xml" and + token["name"] == "svg") or + (self.isHTMLIntegrationPoint(currentNode) and + type in (StartTagToken, CharactersToken, SpaceCharactersToken))): + phase = self.phase + else: + phase = self.phases["inForeignContent"] + + if type == CharactersToken: + new_token = phase.processCharacters(new_token) + elif type == SpaceCharactersToken: + new_token = phase.processSpaceCharacters(new_token) + elif type == StartTagToken: + new_token = phase.processStartTag(new_token) + elif type == EndTagToken: + new_token = phase.processEndTag(new_token) + elif type == CommentToken: + new_token = phase.processComment(new_token) + elif type == DoctypeToken: + new_token = phase.processDoctype(new_token) + + if (type == StartTagToken and token["selfClosing"] + and not token["selfClosingAcknowledged"]): + self.parseError("non-void-element-with-trailing-solidus", + {"name": token["name"]}) + + # When the loop finishes it's EOF + reprocess = True + phases = [] + while reprocess: + phases.append(self.phase) + reprocess = self.phase.processEOF() + if reprocess: + assert self.phase not in phases + + def normalizedTokens(self): + for token in self.tokenizer: + yield self.normalizeToken(token) + + def parse(self, stream, encoding=None, parseMeta=True, useChardet=True): + """Parse a HTML document into a well-formed tree + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. 
If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, innerHTML=False, encoding=encoding, + parseMeta=parseMeta, useChardet=useChardet) + return self.tree.getDocument() + + def parseFragment(self, stream, container="div", encoding=None, + parseMeta=False, useChardet=True): + """Parse a HTML fragment into a well-formed tree fragment + + container - name of the element we're setting the innerHTML property + if set to None, default to 'div' + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, True, container=container, encoding=encoding) + return self.tree.getFragment() + + def parseError(self, errorcode="XXX-undefined-error", datavars={}): + # XXX The idea is to make errorcode mandatory. + self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) + if self.strict: + raise ParseError(E[errorcode] % datavars) + + def normalizeToken(self, token): + """ HTML5 specific normalizations to the token stream """ + + if token["type"] == tokenTypes["StartTag"]: + token["data"] = dict(token["data"][::-1]) + + return token + + def adjustMathMLAttributes(self, token): + replacements = {"definitionurl": "definitionURL"} + for k, v in replacements.items(): + if k in token["data"]: + token["data"][v] = token["data"][k] + del token["data"][k] + + def adjustSVGAttributes(self, token): + replacements = { + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": 
"targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan" + } + for originalName in list(token["data"].keys()): + if originalName in replacements: + svgName = replacements[originalName] + token["data"][svgName] = token["data"][originalName] + del token["data"][originalName] + + def adjustForeignAttributes(self, token): + replacements = adjustForeignAttributesMap + + for originalName in token["data"].keys(): + if originalName in replacements: + foreignName = replacements[originalName] + token["data"][foreignName] = token["data"][originalName] + del token["data"][originalName] + + def reparseTokenNormal(self, token): + self.parser.phase() + + def resetInsertionMode(self): + # The name of this method is mostly historical. (It's also used in the + # specification.) + last = False + newModes = { + "select": "inSelect", + "td": "inCell", + "th": "inCell", + "tr": "inRow", + "tbody": "inTableBody", + "thead": "inTableBody", + "tfoot": "inTableBody", + "caption": "inCaption", + "colgroup": "inColumnGroup", + "table": "inTable", + "head": "inBody", + "body": "inBody", + "frameset": "inFrameset", + "html": "beforeHead" + } + for node in self.tree.openElements[::-1]: + nodeName = node.name + new_phase = None + if node == self.tree.openElements[0]: + assert self.innerHTML + last = True + nodeName = self.innerHTML + # Check for conditions that should only happen in the innerHTML + # case + if nodeName in ("select", "colgroup", "head", "html"): + assert self.innerHTML + + if not last and node.namespace != self.tree.defaultNamespace: + continue + + if nodeName in newModes: + new_phase = self.phases[newModes[nodeName]] + break + elif last: + new_phase = self.phases["inBody"] + break + + self.phase = new_phase + + def parseRCDataRawtext(self, token, contentType): + """Generic RCDATA/RAWTEXT Parsing algorithm + contentType - RCDATA or RAWTEXT + """ + assert contentType in ("RAWTEXT", "RCDATA") + + self.tree.insertElement(token) + + if contentType == "RAWTEXT": + self.tokenizer.state = self.tokenizer.rawtextState + else: + self.tokenizer.state = self.tokenizer.rcdataState + + self.originalPhase = self.phase + + self.phase = self.phases["text"] + + +def getPhases(debug): + def log(function): + """Logger that records which phase processes each token""" + type_names = dict((value, key) for key, value in + constants.tokenTypes.items()) + + def wrapped(self, *args, **kwargs): + if function.__name__.startswith("process") and len(args) > 0: + token = args[0] + try: + info = {"type": type_names[token['type']]} + except: + raise + if token['type'] in constants.tagTokenTypes: + info["name"] = token['name'] + + self.parser.log.append((self.parser.tokenizer.state.__name__, + self.parser.phase.__class__.__name__, + self.__class__.__name__, + function.__name__, + info)) + return function(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + return wrapped + + def getMetaclass(use_metaclass, metaclass_func): + if use_metaclass: + return method_decorator_metaclass(metaclass_func) + else: + return type + + class Phase(with_metaclass(getMetaclass(debug, log))): + """Base class for helper object that implements each phase of processing + """ + + def __init__(self, parser, tree): + self.parser = parser + self.tree = tree + + def processEOF(self): + raise NotImplementedError + + def processComment(self, token): + # For most phases the 
following is correct. Where it's not it will be + # overridden. + self.tree.insertComment(token, self.tree.openElements[-1]) + + def processDoctype(self, token): + self.parser.parseError("unexpected-doctype") + + def processCharacters(self, token): + self.tree.insertText(token["data"]) + + def processSpaceCharacters(self, token): + self.tree.insertText(token["data"]) + + def processStartTag(self, token): + return self.startTagHandler[token["name"]](token) + + def startTagHtml(self, token): + if not self.parser.firstStartTag and token["name"] == "html": + self.parser.parseError("non-html-root") + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). + for attr, value in token["data"].items(): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False + + def processEndTag(self, token): + return self.endTagHandler[token["name"]](token) + + class InitialPhase(Phase): + def processSpaceCharacters(self, token): + pass + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processDoctype(self, token): + name = token["name"] + publicId = token["publicId"] + systemId = token["systemId"] + correct = token["correct"] + + if (name != "html" or publicId is not None or + systemId is not None and systemId != "about:legacy-compat"): + self.parser.parseError("unknown-doctype") + + if publicId is None: + publicId = "" + + self.tree.insertDoctype(token) + + if publicId != "": + publicId = publicId.translate(asciiUpper2Lower) + + if (not correct or token["name"] != "html" + or publicId.startswith( + ("+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//")) + or publicId in + ("-//w3o//dtd w3 html strict 3.0//en//", + "-/w3c/dtd html 4.0 transitional/en", + "html") + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is None + or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + self.parser.compatMode = "quirks" + elif (publicId.startswith( + ("-//w3c//dtd xhtml 1.0 frameset//", + "-//w3c//dtd xhtml 1.0 transitional//")) + or publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is not None): + self.parser.compatMode = "limited quirks" + + self.parser.phase = self.parser.phases["beforeHtml"] + + def anythingElse(self): + self.parser.compatMode = "quirks" + self.parser.phase = self.parser.phases["beforeHtml"] + + def processCharacters(self, token): + self.parser.parseError("expected-doctype-but-got-chars") + self.anythingElse() + return token + + def processStartTag(self, token): + self.parser.parseError("expected-doctype-but-got-start-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEndTag(self, token): + self.parser.parseError("expected-doctype-but-got-end-tag", + {"name": token["name"]}) + self.anythingElse() + return token + + def processEOF(self): + self.parser.parseError("expected-doctype-but-got-eof") + self.anythingElse() + return True + + class BeforeHtmlPhase(Phase): + # helper methods + def insertHtmlElement(self): + self.tree.insertRoot(impliedTagToken("html", "StartTag")) + self.parser.phase = self.parser.phases["beforeHead"] + + # other + def processEOF(self): + self.insertHtmlElement() + return True + + def processComment(self, token): + self.tree.insertComment(token, self.tree.document) + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.insertHtmlElement() + return token + + def processStartTag(self, token): + if token["name"] == "html": + self.parser.firstStartTag = True + self.insertHtmlElement() + return token + + def processEndTag(self, token): + if token["name"] not in ("head", "body", "html", "br"): + self.parser.parseError("unexpected-end-tag-before-html", + {"name": token["name"]}) + else: + self.insertHtmlElement() + return token + + class BeforeHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("head", self.startTagHead) + 
]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("head", "body", "html", "br"), self.endTagImplyHead) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.startTagHead(impliedTagToken("head", "StartTag")) + return True + + def processSpaceCharacters(self, token): + pass + + def processCharacters(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.tree.insertElement(token) + self.tree.headPointer = self.tree.openElements[-1] + self.parser.phase = self.parser.phases["inHead"] + + def startTagOther(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagImplyHead(self, token): + self.startTagHead(impliedTagToken("head", "StartTag")) + return token + + def endTagOther(self, token): + self.parser.parseError("end-tag-after-implied-root", + {"name": token["name"]}) + + class InHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("title", self.startTagTitle), + (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle), + ("script", self.startTagScript), + (("base", "basefont", "bgsound", "command", "link"), + self.startTagBaseLinkCommand), + ("meta", self.startTagMeta), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self. endTagHandler = utils.MethodDispatcher([ + ("head", self.endTagHead), + (("br", "html", "body"), self.endTagHtmlBodyBr) + ]) + self.endTagHandler.default = self.endTagOther + + # the real thing + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagHead(self, token): + self.parser.parseError("two-heads-are-not-better-than-one") + + def startTagBaseLinkCommand(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + def startTagMeta(self, token): + self.tree.insertElement(token) + self.tree.openElements.pop() + token["selfClosingAcknowledged"] = True + + attributes = token["data"] + if self.parser.tokenizer.stream.charEncoding[1] == "tentative": + if "charset" in attributes: + self.parser.tokenizer.stream.changeEncoding(attributes["charset"]) + elif ("content" in attributes and + "http-equiv" in attributes and + attributes["http-equiv"].lower() == "content-type"): + # Encoding it as UTF-8 here is a hack, as really we should pass + # the abstract Unicode string, and just use the + # ContentAttrParser on that, but using UTF-8 allows all chars + # to be encoded and as a ASCII-superset works. 
+ data = inputstream.EncodingBytes(attributes["content"].encode("utf-8")) + parser = inputstream.ContentAttrParser(data) + codec = parser.parse() + self.parser.tokenizer.stream.changeEncoding(codec) + + def startTagTitle(self, token): + self.parser.parseRCDataRawtext(token, "RCDATA") + + def startTagNoScriptNoFramesStyle(self, token): + # Need to decide whether to implement the scripting-disabled case + self.parser.parseRCDataRawtext(token, "RAWTEXT") + + def startTagScript(self, token): + self.tree.insertElement(token) + self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState + self.parser.originalPhase = self.parser.phase + self.parser.phase = self.parser.phases["text"] + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHead(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "head", "Expected head got %s" % node.name + self.parser.phase = self.parser.phases["afterHead"] + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.endTagHead(impliedTagToken("head")) + + # XXX If we implement a parser for which scripting is disabled we need to + # implement this phase. + # + # class InHeadNoScriptPhase(Phase): + class AfterHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("base", "basefont", "bgsound", "link", "meta", "noframes", "script", + "style", "title"), + self.startTagFromHead), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"), + self.endTagHtmlBodyBr)]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.anythingElse() + return True + + def processCharacters(self, token): + self.anythingElse() + return token + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBody(self, token): + self.parser.framesetOK = False + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inBody"] + + def startTagFrameset(self, token): + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagFromHead(self, token): + self.parser.parseError("unexpected-start-tag-out-of-my-head", + {"name": token["name"]}) + self.tree.openElements.append(self.tree.headPointer) + self.parser.phases["inHead"].processStartTag(token) + for node in self.tree.openElements[::-1]: + if node.name == "head": + self.tree.openElements.remove(node) + break + + def startTagHead(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.anythingElse() + return token + + def endTagHtmlBodyBr(self, token): + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + def anythingElse(self): + self.tree.insertElement(impliedTagToken("body", "StartTag")) + self.parser.phase = self.parser.phases["inBody"] + self.parser.framesetOK = True + + class InBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody + # the really-really-really-very crazy mode + def __init__(self, 
parser, tree): + Phase.__init__(self, parser, tree) + + # Keep a ref to this for special handling of whitespace in <pre>
+            self.processSpaceCharactersNonPre = self.processSpaceCharacters
+
+            self.startTagHandler = utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("base", "basefont", "bgsound", "command", "link", "meta",
+                  "script", "style", "title"),
+                 self.startTagProcessInHead),
+                ("body", self.startTagBody),
+                ("frameset", self.startTagFrameset),
+                (("address", "article", "aside", "blockquote", "center", "details",
+                  "details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
+                  "section", "summary", "ul"),
+                 self.startTagCloseP),
+                (headingElements, self.startTagHeading),
+                (("pre", "listing"), self.startTagPreListing),
+                ("form", self.startTagForm),
+                (("li", "dd", "dt"), self.startTagListItem),
+                ("plaintext", self.startTagPlaintext),
+                ("a", self.startTagA),
+                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
+                  "strong", "tt", "u"), self.startTagFormatting),
+                ("nobr", self.startTagNobr),
+                ("button", self.startTagButton),
+                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
+                ("xmp", self.startTagXmp),
+                ("table", self.startTagTable),
+                (("area", "br", "embed", "img", "keygen", "wbr"),
+                 self.startTagVoidFormatting),
+                (("param", "source", "track"), self.startTagParamSource),
+                ("input", self.startTagInput),
+                ("hr", self.startTagHr),
+                ("image", self.startTagImage),
+                ("isindex", self.startTagIsIndex),
+                ("textarea", self.startTagTextarea),
+                ("iframe", self.startTagIFrame),
+                (("noembed", "noframes", "noscript"), self.startTagRawtext),
+                ("select", self.startTagSelect),
+                (("rp", "rt"), self.startTagRpRt),
+                (("option", "optgroup"), self.startTagOpt),
+                (("math"), self.startTagMath),
+                (("svg"), self.startTagSvg),
+                (("caption", "col", "colgroup", "frame", "head",
+                  "tbody", "td", "tfoot", "th", "thead",
+                  "tr"), self.startTagMisplaced)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
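# --- Editor's illustration (not part of the vendored diff) ---
# The dispatcher tables built above key on token["name"]. Tokens are plain
# dicts. Note the two conventions in this package: the tokenizer/parser
# stream uses the integer codes from constants.tokenTypes (Doctype=0,
# Characters=1, SpaceCharacters=2, StartTag=3, EndTag=4, EmptyTag=5,
# Comment=6, ParseError=7), while the treewalker/filter streams consumed by
# filters/*.py use the string names. A hypothetical start-tag token, in the
# shape the phases receive it (after normalizeToken has turned the
# attribute pairs into a dict):
#
#     {"type": 3,               # tokenTypes["StartTag"]
#      "name": "div",
#      "data": {"class": "x"},  # attributes, normalized to a dict
#      "selfClosing": False}
# --------------------------------------------------------------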
+            self.endTagHandler = utils.MethodDispatcher([
+                ("body", self.endTagBody),
+                ("html", self.endTagHtml),
+                (("address", "article", "aside", "blockquote", "button", "center",
+                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
+                  "section", "summary", "ul"), self.endTagBlock),
+                ("form", self.endTagForm),
+                ("p", self.endTagP),
+                (("dd", "dt", "li"), self.endTagListItem),
+                (headingElements, self.endTagHeading),
+                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
+                  "strike", "strong", "tt", "u"), self.endTagFormatting),
+                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
+                ("br", self.endTagBr),
+            ])
+            self.endTagHandler.default = self.endTagOther
+
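# --- Editor's sketch (not part of the vendored diff) ---
# How the handler tables above behave: utils.MethodDispatcher flattens
# tuple keys into one dict entry per tag name and falls back to a
# `.default` callable for names it has never seen. A minimal stand-in in
# plain Python (MiniDispatcher and the handler strings are hypothetical):

class MiniDispatcher(dict):
    default = None

    def __init__(self, items):
        entries = []
        for names, handler in items:
            if isinstance(names, (tuple, list, set, frozenset)):
                # a tuple key registers the same handler for every name
                entries.extend((name, handler) for name in names)
            else:
                entries.append((names, handler))
        dict.__init__(self, entries)

    def __getitem__(self, key):
        # unknown tag names route to the default handler
        return dict.get(self, key, self.default)

table = MiniDispatcher([(("b", "i"), "formatting"), ("a", "anchor")])
table.default = "other"
print(table["i"], table["a"], table["video"])  # formatting anchor other
# -------------------------------------------------------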
+        def isMatchingFormattingElement(self, node1, node2):
+            if node1.name != node2.name or node1.namespace != node2.namespace:
+                return False
+            elif len(node1.attributes) != len(node2.attributes):
+                return False
+            else:
+                attributes1 = sorted(node1.attributes.items())
+                attributes2 = sorted(node2.attributes.items())
+                for attr1, attr2 in zip(attributes1, attributes2):
+                    if attr1 != attr2:
+                        return False
+            return True
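# --- Editor's sketch (not part of the vendored diff) ---
# What "matching" means above: identical tag name, identical namespace,
# and exactly the same attribute set. A tiny demonstration with stand-in
# nodes (Node here is hypothetical; real nodes come from the treebuilder):

from collections import namedtuple

Node = namedtuple("Node", ["name", "namespace", "attributes"])

ns = "http://www.w3.org/1999/xhtml"
a = Node("b", ns, {"class": "x"})
b = Node("b", ns, {"class": "x"})
c = Node("b", ns, {"class": "y"})

def matches(n1, n2):
    # same comparison as isMatchingFormattingElement: names and
    # namespaces equal, then attribute items compared in sorted order
    return ((n1.name, n1.namespace) == (n2.name, n2.namespace) and
            sorted(n1.attributes.items()) == sorted(n2.attributes.items()))

print(matches(a, b), matches(a, c))  # True False
# -------------------------------------------------------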
+
+        # helper
+        def addFormattingElement(self, token):
+            self.tree.insertElement(token)
+            element = self.tree.openElements[-1]
+
+            matchingElements = []
+            for node in self.tree.activeFormattingElements[::-1]:
+                if node is Marker:
+                    break
+                elif self.isMatchingFormattingElement(node, element):
+                    matchingElements.append(node)
+
+            assert len(matchingElements) <= 3
+            if len(matchingElements) == 3:
+                self.tree.activeFormattingElements.remove(matchingElements[-1])
+            self.tree.activeFormattingElements.append(element)
+
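# --- Editor's note (not part of the vendored diff) ---
# The assert above enforces the HTML5 "Noah's Ark" clause: the list of
# active formatting elements may hold at most three entries matching a
# newly pushed element (per the isMatchingFormattingElement test). Because
# the loop walks the list in reverse, matchingElements[-1] is the earliest
# matching entry, and it is evicted before the new element is appended.
# This bounds reconstruction work on pathological input such as a long
# run of identical <b> start tags.
# ------------------------------------------------------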
+        # the real deal
+        def processEOF(self):
+            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
+                                          "tfoot", "th", "thead", "tr", "body",
+                                          "html"))
+            for node in self.tree.openElements[::-1]:
+                if node.name not in allowed_elements:
+                    self.parser.parseError("expected-closing-tag-but-got-eof")
+                    break
+            # Stop parsing
+
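# --- Editor's usage sketch (not part of the vendored diff) ---
# End-to-end use of this parser and the filters added earlier in this
# diff, assuming the standalone html5lib package is importable (the copy
# vendored under pip._vendor.html5lib exposes the same API):

import html5lib
from html5lib import serializer
from html5lib.filters import whitespace

# Full document: html5lib.parse wraps HTMLParser.parse and returns an
# xml.etree tree by default.
tree = html5lib.parse("<!DOCTYPE html><p class=a>hello   world</p>")

# Fragment parsing, as if assigned to a <div>'s innerHTML.
fragment = html5lib.parseFragment("<b>bold</b> trailing text")

# Filters are iterator wrappers: chain one onto a treewalker stream and
# serialize the result. The whitespace filter collapses runs of spaces
# outside <pre>/<textarea>/RCDATA elements.
walker = html5lib.getTreeWalker("etree")
stream = whitespace.Filter(walker(tree))
print(serializer.HTMLSerializer().render(stream))

# strict=True makes HTMLParser raise on the first parse error (here, a
# missing doctype) instead of recovering.
try:
    html5lib.HTMLParser(strict=True).parse("<p>no doctype")
except Exception as err:  # html5lib.html5parser.ParseError in practice
    print("parse error:", err)
# --------------------------------------------------------------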
+        def processSpaceCharactersDropNewline(self, token):
+            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
+            # want to drop leading newlines
, , and ",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function aa(){return!0}function ba(){return!1}function ca(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 
0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h]","i"),ha=/^\s+/,ia=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,ja=/<([\w:]+)/,ka=/\s*$/g,ra={option:[1,""],legend:[1,"
","
"],area:[1,"",""],param:[1,"",""],thead:[1,"","
"],tr:[2,"","
"],col:[2,"","
"],td:[3,"","
"],_default:k.htmlSerialize?[0,"",""]:[1,"X
","
"]},sa=da(y),ta=sa.appendChild(y.createElement("div"));ra.optgroup=ra.option,ra.tbody=ra.tfoot=ra.colgroup=ra.caption=ra.thead,ra.th=ra.td;function ua(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ua(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function va(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wa(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xa(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function ya(a){var b=pa.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function za(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function Aa(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Ba(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xa(b).text=a.text,ya(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!ga.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(ta.innerHTML=a.outerHTML,ta.removeChild(f=ta.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ua(f),h=ua(a),g=0;null!=(e=h[g]);++g)d[g]&&Ba(e,d[g]);if(b)if(c)for(h=h||ua(a),d=d||ua(f),g=0;null!=(e=h[g]);g++)Aa(e,d[g]);else Aa(a,f);return d=ua(f,"script"),d.length>0&&za(d,!i&&ua(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=da(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(la.test(f)){h=h||o.appendChild(b.createElement("div")),i=(ja.exec(f)||["",""])[1].toLowerCase(),l=ra[i]||ra._default,h.innerHTML=l[1]+f.replace(ia,"<$1>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&ha.test(f)&&p.push(b.createTextNode(ha.exec(f)[0])),!k.tbody){f="table"!==i||ka.test(f)?""!==l[1]||ka.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ua(p,"input"),va),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ua(o.appendChild(f),"script"),g&&za(h),c)){e=0;while(f=h[e++])oa.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in 
g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wa(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ua(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&za(ua(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ua(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fa,""):void 0;if(!("string"!=typeof a||ma.test(a)||!k.htmlSerialize&&ga.test(a)||!k.leadingWhitespace&&ha.test(a)||ra[(ja.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ia,"<$1>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ua(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ua(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&na.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ua(i,"script"),xa),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ua(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,ya),j=0;f>j;j++)d=g[j],oa.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qa,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Ca,Da={};function Ea(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fa(a){var b=y,c=Da[a];return c||(c=Ea(a,b),"none"!==c&&c||(Ca=(Ca||m("