removed .venv

Peter Molnar 2018-08-10 09:48:36 +01:00
parent fc2288c2fc
commit d4a61a3b91
1030 changed files with 0 additions and 247214 deletions


@ -1,91 +0,0 @@
.. image:: http://docs.wand-py.org/en/latest/_static/wand.png
:width: 120
:height: 120
Wand_
=====
Wand is a simple ``ctypes``-based ImageMagick_ binding for Python.
It does not currently cover the full MagickWand API.
It works on Python 2.6, 2.7, 3.2--3.5, and PyPy.
You can install the package from PyPI_ by using ``pip``:
.. code-block:: console
$ pip install Wand
Or would you like to try the bleeding edge? Check out the head
revision of the source code from the `GitHub repository`__:
.. code-block:: console
$ git clone git://github.com/dahlia/wand.git
$ cd wand/
$ python setup.py install
.. _Wand: http://wand-py.org/
.. _ImageMagick: http://www.imagemagick.org/
.. _PyPI: https://pypi.python.org/pypi/Wand
__ https://github.com/dahlia/wand
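A minimal usage sketch, assuming the ``wand.image`` API and a local ``source.jpg`` (both filenames are placeholders):
.. code-block:: python

    from wand.image import Image

    # open an image, resize it, and save a copy (filenames are placeholders)
    with Image(filename='source.jpg') as img:
        img.resize(120, 120)
        img.save(filename='resized.jpg')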
Docs
----
Recent version
http://docs.wand-py.org/
Development version
http://docs.wand-py.org/en/latest/
.. image:: https://readthedocs.org/projects/wand/badge/
:alt: Documentation Status
:target: http://docs.wand-py.org/en/latest/
Community
---------
Website
http://wand-py.org/
GitHub
https://github.com/dahlia/wand
Package Index (Cheeseshop)
https://pypi.python.org/pypi/Wand
.. image:: https://badge.fury.io/py/Wand.svg?
:alt: Latest PyPI version
:target: https://pypi.python.org/pypi/Wand
Mailing list
wand@librelist.com
List archive
http://librelist.com/browser/wand/
http://dir.gmane.org/gmane.comp.python.wand
Stack Overflow tag (Q&A)
http://stackoverflow.com/questions/tagged/wand
Quora topic (Q&A)
https://www.quora.com/Wand-ImageMagick-binding
IRC
`irc://irc.freenode.net/wand <http://webchat.freenode.net/?channels=wand>`_
Continuous Integration (Travis CI)
https://travis-ci.org/dahlia/wand
.. image:: https://secure.travis-ci.org/dahlia/wand.svg?branch=master
:alt: Build Status
:target: https://travis-ci.org/dahlia/wand
Code Coverage
https://coveralls.io/r/dahlia/wand
.. image:: https://img.shields.io/coveralls/dahlia/wand.svg?style=flat
:alt: Coverage Status
:target: https://coveralls.io/r/dahlia/wand


@ -1,76 +0,0 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r
fi
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="/home/cadeyrn/Projects/petermolnar.net/nasg/.venv"
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
if [ "x(.venv) " != x ] ; then
PS1="(.venv) ${PS1:-}"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r
fi


@ -1,37 +0,0 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV "/home/cadeyrn/Projects/petermolnar.net/nasg/.venv"
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
if (".venv" != "") then
set env_name = ".venv"
else
if (`basename "$VIRTUAL_ENV"` == "__") then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
else
set env_name = `basename "$VIRTUAL_ENV"`
endif
endif
set prompt = "[$env_name] $prompt"
unset env_name
endif
alias pydoc python -m pydoc
rehash


@ -1,75 +0,0 @@
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
# you cannot run it directly
function deactivate -d "Exit virtualenv and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
functions -e fish_prompt
set -e _OLD_FISH_PROMPT_OVERRIDE
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
set -e VIRTUAL_ENV
if test "$argv[1]" != "nondestructive"
# Self destruct!
functions -e deactivate
end
end
# unset irrelevant variables
deactivate nondestructive
set -gx VIRTUAL_ENV "/home/cadeyrn/Projects/petermolnar.net/nasg/.venv"
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH
# unset PYTHONHOME if set
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# save the current fish_prompt function as the function _old_fish_prompt
functions -c fish_prompt _old_fish_prompt
# with the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command
set -l old_status $status
# Prompt override?
if test -n "(.venv) "
printf "%s%s" "(.venv) " (set_color normal)
else
# ...Otherwise, prepend env
set -l _checkbase (basename "$VIRTUAL_ENV")
if test $_checkbase = "__"
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal)
else
printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal)
end
end
# Restore the return status of the previous command.
echo "exit $old_status" | .
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1 +0,0 @@
python3

View file

@ -1 +0,0 @@
/usr/bin/python3


@ -1,11 +0,0 @@
#!/home/cadeyrn/Projects/petermolnar.net/nasg/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from unidecode.util import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())


@ -1,37 +0,0 @@
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here is a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
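A minimal rendering sketch, assuming the ``jinja2`` package (the template string and context are placeholders):
.. code-block:: python

    from jinja2 import Template

    # compile a placeholder template from a string and render it with a context
    template = Template("Hello {{ name }}!")
    print(template.render(name="World"))  # -> Hello World!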
Philosophy
----------
Application logic is for the controller, but don't make life too hard
for the template designer by giving them too little functionality.
For more information, visit the `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: https://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/


@ -1,31 +0,0 @@
Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,68 +0,0 @@
Metadata-Version: 2.0
Name: Jinja2
Version: 2.10
Summary: A small but fast and easy to use stand-alone template engine written in pure python.
Home-page: http://jinja.pocoo.org/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
License: BSD
Description-Content-Type: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Dist: MarkupSafe (>=0.23)
Provides-Extra: i18n
Requires-Dist: Babel (>=0.8); extra == 'i18n'
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here is a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller, but don't make life too hard
for the template designer by giving them too little functionality.
For more information, visit the `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: https://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/


@ -1,63 +0,0 @@
Jinja2-2.10.dist-info/DESCRIPTION.rst,sha256=b5ckFDoM7vVtz_mAsJD4OPteFKCqE7beu353g4COoYI,978
Jinja2-2.10.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
Jinja2-2.10.dist-info/METADATA,sha256=18EgU8zR6-av-0-5y_gXebzK4GnBB_76lALUsl-6QHM,2258
Jinja2-2.10.dist-info/RECORD,,
Jinja2-2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
Jinja2-2.10.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
Jinja2-2.10.dist-info/metadata.json,sha256=NPUJ9TMBxVQAv_kTJzvU8HwmP-4XZvbK9mz6_4YUVl4,1473
Jinja2-2.10.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
jinja2/__init__.py,sha256=xJHjaMoy51_KXn1wf0cysH6tUUifUxZCwSOfcJGEYZw,2614
jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794
jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045
jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755
jinja2/sandbox.py,sha256=TVyZHlNqqTzsv9fv2NvJNmSdWRHTguhyMHdxjWms32U,16708
jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237
jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629
jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
Jinja2-2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
jinja2/__pycache__/__init__.cpython-36.pyc,,
jinja2/__pycache__/_compat.cpython-36.pyc,,
jinja2/__pycache__/_identifier.cpython-36.pyc,,
jinja2/__pycache__/asyncfilters.cpython-36.pyc,,
jinja2/__pycache__/asyncsupport.cpython-36.pyc,,
jinja2/__pycache__/bccache.cpython-36.pyc,,
jinja2/__pycache__/compiler.cpython-36.pyc,,
jinja2/__pycache__/constants.cpython-36.pyc,,
jinja2/__pycache__/debug.cpython-36.pyc,,
jinja2/__pycache__/defaults.cpython-36.pyc,,
jinja2/__pycache__/environment.cpython-36.pyc,,
jinja2/__pycache__/exceptions.cpython-36.pyc,,
jinja2/__pycache__/ext.cpython-36.pyc,,
jinja2/__pycache__/filters.cpython-36.pyc,,
jinja2/__pycache__/idtracking.cpython-36.pyc,,
jinja2/__pycache__/lexer.cpython-36.pyc,,
jinja2/__pycache__/loaders.cpython-36.pyc,,
jinja2/__pycache__/meta.cpython-36.pyc,,
jinja2/__pycache__/nativetypes.cpython-36.pyc,,
jinja2/__pycache__/nodes.cpython-36.pyc,,
jinja2/__pycache__/optimizer.cpython-36.pyc,,
jinja2/__pycache__/parser.cpython-36.pyc,,
jinja2/__pycache__/runtime.cpython-36.pyc,,
jinja2/__pycache__/sandbox.cpython-36.pyc,,
jinja2/__pycache__/tests.cpython-36.pyc,,
jinja2/__pycache__/utils.cpython-36.pyc,,
jinja2/__pycache__/visitor.cpython-36.pyc,,


@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any


@ -1,4 +0,0 @@
[babel.extractors]
jinja2 = jinja2.ext:babel_extract[i18n]


@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://jinja.pocoo.org/"}}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "extras": ["i18n"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "Jinja2", "run_requires": [{"extra": "i18n", "requires": ["Babel (>=0.8)"]}, {"requires": ["MarkupSafe (>=0.23)"]}], "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "version": "2.10"}


@ -1,115 +0,0 @@
MarkupSafe
==========
Implements a unicode subclass that supports HTML strings:
.. code-block:: python
>>> from markupsafe import Markup, escape
>>> escape("<script>alert(document.cookie);</script>")
Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> tmpl = Markup("<em>%s</em>")
>>> tmpl % "Peter > Lustig"
Markup(u'<em>Peter &gt; Lustig</em>')
If you want to make an object unicode that is not yet unicode
but don't want to lose the taint information, you can use the
``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
is a different name for the same function).
.. code-block:: python
>>> from markupsafe import soft_unicode
>>> soft_unicode(42)
u'42'
>>> soft_unicode(Markup('foo'))
Markup(u'foo')
HTML Representations
--------------------
Objects can customize their HTML markup equivalent by overriding
the ``__html__`` function:
.. code-block:: python
>>> class Foo(object):
... def __html__(self):
... return '<strong>Nice</strong>'
...
>>> escape(Foo())
Markup(u'<strong>Nice</strong>')
>>> Markup(Foo())
Markup(u'<strong>Nice</strong>')
Silent Escapes
--------------
Since MarkupSafe 0.10 there is now also a separate escape function
called ``escape_silent`` that returns an empty string for ``None`` for
consistency with other systems that return empty strings for ``None``
when escaping (for instance Pylons' webhelpers).
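A doctest-style sketch of that difference, assuming ``markupsafe`` is importable:
.. code-block:: python

    >>> # sketch of the behaviour described above
    >>> from markupsafe import escape, escape_silent
    >>> escape(None)
    Markup(u'None')
    >>> escape_silent(None)
    Markup(u'')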
If you also want to use this for the escape method of the Markup
object, you can create your own subclass that does that:
.. code-block:: python
from markupsafe import Markup, escape_silent as escape
class SilentMarkup(Markup):
__slots__ = ()
@classmethod
def escape(cls, s):
return cls(escape(s))
New-Style String Formatting
---------------------------
Starting with MarkupSafe 0.21, new-style string formatting from Python 2.6 and
3.x is fully supported. Previously the escape behavior of those
functions was spotty at best. The new implementation operates under the
following algorithm:
1. if an object has an ``__html_format__`` method it is called as
replacement for ``__format__`` with the format specifier. It either
has to return a string or markup object.
2. if an object has an ``__html__`` method it is called.
3. otherwise the default format system of Python kicks in and the result
is HTML escaped.
Here is how you can implement your own formatting:
.. code-block:: python
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
And to format that user:
.. code-block:: python
>>> user = User(1, 'foo')
>>> Markup('<p>User: {0:link}').format(user)
Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
MarkupSafe supports Python 2.6, 2.7, and Python 3.3 and higher.


@ -1,33 +0,0 @@
Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS
for more details.
Some rights reserved.
Redistribution and use in source and binary forms of the software as well
as documentation, with or without modification, are permitted provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.


@ -1,135 +0,0 @@
Metadata-Version: 2.0
Name: MarkupSafe
Version: 1.0
Summary: Implements a XML/HTML/XHTML Markup safe string for Python
Home-page: http://github.com/pallets/markupsafe
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
License: BSD
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
MarkupSafe
==========
Implements a unicode subclass that supports HTML strings:
.. code-block:: python
>>> from markupsafe import Markup, escape
>>> escape("<script>alert(document.cookie);</script>")
Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> tmpl = Markup("<em>%s</em>")
>>> tmpl % "Peter > Lustig"
Markup(u'<em>Peter &gt; Lustig</em>')
If you want to make an object unicode that is not yet unicode
but don't want to lose the taint information, you can use the
``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
is a different name for the same function).
.. code-block:: python
>>> from markupsafe import soft_unicode
>>> soft_unicode(42)
u'42'
>>> soft_unicode(Markup('foo'))
Markup(u'foo')
HTML Representations
--------------------
Objects can customize their HTML markup equivalent by overriding
the ``__html__`` function:
.. code-block:: python
>>> class Foo(object):
... def __html__(self):
... return '<strong>Nice</strong>'
...
>>> escape(Foo())
Markup(u'<strong>Nice</strong>')
>>> Markup(Foo())
Markup(u'<strong>Nice</strong>')
Silent Escapes
--------------
Since MarkupSafe 0.10 there is now also a separate escape function
called ``escape_silent`` that returns an empty string for ``None`` for
consistency with other systems that return empty strings for ``None``
when escaping (for instance Pylons' webhelpers).
If you also want to use this for the escape method of the Markup
object, you can create your own subclass that does that:
.. code-block:: python
from markupsafe import Markup, escape_silent as escape
class SilentMarkup(Markup):
__slots__ = ()
@classmethod
def escape(cls, s):
return cls(escape(s))
New-Style String Formatting
---------------------------
Starting with MarkupSafe 0.21, new-style string formatting from Python 2.6 and
3.x is fully supported. Previously the escape behavior of those
functions was spotty at best. The new implementation operates under the
following algorithm:
1. if an object has an ``__html_format__`` method it is called as
replacement for ``__format__`` with the format specifier. It either
has to return a string or markup object.
2. if an object has an ``__html__`` method it is called.
3. otherwise the default format system of Python kicks in and the result
is HTML escaped.
Here is how you can implement your own formatting:
.. code-block:: python
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
And to format that user:
.. code-block:: python
>>> user = User(1, 'foo')
>>> Markup('<p>User: {0:link}').format(user)
Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')
MarkupSafe supports Python 2.6, 2.7, and Python 3.3 and higher.


@ -1,18 +0,0 @@
MarkupSafe-1.0.dist-info/DESCRIPTION.rst,sha256=3B3J0YLzzmJQVaWQ_XlVMhGeHA_DvBqysABvul_5fko,3397
MarkupSafe-1.0.dist-info/LICENSE.txt,sha256=C76IIo_WPSDsCX9k5Y1aCkZRI64TkUChjUBsYLSIJLU,1582
MarkupSafe-1.0.dist-info/METADATA,sha256=EUwvRzJbtRP3hBMc8Z2TDT44TBDeZdIurbGzIc7FOkg,4182
MarkupSafe-1.0.dist-info/RECORD,,
MarkupSafe-1.0.dist-info/WHEEL,sha256=X8kVdBCq85ICewwfaE6btv5qKsFQfVq8NYJIXUK0i1A,104
MarkupSafe-1.0.dist-info/metadata.json,sha256=LPb3W7qq-SH_u1SFzjXQqT8sCGpE-b4NmYAxMcw91e8,924
MarkupSafe-1.0.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
markupsafe/__init__.py,sha256=xtkRdxhzJzgp65wUo1D4DjnazxHU88pPldaAuDekBeY,10697
markupsafe/_compat.py,sha256=r1HE0CpcAZeb-AiTV9wITR91PeLHn0CzZ_XHkYoozpI,565
markupsafe/_constants.py,sha256=U_xybFQsyXKCgHSfranJnFzo-z9nn9fuBeSk243sE5Q,4795
markupsafe/_native.py,sha256=E2Un1ysOf-w45d18YCj8UelT5UP7Vt__IuFPYJ7YRIs,1187
markupsafe/_speedups.c,sha256=B6Mf6Fn33WqkagfwY7q5ZBSm_vJoHDYxDB0Jp_DP7Jw,5936
markupsafe/_speedups.cpython-36m-x86_64-linux-gnu.so,sha256=Vg-SXdIoD4ej1JxRCvyNfKTycFLxOLmCzGK-ZvaYrR8,43720
MarkupSafe-1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
markupsafe/__pycache__/__init__.cpython-36.pyc,,
markupsafe/__pycache__/_compat.cpython-36.pyc,,
markupsafe/__pycache__/_constants.cpython-36.pyc,,
markupsafe/__pycache__/_native.cpython-36.pyc,,


@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: false
Tag: cp36-cp36m-linux_x86_64


@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/pallets/markupsafe"}}}, "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "MarkupSafe", "summary": "Implements a XML/HTML/XHTML Markup safe string for Python", "version": "1.0"}


@ -1,12 +0,0 @@
YAML is a data serialization format designed for human readability
and interaction with scripting languages. PyYAML is a YAML parser
and emitter for Python.
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that
allow arbitrary Python objects to be represented.
PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistence.
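A minimal round-trip sketch, assuming the ``yaml`` module that PyYAML installs:
.. code-block:: python

    import yaml

    # parse a placeholder YAML document into Python objects, then dump it back out
    data = yaml.safe_load("name: example\nitems: [1, 2, 3]\n")
    print(data)                  # {'name': 'example', 'items': [1, 2, 3]}
    print(yaml.safe_dump(data))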


@ -1,35 +0,0 @@
Metadata-Version: 2.0
Name: PyYAML
Version: 3.13
Summary: YAML parser and emitter for Python
Home-page: http://pyyaml.org/wiki/PyYAML
Author: Kirill Simonov
Author-email: xi@resolvent.net
License: MIT
Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.13.tar.gz
Platform: Any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup
YAML is a data serialization format designed for human readability
and interaction with scripting languages. PyYAML is a YAML parser
and emitter for Python.
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that
allow arbitrary Python objects to be represented.
PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistence.


@ -1,41 +0,0 @@
PyYAML-3.13.dist-info/DESCRIPTION.rst,sha256=4nzkrOwMTYfusIfdRz4-dl_9Blan5axHPKMiVJEOV-4,534
PyYAML-3.13.dist-info/METADATA,sha256=U0dUImL7hk_qDCAP0kWKM2_ECiVADYEJNjcAiiaDFxM,1424
PyYAML-3.13.dist-info/RECORD,,
PyYAML-3.13.dist-info/WHEEL,sha256=X8kVdBCq85ICewwfaE6btv5qKsFQfVq8NYJIXUK0i1A,104
PyYAML-3.13.dist-info/metadata.json,sha256=mTri214HvzoAZouJtqaJpjm4_LiLbQIlnYVvQEt5QYQ,1013
PyYAML-3.13.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
yaml/__init__.py,sha256=RVZ2KJeYiiJ-E0aHAahiYelgmPb2n-mwtHBJvw87TsY,9607
yaml/composer.py,sha256=rAskP6VXqfAZcZxGIQFyPwVkNZYtnmmylV4TBSn4EUk,4881
yaml/constructor.py,sha256=SVt8piayRVwV7Pd9RDvIM60BftFrqkA35z7HWwubHkg,25554
yaml/cyaml.py,sha256=zajOEEo_11ZJC0pko7O-mqT9JYxhXVdFIXYAHTW9COU,3294
yaml/dumper.py,sha256=86Yz2GmUk6lk6xUKRRWZwDmvSMp3pPekmWDIarq93Iw,2723
yaml/emitter.py,sha256=16CwscYPx2DejDyE8MqfMybsm1DnFsSUwDvSQuC-3Rc,42954
yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
yaml/loader.py,sha256=aQ71rMxcAW_oA1dt0x3hlRDdqYjzwXXfpvSFFewh9NA,1138
yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
yaml/reader.py,sha256=r22-ns0QvKAqGopu3IgczoK5VLGA7oZ3XN9sBESkb04,6854
yaml/representer.py,sha256=ZmBbegM-yVa3NBBTo-LqF-O2IcgGIT5LlDweB8SDgAE,14097
yaml/resolver.py,sha256=DJCjpQr8YQCEYYjKEYqTl0GrsZil2H4aFOI9b0Oe-U4,8970
yaml/scanner.py,sha256=6LVPebouAqunvxw3tFVMa34pDgPsDKg2JuaaeNeTLtk,51695
yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
PyYAML-3.13.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
yaml/__pycache__/__init__.cpython-36.pyc,,
yaml/__pycache__/composer.cpython-36.pyc,,
yaml/__pycache__/constructor.cpython-36.pyc,,
yaml/__pycache__/cyaml.cpython-36.pyc,,
yaml/__pycache__/dumper.cpython-36.pyc,,
yaml/__pycache__/emitter.cpython-36.pyc,,
yaml/__pycache__/error.cpython-36.pyc,,
yaml/__pycache__/events.cpython-36.pyc,,
yaml/__pycache__/loader.cpython-36.pyc,,
yaml/__pycache__/nodes.cpython-36.pyc,,
yaml/__pycache__/parser.cpython-36.pyc,,
yaml/__pycache__/reader.cpython-36.pyc,,
yaml/__pycache__/representer.cpython-36.pyc,,
yaml/__pycache__/resolver.cpython-36.pyc,,
yaml/__pycache__/scanner.cpython-36.pyc,,
yaml/__pycache__/serializer.cpython-36.pyc,,
yaml/__pycache__/tokens.cpython-36.pyc,,


@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: false
Tag: cp36-cp36m-linux_x86_64


@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup"], "download_url": "http://pyyaml.org/download/pyyaml/PyYAML-3.13.tar.gz", "extensions": {"python.details": {"contacts": [{"email": "xi@resolvent.net", "name": "Kirill Simonov", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://pyyaml.org/wiki/PyYAML"}}}, "generator": "bdist_wheel (0.30.0)", "license": "MIT", "metadata_version": "2.0", "name": "PyYAML", "platform": "Any", "summary": "YAML parser and emitter for Python", "version": "3.13"}


@ -1,187 +0,0 @@
Unidecode, lossy ASCII transliterations of Unicode text
=======================================================
It often happens that you have text data in Unicode, but you need to
represent it in ASCII. For example when integrating with legacy code that
doesn't support Unicode, or for ease of entry of non-Roman names on a US
keyboard, or when constructing ASCII machine identifiers from
human-readable Unicode strings that should still be somewhat intelligible
(a popular example of this is when making a URL slug from an article
title).
In most of these examples you could represent Unicode characters as
`???` or `\\15BA\\15A0\\1610`, to mention two extreme cases. But that's
nearly useless to someone who actually wants to read what the text says.
What Unidecode provides is a middle road: function `unidecode()` takes
Unicode data and tries to represent it in ASCII characters (i.e., the
universally displayable characters between 0x00 and 0x7F), where the
compromises taken when mapping between two character sets are chosen to be
near what a human with a US keyboard would choose.
The quality of the resulting ASCII representation varies. For languages of
western origin it should be between perfect and good. On the other hand
transliteration (i.e., conveying, in Roman letters, the pronunciation
expressed by the text in some other writing system) of languages like
Chinese, Japanese or Korean is a very complex issue and this library does
not even attempt to address it. It draws the line at context-free
character-by-character mapping. So a good rule of thumb is that the further
the script you are transliterating is from the Latin alphabet, the worse the
transliteration will be.
Note that this module generally produces better results than simply
stripping accents from characters (which can be done in Python with
built-in functions). It is based on hand-tuned character mappings that for
example also contain ASCII approximations for symbols and non-Latin
alphabets.
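A comparison sketch, assuming ``unidecode`` is importable; the expected ``unidecode()`` output for the CJK sample is taken from the examples below:
.. code-block:: python

    import unicodedata
    from unidecode import unidecode

    s = u"\u5317\u4EB0"
    # stripping accents via NFKD loses this text entirely ...
    print(repr(unicodedata.normalize("NFKD", s).encode("ascii", "ignore")))
    # ... while unidecode's hand-tuned tables give 'Bei Jing '
    print(repr(unidecode(s)))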
This is a Python port of the `Text::Unidecode` Perl module by
Sean M. Burke <sburke@cpan.org>.
Module content
--------------
The module exports a function that takes a Unicode object (Python 2.x) or
string (Python 3.x) and returns a string (that can be encoded to ASCII bytes in
Python 3.x)::
>>> from unidecode import unidecode
>>> unidecode(u'ko\u017eu\u0161\u010dek')
'kozuscek'
>>> unidecode(u'30 \U0001d5c4\U0001d5c6/\U0001d5c1')
'30 km/h'
>>> unidecode(u"\u5317\u4EB0")
'Bei Jing '
A utility is also included that allows you to transliterate text from the
command line in several ways. Reading from standard input::
$ echo hello | unidecode
hello
from a command line argument::
$ unidecode -c hello
hello
or from a file::
$ unidecode hello.txt
hello
The default encoding used by the utility depends on your system locale. You can
specify another encoding with the `-e` argument. See `unidecode --help` for a
full list of available options.
Requirements
------------
Nothing except Python itself.
You need a Python build with "wide" Unicode characters (also called "UCS-4
build") in order for unidecode to work correctly with characters outside of
the Basic Multilingual Plane (BMP). Common characters outside the BMP are bold, italic,
script, etc. variants of the Latin alphabet intended for mathematical notation.
Surrogate pair encoding of "narrow" builds is not supported in unidecode.
If your Python build supports "wide" Unicode the following expression will
return True::
>>> import sys
>>> sys.maxunicode > 0xffff
True
See PEP 261 for details regarding support for "wide" Unicode characters in
Python.
Installation
------------
To install the latest version of Unidecode from the Python package index, use
these commands::
$ pip install unidecode
To install Unidecode from the source distribution and run unit tests, use::
$ python setup.py install
$ python setup.py test
Performance notes
-----------------
By default, `unidecode` optimizes for the use case where most of the strings
passed to it are already ASCII-only and no transliteration is necessary (this
default might change in future versions).
For performance critical applications, two additional functions are exposed:
`unidecode_expect_ascii` is optimized for ASCII-only inputs (approximately 5
times faster than `unidecode_expect_nonascii` on 10 character strings, more on
longer strings), but slightly slower for non-ASCII inputs.
`unidecode_expect_nonascii` takes approximately the same amount of time on
ASCII and non-ASCII inputs, but is slightly faster for non-ASCII inputs than
`unidecode_expect_ascii`.
Apart from differences in run time, both functions produce identical results.
For most users of Unidecode, the difference in performance should be
negligible.
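A small sketch of the two variants, assuming both names are importable from the ``unidecode`` package:
.. code-block:: python

    from unidecode import unidecode_expect_ascii, unidecode_expect_nonascii

    # both variants return the same transliteration; only run time differs
    print(unidecode_expect_ascii(u"plain ascii text"))
    print(unidecode_expect_nonascii(u"ko\u017eu\u0161\u010dek"))  # -> kozuscek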
Source
------
You can get the latest development version of Unidecode with::
$ git clone https://www.tablix.org/~avian/git/unidecode.git
There is also an official mirror of this repository on GitHub at
https://github.com/avian2/unidecode
Contact
-------
Please send bug reports, patches and suggestions for Unidecode to
tomaz.solc@tablix.org.
Alternatively, you can also open a ticket or pull request at
https://github.com/avian2/unidecode
Copyright
---------
Original character transliteration tables:
Copyright 2001, Sean M. Burke <sburke@cpan.org>, all rights reserved.
Python code and later additions:
Copyright 2018, Tomaz Solc <tomaz.solc@tablix.org>
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. The programs and
documentation in this dist are distributed in the hope that they will be
useful, but without any warranty; without even the implied warranty of
merchantability or fitness for a particular purpose.
..
vim: set filetype=rst:


@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View file

@@ -1,211 +0,0 @@
Metadata-Version: 2.0
Name: Unidecode
Version: 1.0.22
Summary: ASCII transliterations of Unicode text
Home-page: UNKNOWN
Author: Tomaz Solc
Author-email: tomaz.solc@tablix.org
License: GPL
Platform: UNKNOWN
Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Text Processing
Classifier: Topic :: Text Processing :: Filters
Unidecode, lossy ASCII transliterations of Unicode text
=======================================================
It often happens that you have text data in Unicode, but you need to
represent it in ASCII. For example, when integrating with legacy code that
doesn't support Unicode, for ease of entry of non-Roman names on a US
keyboard, or when constructing ASCII machine identifiers from
human-readable Unicode strings that should still be somewhat intelligible
(a popular example is making a URL slug from an article
title).
In most of these examples you could represent Unicode characters as
`???` or `\\15BA\\15A0\\1610`, to mention two extreme cases. But that's
nearly useless to someone who actually wants to read what the text says.
What Unidecode provides is a middle road: function `unidecode()` takes
Unicode data and tries to represent it in ASCII characters (i.e., the
universally displayable characters between 0x00 and 0x7F), where the
compromises taken when mapping between two character sets are chosen to be
near what a human with a US keyboard would choose.
The quality of the resulting ASCII representation varies. For languages of
Western origin it should be between perfect and good. On the other hand,
transliteration (i.e., conveying, in Roman letters, the pronunciation
expressed by the text in some other writing system) of languages like
Chinese, Japanese or Korean is a very complex issue, and this library does
not even attempt to address it. It draws the line at context-free
character-by-character mapping. So a good rule of thumb is that the further
the script you are transliterating is from the Latin alphabet, the worse the
transliteration will be.
Note that this module generally produces better results than simply
stripping accents from characters (which can be done in Python with
built-in functions). It is based on hand-tuned character mappings that, for
example, also contain ASCII approximations for symbols and non-Latin
alphabets.
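For instance, a character such as ``Æ`` (U+00C6) has no combining-accent
decomposition, so plain accent stripping drops it entirely, while
``unidecode()`` substitutes an ASCII approximation (a small illustrative
sketch; output shown as on Python 3.x)::
    >>> import unicodedata
    >>> unicodedata.normalize('NFKD', u'\u00c6neid').encode('ascii', 'ignore')
    b'neid'
    >>> from unidecode import unidecode
    >>> unidecode(u'\u00c6neid')
    'AEneid'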
This is a Python port of the `Text::Unidecode` Perl module by
Sean M. Burke <sburke@cpan.org>.
Module content
--------------
The module exports a function that takes a Unicode object (Python 2.x) or
string (Python 3.x) and returns a string (that can be encoded to ASCII bytes in
Python 3.x)::
>>> from unidecode import unidecode
>>> unidecode(u'ko\u017eu\u0161\u010dek')
'kozuscek'
>>> unidecode(u'30 \U0001d5c4\U0001d5c6/\U0001d5c1')
'30 km/h'
>>> unidecode(u"\u5317\u4EB0")
'Bei Jing '
A utility is also included that allows you to transliterate text from the
command line in several ways. Reading from standard input::
$ echo hello | unidecode
hello
from a command line argument::
$ unidecode -c hello
hello
or from a file::
$ unidecode hello.txt
hello
The default encoding used by the utility depends on your system locale. You can
specify another encoding with the `-e` argument. See `unidecode --help` for a
full list of available options.
Requirements
------------
Nothing except Python itself.
You need a Python build with "wide" Unicode characters (also called a "UCS-4
build") in order for unidecode to work correctly with characters outside of the
Basic Multilingual Plane (BMP). Common characters outside the BMP are the bold,
italic, script, etc. variants of the Latin alphabet intended for mathematical
notation. The surrogate-pair encoding used by "narrow" builds is not supported
in unidecode.
If your Python build supports "wide" Unicode, the following expression will
return True::
>>> import sys
>>> sys.maxunicode > 0xffff
True
See PEP 261 for details regarding support for "wide" Unicode characters in
Python.
Installation
------------
To install the latest version of Unidecode from the Python package index, use
these commands::
$ pip install unidecode
To install Unidecode from the source distribution and run unit tests, use::
$ python setup.py install
$ python setup.py test
Performance notes
-----------------
By default, `unidecode` optimizes for the use case where most of the strings
passed to it are already ASCII-only and no transliteration is necessary (this
default might change in future versions).
For performance-critical applications, two additional functions are exposed:
`unidecode_expect_ascii` is optimized for ASCII-only inputs (approximately 5
times faster than `unidecode_expect_nonascii` on 10-character strings, more on
longer strings), but slightly slower for non-ASCII inputs.
`unidecode_expect_nonascii` takes approximately the same amount of time on
ASCII and non-ASCII inputs, but is slightly faster for non-ASCII inputs than
`unidecode_expect_ascii`.
Apart from differences in run time, both functions produce identical results.
For most users of Unidecode, the difference in performance should be
negligible.
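For example, a short sketch of how the specialized functions can be called
(both names are exported by the ``unidecode`` package; output shown as on
Python 3.x)::
    >>> from unidecode import unidecode_expect_ascii, unidecode_expect_nonascii
    >>> unidecode_expect_ascii(u'already ASCII')
    'already ASCII'
    >>> unidecode_expect_nonascii(u'ko\u017eu\u0161\u010dek')
    'kozuscek'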
Source
------
You can get the latest development version of Unidecode with::
$ git clone https://www.tablix.org/~avian/git/unidecode.git
There is also an official mirror of this repository on GitHub at
https://github.com/avian2/unidecode
Contact
-------
Please send bug reports, patches and suggestions for Unidecode to
tomaz.solc@tablix.org.
Alternatively, you can open a ticket or pull request at
https://github.com/avian2/unidecode
Copyright
---------
Original character transliteration tables:
Copyright 2001, Sean M. Burke <sburke@cpan.org>, all rights reserved.
Python code and later additions:
Copyright 2018, Tomaz Solc <tomaz.solc@tablix.org>
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc., 51
Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. The programs and
documentation in this dist are distributed in the hope that they will be
useful, but without any warranty; without even the implied warranty of
merchantability or fitness for a particular purpose.
..
vim: set filetype=rst:

View file

@@ -1,392 +0,0 @@
Unidecode-1.0.22.dist-info/DESCRIPTION.rst,sha256=-F5SUD-b0Ya43g1p_eu1gEm8tO4mIpYWhWTgQKMsIqA,6431
Unidecode-1.0.22.dist-info/LICENSE.txt,sha256=gXf5dRMhNSbfLPYYTY_5hsZ1r7UU1OaKQEAQUhuIBkM,18092
Unidecode-1.0.22.dist-info/METADATA,sha256=rKmVrG0-P7Uc9CuCCVDzVcFUKRO05pBxpUuJAd6lVG0,7391
Unidecode-1.0.22.dist-info/RECORD,,
Unidecode-1.0.22.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
Unidecode-1.0.22.dist-info/entry_points.txt,sha256=ItDp7W6CoSJQAKqdiUkzmecugvKvWzjfN7MMK52GM10,51
Unidecode-1.0.22.dist-info/metadata.json,sha256=DkonES6HR5OcV_EwAawVVXOuDXcDpPxDyaOblLTrvgo,1178
Unidecode-1.0.22.dist-info/top_level.txt,sha256=4uYNG2l04s0dm0mEQmPLo2zrjLbhLPKUesLr2dOTdpo,10
unidecode/__init__.py,sha256=kBAGxkNFR1A9PFMjN4wyfUQpQ8jteUW3xXg-B5SsEf8,3095
unidecode/util.py,sha256=hR86tO0ggBQdrVhi9bLcX92Q53NeO3ZDCLe6xhYtj8w,1790
unidecode/x000.py,sha256=dxTi7LcLTlbF3ZGGk9xPjx8sx2M6qPBIs2b5RS2DVNY,3041
unidecode/x001.py,sha256=ylHh3UVaPtibVuUEEWvdSeDFK0OXrWt4-LnxAgYD6qo,3891
unidecode/x002.py,sha256=Hoks5fu8uyhwDuJt93BiC6iYv_HVY4bIvKklyAtBoFI,3889
unidecode/x003.py,sha256=UdskMuqktghKcjLeegwYkyfhDH3lgi_G5rr7ID5W0-s,3875
unidecode/x004.py,sha256=85Sh4Rem7dRS7kIx6yGQqA7U7MxqCD40DGoqaob9MzI,4071
unidecode/x005.py,sha256=fxbl8HxRujaQRuiH-09LR5sCQaWzDgvnwwIJ5X8ephA,4013
unidecode/x006.py,sha256=4fZWzc5DGZVFD1zvHtkywMHykQTiFgA7M6LMowpaGI0,3961
unidecode/x007.py,sha256=k2kkTXdbKqMFOQy2TlGmRnoRYMCOvqTfjusGV76u9SE,4122
unidecode/x009.py,sha256=aZrWpXwsS2yIyO8oZIIN3Uo4j0XdpaJq5VGdCu3e8cc,4075
unidecode/x00a.py,sha256=UrXdvZ-qVMfWRSRW8HwrxfNXB-Jp0lgW2iPs6roZXz4,4121
unidecode/x00b.py,sha256=MjqdSyCxXLZ6wUrgeS8A-wop9S78EEK9e-BJ4HpAqLA,4132
unidecode/x00c.py,sha256=y-y0RqVolrIBfsj1DtT_rQUbmxxjoKnWlZDMPPC6Om4,4102
unidecode/x00d.py,sha256=OVfHvb44WS_aMXSWrZt-693xJ70L4sepcyJFIkl23TY,4121
unidecode/x00e.py,sha256=9N9w09V225dx1-e8w0HRaIoD66mpDhHgoQ-RlPZznmA,4092
unidecode/x00f.py,sha256=Vak8Z6Dy6ucFysFSNBt24ZE4h2ZSXQcBLiWCSC27bSA,4061
unidecode/x010.py,sha256=9vck5PTRcz5Q64F_2dIMQoPGEFTHWjvwq7ZE90nvOK4,4110
unidecode/x011.py,sha256=8EN-PZS-ythtQlU3HZYNkGePm3oWFzSOgOorkeQUBV4,4135
unidecode/x012.py,sha256=2onQfsL5e7X4RB9DKehMUcG81gaSav4UwGalXulnxOE,4318
unidecode/x013.py,sha256=Nl9CnUVkJkXBux5TEia5Vq1nPjTICUUyT77GhkChbrQ,4247
unidecode/x014.py,sha256=CkrW473dLq_AllpcK8teAUQSeglML_MZ6t9TsK5g9wE,4300
unidecode/x015.py,sha256=TB6O4l2qPxbmF2dejlxXLqX5tTfjl95cMYx1770GHs0,4329
unidecode/x016.py,sha256=M9kiUT0ScE5acj-tkomNZ0eND1lvau0i6tJOWepU1FA,4140
unidecode/x017.py,sha256=nsZP_7vWKOTYY8LOE535g67qwwaMdGblZ-79MbfTNX8,4190
unidecode/x018.py,sha256=_qBETpive71IdV6nD0md6KaSHaxaSpPUsTTHtVlE4KM,4099
unidecode/x01d.py,sha256=fzetc6KyWxK7rYl4XARhLKIoG0u6a7y39ooze17G9aw,3608
unidecode/x01e.py,sha256=oVdWd4v85k-Slc3V0i2NZ4i5G866X6Qw7bKXJDmbXig,3853
unidecode/x01f.py,sha256=pG-fs1iD7O3vSwIx6Ibz5MhpZI_NsQWEDlHiRpxgZC0,3899
unidecode/x020.py,sha256=k9PWwEJiWl7yUFTVR89cl2XuYqWDEiVfrXvSqOaeJH4,4010
unidecode/x021.py,sha256=Ns9H51Q51tDB-mRSERyMN1y2EiE7UPQx9K3BxwaFrQs,4012
unidecode/x022.py,sha256=OzIXC-VMfUskTtEe5_m3zpmgtKtJSDY0XBZ5C0codi8,4329
unidecode/x023.py,sha256=FFgn4hJ7Q4SbsxFtHUa8SuQ0VBJ9hkod5QTWaMmkk9U,4341
unidecode/x024.py,sha256=pMZqUxkwfgz9n9NXpUaAaNr-p9ACNCyfTQKo2PFJ11w,4049
unidecode/x025.py,sha256=-dvBV3byxxngHQcQx7Jjt9dwtehBaQwRoCnX3ZAzWe0,3871
unidecode/x026.py,sha256=N7i11hEwuiW9mSEp0Dk4Aa9iIsHsxAYhLAplqZnUMs0,4020
unidecode/x027.py,sha256=wZ1l328qv5BWbk-FPr34ayyJ4rWQG3EQIsPxd7GilFg,3783
unidecode/x028.py,sha256=FZPCZ9w3N3WOI42h2gHEQgVOAlLBNTZjMu_KQQkIMdk,5069
unidecode/x029.py,sha256=TlYRf7ym0R-L7DncmX4RNZX5kKctvGFLbtu8GPkxqpE,3584
unidecode/x02a.py,sha256=WD3uhv1sNhQB45ugiKUk4Btj3aj8qcH_kpwrV3jRWJw,3589
unidecode/x02c.py,sha256=zRG2Elxv5SlYrBpJb1NUz7WsJOFAkzwSi991cMyhnJs,3596
unidecode/x02e.py,sha256=_ntc-U9pwAkGQkFC3zdQgJNUZSv9W4115ouIbINGyw4,4461
unidecode/x02f.py,sha256=9cxem6MFm7acjUDcmYLk9lbxEyfP2xMjfE3c-6PUEbg,4572
unidecode/x030.py,sha256=aHVHcoSl5FcxX9QQALaW5n1zYJ0Ymap-wgWuzojXLyY,4037
unidecode/x031.py,sha256=vRlzxBKPo5s3ZDpUojrXelZKjTEwR8fsnm3vUeB5bp8,4125
unidecode/x032.py,sha256=BSZV_t8jVsWX_RsoqajWAqigQaHh3wXmoMG5wUZtgos,4485
unidecode/x033.py,sha256=ZD2k8XlsdicjcaptuQrRt0nXGAFRs7SnF8RidIR9Alg,4573
unidecode/x04d.py,sha256=d5K-HbR3Gg1VBdMk1GO6PJdoseRRIV5ef4aQbslnsBo,4528
unidecode/x04e.py,sha256=z04XMxt3y016alep4Xg8Zjh4cvBj6CddjD9Qv6sr6v4,4646
unidecode/x04f.py,sha256=zEf_S6bDF755svnPRWyreVf2Q4SekYMxIpGf1Jb2alc,4607
unidecode/x050.py,sha256=MNhEf7TRcQ2CdgkMcFloEGSbTtrsXK-6r6Ru7HDG6hU,4682
unidecode/x051.py,sha256=VY0jC10xdU7bm21Cig5omd7L-4hiSk_rk2UTR_yTF3g,4685
unidecode/x052.py,sha256=a09eo_5pL6jpU9TW-zG2w2iXTYp6awtQ4OxGnLdcwKg,4654
unidecode/x053.py,sha256=so5U-CQ5jRbp7AYZZPCdmkdnNtfNn_-Nl_761eBMtIU,4616
unidecode/x054.py,sha256=Htu6ZFPTTyBHUU1Ia-mc7Y3Dy4j1cp-fRwag0HGwmwk,4583
unidecode/x055.py,sha256=WzgwsrHInE1H-O519FOIybJpofzdyfu7w5NZ5I2OtQI,4599
unidecode/x056.py,sha256=t4ZVJlxic1vcqhrypLWRd3LyIfEuWoPIz343pCrfW7k,4615
unidecode/x057.py,sha256=ndJuRj4TnvSe6aLX-yzDYHnWEl1JfqA6HnQvzsf2Fyo,4631
unidecode/x058.py,sha256=nkaS7T1PVlhKlxVd-WrDw4Gx14mciLEZQrUt-9NpkD0,4678
unidecode/x059.py,sha256=9wAKGpczWuyqMb89px7Ldy1McHecDXd8RMJ7lXwcBCU,4644
unidecode/x05a.py,sha256=F150z3X248dkDgTq-0lyL-bLRqZZd7U0mkUI6PJRwwM,4636
unidecode/x05b.py,sha256=LeJj8prX04qvLHFaeG-b2YE9LqIbnUec6pFD-7E918c,4668
unidecode/x05c.py,sha256=Citt0KhdqvWkErFdpeSyg6x5CviH1opVRPSD6eBWjsA,4612
unidecode/x05d.py,sha256=w1vKjN5RWPiwP535FCiqakz1IbeB4MGe2ANVM_bcdq4,4670
unidecode/x05e.py,sha256=6Z7gnAXq9tVwvFIwh632oLQuEiHz1atcqIDREeoqldM,4668
unidecode/x05f.py,sha256=Ho5tdX7JErho7LjeVCxf29XlWeEpDt5RUJC3nbw2j8M,4660
unidecode/x060.py,sha256=2x6hmUwn_V3icd1kdfzkkDp5iEdmij7zpUWizfVIE7Q,4642
unidecode/x061.py,sha256=hwSoPcP4PLy5zPITLdSVaYGwt_fIH9kJPpshKJZi-LA,4662
unidecode/x062.py,sha256=rH9eYXX_t-Z4-pOy9-lyVm68zXt114X3lAIBI5JG_Qs,4620
unidecode/x063.py,sha256=n8aXYOPurrEbBzz4DAl-tFOFiqMJ-r1Yt3WpM3ZGTq0,4656
unidecode/x064.py,sha256=uBebjkUmgQVzK0tKWjxLZwQ1oC9KMEppv0W6caB8v1g,4655
unidecode/x065.py,sha256=cFnHSLoNwMG6PJvxWWeWShSkHoB9IYTS2LJCc8W0l4I,4638
unidecode/x066.py,sha256=gV2vx0TqIA44PBOzF02wetf3dxXcXmg8Jr2CtzxMDFU,4677
unidecode/x067.py,sha256=9ck2UFSv8UL3c0RHPTdV4Rzq7ogZVedwsMAYhGE1lmM,4635
unidecode/x068.py,sha256=aTAAeHLr5-VnMqNF0h9KC4tFOusV9PpWdywp7xllAA0,4674
unidecode/x069.py,sha256=8_VMN2vGqNAPrP8iPxICRI9PN81Hts21FM1A4n1_7to,4673
unidecode/x06a.py,sha256=e7ahJ-j5YvomZvQcIxfAMbHgijbKncYoTN9TakDQqig,4674
unidecode/x06b.py,sha256=lBRWVhTMJPBKWAyAT23PueHtw9wXTuUG9S2UVWjosr4,4608
unidecode/x06c.py,sha256=i8xXjlNwhXxvrHdnTbdy-jTfml_fD0uFURctA1BQKk0,4643
unidecode/x06d.py,sha256=BvgJd7TNj6cL6I5bjPXngi3tJPo0YfhISCHSXvfmsTk,4651
unidecode/x06e.py,sha256=jZ6VeQbASYGtx0QXySzZzRQf_LqtPAU6jhTo3U_nFTU,4640
unidecode/x06f.py,sha256=W0A95toB7w7pLrohCaet_d0-S3V84fjzTKgZ6vhUtok,4650
unidecode/x070.py,sha256=lVM1qXUltqIrKIi0WPH1F5Feuq4M007nm3lOkR_EB2s,4693
unidecode/x071.py,sha256=v2V3WNKPVEhuJ_RX6rZA45rFIukgMCJ8rqPoUwj05zc,4670
unidecode/x072.py,sha256=NhOkJEqApO9shkYgwdWVarVUDmWailI4N1vNiLGkOSM,4659
unidecode/x073.py,sha256=loYg-ZrK1rdy2CkbQfd4qydW8lCeiNywEzT6gLTN-GI,4646
unidecode/x074.py,sha256=FLIumUZcrCy9Y6eXL5BLpa_hE5HMGbPo-PWtFBh-rBs,4696
unidecode/x075.py,sha256=P3SrhI5BQ5sJ66hyu_LWDONpuzLZJBKsl7f-A37sJXc,4675
unidecode/x076.py,sha256=3enaJAMy951MK_yBnhJiOmoTdzU0eJ2uEgoRgZRTUn0,4639
unidecode/x077.py,sha256=XH_TjHspGSnu4v3qgNOqFNMvZKZcA-HH6q_GWB48Cos,4675
unidecode/x078.py,sha256=L9XOo8UH_x-prQC_edArnTs-j2asZR940sLaPST2iy0,4648
unidecode/x079.py,sha256=6e29mgaXPmtMrU8_QRfmZpWZW8o-yEeeIOgAHOoepo8,4602
unidecode/x07a.py,sha256=fMACYuAsIumgE8XkYnM2795HtFA6weH4YLn7jgJLbbw,4669
unidecode/x07b.py,sha256=UH8ZXsR11pINz0AaxJ8etTTyaXiCawjuLcctzp4FwZc,4669
unidecode/x07c.py,sha256=fpqGGsF0-rEVxBeVFf0RM8fjweUlg-9UduAtxAjL5vc,4663
unidecode/x07d.py,sha256=EKFrTQTNFLGnsm3qI76ALxrxGCcDuyEbapi9j9jy1B4,4678
unidecode/x07e.py,sha256=n2OG5xe8I-V0pn98Q2E-7PbXSQQI72ozNNUXFnMZHvM,4682
unidecode/x07f.py,sha256=g455qjG3LBu9ujuuTt5xrRn2djK_iVXAJ4dUVl-bYfs,4664
unidecode/x080.py,sha256=Fuqy0RgnvfvFFdmGiaHwK2B60UCU5Aw4fyF79kBfhr8,4651
unidecode/x081.py,sha256=rQg3Hjqo61bEKCpb7TybHDLv2Hgu-_ghKdW6xk9xOhU,4673
unidecode/x082.py,sha256=sRjOiGrYy2RtqqH_xQdL6_i17I-wJZI6ag7404mL4M8,4649
unidecode/x083.py,sha256=8hCxGV2o1kFA6hMFvk4Ici_QKynDCYjDWjzCuMyfmHI,4643
unidecode/x084.py,sha256=jIDgDPhwssUcLgA7N0ZINrB_qZn1P4C7lHyvP7yKA6o,4646
unidecode/x085.py,sha256=5063XP5F72OEYuqjETqFlN_7IaU1A0feVuvIup9R0rI,4636
unidecode/x086.py,sha256=ovNLdMRRHm4jngDGXmwG66zZH6l-V-uMtoYnXB_W_QY,4614
unidecode/x087.py,sha256=-VmLJWGVMGF9BxYD8VcTc8TS83W27qcERuycKCfpLBc,4649
unidecode/x088.py,sha256=E63aAVUF0B1f-5XL7fOUWqXL2juUJLU9TwO_LHKvd2Q,4645
unidecode/x089.py,sha256=mrKWneiJ2hIFkM4cu4bU0IQMvTRWgXZ8rsDW575jp9A,4628
unidecode/x08a.py,sha256=NjMp9ck824PXG2gcJXfi_9oQCFgXhhiallO3bYCtXCE,4647
unidecode/x08b.py,sha256=W1kAtliqhm5x28Kxc6kUrjzqo-xc_HmYY0BjHhEV2x4,4643
unidecode/x08c.py,sha256=aDhwugSrvtUhDdwbACfjS0EkBqGojwny-qbrQRJfPhA,4630
unidecode/x08d.py,sha256=oyydBTJng0ip79lUTBHoTIqGjxmHTb0twkPZqE7LxeU,4636
unidecode/x08e.py,sha256=w-FysLX-LgmggEuhmPZjyT67-i4_EB8Hx44i_X_Q3Nc,4659
unidecode/x08f.py,sha256=adygkkCQn4W6YhJUknf2O-2eM_LzH1LfjjpgenbPh80,4651
unidecode/x090.py,sha256=j-5qrCDDHYKJnbHL5A_fm5ISrdFVgDR5bXQbP18G-14,4631
unidecode/x091.py,sha256=S8jlVjjPNLPCsSXK8qKXqGGoTLj-LWje5J-f-2AAEXY,4655
unidecode/x092.py,sha256=uSF8NVYh_UGJE2pcl4JrVU2Prb-T2crGLCE4XQe7DfQ,4648
unidecode/x093.py,sha256=oMiZM1VfvfKnwVKSJh28iynWJG8iQtKu_1zsrbPdPNs,4666
unidecode/x094.py,sha256=MShhNv4E9bj9jmQEtWHi_8ZjeS4p2Iz6j3j9kJb5rK0,4661
unidecode/x095.py,sha256=BQ1R6QwhWjC_Eb7zIbWP0A2ro7bI-t6fTAQWLFMmzAM,4671
unidecode/x096.py,sha256=N9hLQrZhbTXC9boxDcWu3WESTIB6En82kJkBY-6qBRI,4610
unidecode/x097.py,sha256=K4waHuw6tNchmcY7Glc-Su6cTG3-iF_X_egYuG-E4fA,4643
unidecode/x098.py,sha256=CFFcb5gpK7FBqPsFwRoLP0YcYFJBGgh3l-Rf4PzXbjc,4645
unidecode/x099.py,sha256=e9w1-tsa3tCYYQXn71Ey1bg_n2UYudMQ0y-zSSCdajE,4629
unidecode/x09a.py,sha256=Z8pQsTc62CWgm0JPnj3kokKKf9_qfzRpo0u5iH61CaE,4623
unidecode/x09b.py,sha256=piSZ2AAK5GavHJEa8qwI_lrldSSgNhxYvLua0Au_1aA,4655
unidecode/x09c.py,sha256=NveMhN85_Cm4H1cnfHDTcnSj675MOVBq9Lkjpw3YxA0,4659
unidecode/x09d.py,sha256=2Sj376QIs8rJ7VDrPW5RELhkJ8LI5JI4NRbFcl4DXlE,4632
unidecode/x09e.py,sha256=z1bF6AML_d20dQm9HD7YBrnKqTQVjeTTI999hcLEe0M,4615
unidecode/x09f.py,sha256=T-pS5hli39rA1GDDqZYfyHRupPALqklPXA-1i8pgc1I,4509
unidecode/x0a0.py,sha256=EpopPuuocybgCcpX19Ii-udqsPXJjSces3360lqJ8vs,4428
unidecode/x0a1.py,sha256=0hvF77d5E640SujjdHVqy5gMUH85gEdOv80eRvCEAGM,4469
unidecode/x0a2.py,sha256=9Icpfk_ElebYd_xN09OMziFrpAGPXEUNVmawpnhbBaQ,4503
unidecode/x0a3.py,sha256=G1lPrnCqYz0s4wsSa1qM0WgrZBWO_beRk3AgK0iVZLA,4521
unidecode/x0a4.py,sha256=vS-wPpkfMmwRJjXTBYM4BGpzBfDoKWMadNNWaTPYcpI,4437
unidecode/x0ac.py,sha256=wj7hl88VlCdc_eGpOL4m4CBJILyQqd9atObC5Xvd0aA,4709
unidecode/x0ad.py,sha256=Rz5rn0fM-CqRjaN4TvSq_1StAQdyAF2WX3cUvcQHaWU,4766
unidecode/x0ae.py,sha256=jNIBVB-Pw2ZNihAeyWbDIEq9Yt9zlhdfGylfvAaxUks,4875
unidecode/x0af.py,sha256=Am5YC8Zfrun5NUKxU6LrU2-d5GgkGSBs7fZt2rqSi74,5012
unidecode/x0b0.py,sha256=1bgHerCDAqIcJHYeGddJjJfRWiHCKtU2B0J-XGvcbbc,4853
unidecode/x0b1.py,sha256=Six-lzGdvgJx4YsIa0lTusnBEV1zbCKQCquq17TDJoQ,4746
unidecode/x0b2.py,sha256=HQDbmglNi4QfiRSGucUclgq_4FGpRjbJkWU1JTLAFGc,4680
unidecode/x0b3.py,sha256=1lqxghVZiiStOAx1IG_vc1zZTXrAa7Z__QY6ZWvo2aA,4741
unidecode/x0b4.py,sha256=V6BNSTxpyP8VuqF7x5z7bpF3MQAkwZfKtEu6NFr_vSg,4762
unidecode/x0b5.py,sha256=9NVd2hNLyRlLceVlznba1dreqBGeKU_0gzmkgAw0gyg,4919
unidecode/x0b6.py,sha256=V_vRsB0GICu9hqhO4pnbPWreDSevJ3bbmLRJkuQUxnE,4996
unidecode/x0b7.py,sha256=CwBaCBICyVagnFjUpkwabuDvBJw7gAeqkSRpfBAVv8s,4833
unidecode/x0b8.py,sha256=xYp-xy2LIwq95OWyS9vYMc_Z5od9dud0W1dxeg4P_Jk,4714
unidecode/x0b9.py,sha256=z3hKNzBq_MeK9V3AyQzaY58cgi0-VGOsLk3-UFmszLQ,4704
unidecode/x0ba.py,sha256=4gubifoBeJUUrwXEI4litJygekufEycmWDLrJ-Qvs14,4765
unidecode/x0bb.py,sha256=bsCTABUdC6yTn8_0vhYe5jRP1z_BoAdificB8Y1c1hA,4730
unidecode/x0bc.py,sha256=AhQvAz7yHlbQ_4c2KOIisq07eZJ5JQn6cV8I31oT9kg,4707
unidecode/x0bd.py,sha256=IGtyVxIUr1mU3hokn6iUDJhXZezQozVvfWOyf4Pa5dI,4752
unidecode/x0be.py,sha256=1D-hXu3p3wvOnGVMjEqVsrltYe7UuSwit2yqN5eFizc,4849
unidecode/x0bf.py,sha256=NkEXqr2ER3BNFkTasDV9CHnkRBuX_Ao5OHGv_NgKAew,5010
unidecode/x0c0.py,sha256=zDlHpyM0omza5TqGLb8Rhl7Wd-LlV1AjvH_xdnEnNFw,4856
unidecode/x0c1.py,sha256=AC6xJyx9UblKAGNqGN7AH2Idb3_3vbc-I5U0Myig5fA,4765
unidecode/x0c2.py,sha256=siRYLA8Cv9Z8XsRp3WQOBdRrPkjJOuEh8z1-3SMXOzQ,4710
unidecode/x0c3.py,sha256=hlAFe6lsz0aLMixlpeFjV4I-WTIiA3B2BU58yGlTwRg,4975
unidecode/x0c4.py,sha256=z3xZwSkf5ru1FCdBMHOr5fyglzVdyPhQVtWjq9xInsQ,5024
unidecode/x0c5.py,sha256=F-DR0eVMRkemOnNXOtDjI5i6gW9136XLmWM_yMVvc84,4581
unidecode/x0c6.py,sha256=7p_jMrHf3WUa_zANms-RGVN1bAeshgWLkC16_VcSawA,4490
unidecode/x0c7.py,sha256=5eOAq4jFsPZ-zKO7lHzAGj_EvXdaMC4Kud7gvE-B7Tg,4564
unidecode/x0c8.py,sha256=wltKvhBgn51jULzwUnEbmyDuK9JvQpQee0uTKK42-20,4733
unidecode/x0c9.py,sha256=GoARON07wCoHN2wRHb5fvzqE9L3Yme2hKeciynUIAIk,4722
unidecode/x0ca.py,sha256=BsBZTNj3npIkdo3L9pSEX7XvDT68KV7wFtOOwyEb2So,5007
unidecode/x0cb.py,sha256=8T7vnJMRmYGyySYthMWz0bgN-MremktGImjejodFeMo,5012
unidecode/x0cc.py,sha256=GKoHN-4vL4Y3EL42G0xbN74Tgspew1oMvxQtsIa3ess,4749
unidecode/x0cd.py,sha256=7sZ05OjugbaombMRDYOVxgstZbXMcuX5kHFheKv4W2E,4738
unidecode/x0ce.py,sha256=mOEHFrsAwIvcTnh7OKVK5qbuXUXHfJOR7D4FtXsQmao,4708
unidecode/x0cf.py,sha256=H9PeYcbOG68F_yc7zsELUuN05ANfFNOUX-e3-gzx7Ow,4713
unidecode/x0d0.py,sha256=eULqcGHPmaoEdl0EwRB5wWSu8M43bp4HoFo5gGljacg,4706
unidecode/x0d1.py,sha256=BClLDAjPgsAX6MJCsuHfmfuhH9qfzUy_vb-d9zBs3Oc,4767
unidecode/x0d2.py,sha256=e74nqGo4E4sF1sy8qBFu2ecWoRfJdoXI1xRFRPqYEz8,4724
unidecode/x0d3.py,sha256=8-UmvJ3-ILXo9d3GA-ReOE4OfUenL3tVUJYldZ9gHu0,4705
unidecode/x0d4.py,sha256=fwUmzksoddTKB8fH2rZMxRK3pJtLrxhcrYpHfBauAwE,4758
unidecode/x0d5.py,sha256=rANSL5ndzLgSgYJQNEw57AfXpicRe7pvHRlKTPb4-QQ,4680
unidecode/x0d6.py,sha256=fT8_cRzp7y60IIhn87kM9lLehKGAg5wYmfFOwgGp6e0,4765
unidecode/x0d7.py,sha256=0zY-KFUnKk-CuYpb1zSYj3QdS6UsfZ_lsemOuRSeDwM,4559
unidecode/x0f9.py,sha256=2PD0_fpDnaFO9ftICjYSOhnjAfBppjsj1TcLIuYjnCI,4567
unidecode/x0fa.py,sha256=6X94S2GbR6XOwkzx2SYynZzBMHAbRHC9GvW_vXaTDRU,4406
unidecode/x0fb.py,sha256=qaAP_vF3_-M--wKkyb0DfBjIdnGKuk4GQLxV7fp2-_4,3838
unidecode/x0fc.py,sha256=KcyQnyv7gxNeVcAnRwQrm4NlabZE3CrnmtLqXj_7te8,3595
unidecode/x0fd.py,sha256=fq1BGexi73J3QPUwnL4_LZT4uh8mxYqAgMNtofbfVKE,3764
unidecode/x0fe.py,sha256=mpt-K-jqk36iouLz5HOcthOQJczqsca9aYkEGhJ6Wk4,3825
unidecode/x0ff.py,sha256=KGE3aIdJCR-3kAVaXOyuY44M-KfCA9UQt4B9AlEJiq0,3983
unidecode/x1d4.py,sha256=ZS_7TAX87oGtT7b8tIlWcmeHChVINZ4W9jl9wA6JfmU,3839
unidecode/x1d5.py,sha256=Cuh3bUzoyp8c8lJ7Y_gLmAKQ03XHMCsgTZf3uE2-G2o,3839
unidecode/x1d6.py,sha256=6fIRGTFODh3kysq10Xr_8EmG6HZuWztLjr4vitkk8OQ,3974
unidecode/x1d7.py,sha256=jIs9oZBMbSh5OQvPiyUI4aAdji9EbzeOXigTq4Oq_kY,3645
unidecode/x1f1.py,sha256=rMT7fIIvB6SkwUEbRiSt_lAgr_W06JQ9Rn6pSPVnl0E,3694
../../../bin/unidecode,sha256=ffhNYgwMn5V-dok9wGlnrDOw2knaBd5d7fpXO2ltK8I,262
Unidecode-1.0.22.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
unidecode/__pycache__/__init__.cpython-36.pyc,,
unidecode/__pycache__/util.cpython-36.pyc,,
unidecode/__pycache__/x000.cpython-36.pyc,,
unidecode/__pycache__/x001.cpython-36.pyc,,
unidecode/__pycache__/x002.cpython-36.pyc,,
unidecode/__pycache__/x003.cpython-36.pyc,,
unidecode/__pycache__/x004.cpython-36.pyc,,
unidecode/__pycache__/x005.cpython-36.pyc,,
unidecode/__pycache__/x006.cpython-36.pyc,,
unidecode/__pycache__/x007.cpython-36.pyc,,
unidecode/__pycache__/x009.cpython-36.pyc,,
unidecode/__pycache__/x00a.cpython-36.pyc,,
unidecode/__pycache__/x00b.cpython-36.pyc,,
unidecode/__pycache__/x00c.cpython-36.pyc,,
unidecode/__pycache__/x00d.cpython-36.pyc,,
unidecode/__pycache__/x00e.cpython-36.pyc,,
unidecode/__pycache__/x00f.cpython-36.pyc,,
unidecode/__pycache__/x010.cpython-36.pyc,,
unidecode/__pycache__/x011.cpython-36.pyc,,
unidecode/__pycache__/x012.cpython-36.pyc,,
unidecode/__pycache__/x013.cpython-36.pyc,,
unidecode/__pycache__/x014.cpython-36.pyc,,
unidecode/__pycache__/x015.cpython-36.pyc,,
unidecode/__pycache__/x016.cpython-36.pyc,,
unidecode/__pycache__/x017.cpython-36.pyc,,
unidecode/__pycache__/x018.cpython-36.pyc,,
unidecode/__pycache__/x01d.cpython-36.pyc,,
unidecode/__pycache__/x01e.cpython-36.pyc,,
unidecode/__pycache__/x01f.cpython-36.pyc,,
unidecode/__pycache__/x020.cpython-36.pyc,,
unidecode/__pycache__/x021.cpython-36.pyc,,
unidecode/__pycache__/x022.cpython-36.pyc,,
unidecode/__pycache__/x023.cpython-36.pyc,,
unidecode/__pycache__/x024.cpython-36.pyc,,
unidecode/__pycache__/x025.cpython-36.pyc,,
unidecode/__pycache__/x026.cpython-36.pyc,,
unidecode/__pycache__/x027.cpython-36.pyc,,
unidecode/__pycache__/x028.cpython-36.pyc,,
unidecode/__pycache__/x029.cpython-36.pyc,,
unidecode/__pycache__/x02a.cpython-36.pyc,,
unidecode/__pycache__/x02c.cpython-36.pyc,,
unidecode/__pycache__/x02e.cpython-36.pyc,,
unidecode/__pycache__/x02f.cpython-36.pyc,,
unidecode/__pycache__/x030.cpython-36.pyc,,
unidecode/__pycache__/x031.cpython-36.pyc,,
unidecode/__pycache__/x032.cpython-36.pyc,,
unidecode/__pycache__/x033.cpython-36.pyc,,
unidecode/__pycache__/x04d.cpython-36.pyc,,
unidecode/__pycache__/x04e.cpython-36.pyc,,
unidecode/__pycache__/x04f.cpython-36.pyc,,
unidecode/__pycache__/x050.cpython-36.pyc,,
unidecode/__pycache__/x051.cpython-36.pyc,,
unidecode/__pycache__/x052.cpython-36.pyc,,
unidecode/__pycache__/x053.cpython-36.pyc,,
unidecode/__pycache__/x054.cpython-36.pyc,,
unidecode/__pycache__/x055.cpython-36.pyc,,
unidecode/__pycache__/x056.cpython-36.pyc,,
unidecode/__pycache__/x057.cpython-36.pyc,,
unidecode/__pycache__/x058.cpython-36.pyc,,
unidecode/__pycache__/x059.cpython-36.pyc,,
unidecode/__pycache__/x05a.cpython-36.pyc,,
unidecode/__pycache__/x05b.cpython-36.pyc,,
unidecode/__pycache__/x05c.cpython-36.pyc,,
unidecode/__pycache__/x05d.cpython-36.pyc,,
unidecode/__pycache__/x05e.cpython-36.pyc,,
unidecode/__pycache__/x05f.cpython-36.pyc,,
unidecode/__pycache__/x060.cpython-36.pyc,,
unidecode/__pycache__/x061.cpython-36.pyc,,
unidecode/__pycache__/x062.cpython-36.pyc,,
unidecode/__pycache__/x063.cpython-36.pyc,,
unidecode/__pycache__/x064.cpython-36.pyc,,
unidecode/__pycache__/x065.cpython-36.pyc,,
unidecode/__pycache__/x066.cpython-36.pyc,,
unidecode/__pycache__/x067.cpython-36.pyc,,
unidecode/__pycache__/x068.cpython-36.pyc,,
unidecode/__pycache__/x069.cpython-36.pyc,,
unidecode/__pycache__/x06a.cpython-36.pyc,,
unidecode/__pycache__/x06b.cpython-36.pyc,,
unidecode/__pycache__/x06c.cpython-36.pyc,,
unidecode/__pycache__/x06d.cpython-36.pyc,,
unidecode/__pycache__/x06e.cpython-36.pyc,,
unidecode/__pycache__/x06f.cpython-36.pyc,,
unidecode/__pycache__/x070.cpython-36.pyc,,
unidecode/__pycache__/x071.cpython-36.pyc,,
unidecode/__pycache__/x072.cpython-36.pyc,,
unidecode/__pycache__/x073.cpython-36.pyc,,
unidecode/__pycache__/x074.cpython-36.pyc,,
unidecode/__pycache__/x075.cpython-36.pyc,,
unidecode/__pycache__/x076.cpython-36.pyc,,
unidecode/__pycache__/x077.cpython-36.pyc,,
unidecode/__pycache__/x078.cpython-36.pyc,,
unidecode/__pycache__/x079.cpython-36.pyc,,
unidecode/__pycache__/x07a.cpython-36.pyc,,
unidecode/__pycache__/x07b.cpython-36.pyc,,
unidecode/__pycache__/x07c.cpython-36.pyc,,
unidecode/__pycache__/x07d.cpython-36.pyc,,
unidecode/__pycache__/x07e.cpython-36.pyc,,
unidecode/__pycache__/x07f.cpython-36.pyc,,
unidecode/__pycache__/x080.cpython-36.pyc,,
unidecode/__pycache__/x081.cpython-36.pyc,,
unidecode/__pycache__/x082.cpython-36.pyc,,
unidecode/__pycache__/x083.cpython-36.pyc,,
unidecode/__pycache__/x084.cpython-36.pyc,,
unidecode/__pycache__/x085.cpython-36.pyc,,
unidecode/__pycache__/x086.cpython-36.pyc,,
unidecode/__pycache__/x087.cpython-36.pyc,,
unidecode/__pycache__/x088.cpython-36.pyc,,
unidecode/__pycache__/x089.cpython-36.pyc,,
unidecode/__pycache__/x08a.cpython-36.pyc,,
unidecode/__pycache__/x08b.cpython-36.pyc,,
unidecode/__pycache__/x08c.cpython-36.pyc,,
unidecode/__pycache__/x08d.cpython-36.pyc,,
unidecode/__pycache__/x08e.cpython-36.pyc,,
unidecode/__pycache__/x08f.cpython-36.pyc,,
unidecode/__pycache__/x090.cpython-36.pyc,,
unidecode/__pycache__/x091.cpython-36.pyc,,
unidecode/__pycache__/x092.cpython-36.pyc,,
unidecode/__pycache__/x093.cpython-36.pyc,,
unidecode/__pycache__/x094.cpython-36.pyc,,
unidecode/__pycache__/x095.cpython-36.pyc,,
unidecode/__pycache__/x096.cpython-36.pyc,,
unidecode/__pycache__/x097.cpython-36.pyc,,
unidecode/__pycache__/x098.cpython-36.pyc,,
unidecode/__pycache__/x099.cpython-36.pyc,,
unidecode/__pycache__/x09a.cpython-36.pyc,,
unidecode/__pycache__/x09b.cpython-36.pyc,,
unidecode/__pycache__/x09c.cpython-36.pyc,,
unidecode/__pycache__/x09d.cpython-36.pyc,,
unidecode/__pycache__/x09e.cpython-36.pyc,,
unidecode/__pycache__/x09f.cpython-36.pyc,,
unidecode/__pycache__/x0a0.cpython-36.pyc,,
unidecode/__pycache__/x0a1.cpython-36.pyc,,
unidecode/__pycache__/x0a2.cpython-36.pyc,,
unidecode/__pycache__/x0a3.cpython-36.pyc,,
unidecode/__pycache__/x0a4.cpython-36.pyc,,
unidecode/__pycache__/x0ac.cpython-36.pyc,,
unidecode/__pycache__/x0ad.cpython-36.pyc,,
unidecode/__pycache__/x0ae.cpython-36.pyc,,
unidecode/__pycache__/x0af.cpython-36.pyc,,
unidecode/__pycache__/x0b0.cpython-36.pyc,,
unidecode/__pycache__/x0b1.cpython-36.pyc,,
unidecode/__pycache__/x0b2.cpython-36.pyc,,
unidecode/__pycache__/x0b3.cpython-36.pyc,,
unidecode/__pycache__/x0b4.cpython-36.pyc,,
unidecode/__pycache__/x0b5.cpython-36.pyc,,
unidecode/__pycache__/x0b6.cpython-36.pyc,,
unidecode/__pycache__/x0b7.cpython-36.pyc,,
unidecode/__pycache__/x0b8.cpython-36.pyc,,
unidecode/__pycache__/x0b9.cpython-36.pyc,,
unidecode/__pycache__/x0ba.cpython-36.pyc,,
unidecode/__pycache__/x0bb.cpython-36.pyc,,
unidecode/__pycache__/x0bc.cpython-36.pyc,,
unidecode/__pycache__/x0bd.cpython-36.pyc,,
unidecode/__pycache__/x0be.cpython-36.pyc,,
unidecode/__pycache__/x0bf.cpython-36.pyc,,
unidecode/__pycache__/x0c0.cpython-36.pyc,,
unidecode/__pycache__/x0c1.cpython-36.pyc,,
unidecode/__pycache__/x0c2.cpython-36.pyc,,
unidecode/__pycache__/x0c3.cpython-36.pyc,,
unidecode/__pycache__/x0c4.cpython-36.pyc,,
unidecode/__pycache__/x0c5.cpython-36.pyc,,
unidecode/__pycache__/x0c6.cpython-36.pyc,,
unidecode/__pycache__/x0c7.cpython-36.pyc,,
unidecode/__pycache__/x0c8.cpython-36.pyc,,
unidecode/__pycache__/x0c9.cpython-36.pyc,,
unidecode/__pycache__/x0ca.cpython-36.pyc,,
unidecode/__pycache__/x0cb.cpython-36.pyc,,
unidecode/__pycache__/x0cc.cpython-36.pyc,,
unidecode/__pycache__/x0cd.cpython-36.pyc,,
unidecode/__pycache__/x0ce.cpython-36.pyc,,
unidecode/__pycache__/x0cf.cpython-36.pyc,,
unidecode/__pycache__/x0d0.cpython-36.pyc,,
unidecode/__pycache__/x0d1.cpython-36.pyc,,
unidecode/__pycache__/x0d2.cpython-36.pyc,,
unidecode/__pycache__/x0d3.cpython-36.pyc,,
unidecode/__pycache__/x0d4.cpython-36.pyc,,
unidecode/__pycache__/x0d5.cpython-36.pyc,,
unidecode/__pycache__/x0d6.cpython-36.pyc,,
unidecode/__pycache__/x0d7.cpython-36.pyc,,
unidecode/__pycache__/x0f9.cpython-36.pyc,,
unidecode/__pycache__/x0fa.cpython-36.pyc,,
unidecode/__pycache__/x0fb.cpython-36.pyc,,
unidecode/__pycache__/x0fc.cpython-36.pyc,,
unidecode/__pycache__/x0fd.cpython-36.pyc,,
unidecode/__pycache__/x0fe.cpython-36.pyc,,
unidecode/__pycache__/x0ff.cpython-36.pyc,,
unidecode/__pycache__/x1d4.cpython-36.pyc,,
unidecode/__pycache__/x1d5.cpython-36.pyc,,
unidecode/__pycache__/x1d6.cpython-36.pyc,,
unidecode/__pycache__/x1d7.cpython-36.pyc,,
unidecode/__pycache__/x1f1.cpython-36.pyc,,

View file

@@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@@ -1,3 +0,0 @@
[console_scripts]
unidecode = unidecode.util:main

View file

@@ -1 +0,0 @@
{"classifiers": ["License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Text Processing", "Topic :: Text Processing :: Filters"], "extensions": {"python.commands": {"wrap_console": {"unidecode": "unidecode.util:main"}}, "python.details": {"contacts": [{"email": "tomaz.solc@tablix.org", "name": "Tomaz Solc", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}}, "python.exports": {"console_scripts": {"unidecode": "unidecode.util:main"}}}, "generator": "bdist_wheel (0.30.0)", "license": "GPL", "metadata_version": "2.0", "name": "Unidecode", "summary": "ASCII transliterations of Unicode text", "version": "1.0.22"}

View file

@@ -1,93 +0,0 @@
.. image:: http://docs.wand-py.org/en/latest/_static/wand.png
:width: 120
:height: 120
Wand_
=====
Wand is a simple ``ctypes``-based ImageMagick_ binding for Python.
It does not currently cover all of the functionality of the MagickWand API.
It works on Python 2.6, 2.7, 3.2--3.5, and PyPy.
You can install the package from PyPI_ by using ``pip``:
.. code-block:: console
$ pip install Wand
Or, if you would like to live on the bleeding edge, check out the head
revision of the source code from the `GitHub repository`__:
.. code-block:: console
$ git clone git://github.com/dahlia/wand.git
$ cd wand/
$ python setup.py install
.. _Wand: http://wand-py.org/
.. _ImageMagick: http://www.imagemagick.org/
.. _PyPI: https://pypi.python.org/pypi/Wand
__ https://github.com/dahlia/wand
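Once installed, a minimal usage sketch looks like the following
(``input.jpg`` is a placeholder file name; ``Image``, ``resize()`` and
``save()`` are provided by the ``wand.image`` module):
.. code-block:: python
    from wand.image import Image
    # open an image, scale it down, and write the result to a new file
    with Image(filename='input.jpg') as img:
        img.resize(200, 150)
        img.save(filename='output.jpg')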
Docs
----
Recent version
http://docs.wand-py.org/
Development version
http://docs.wand-py.org/en/latest/
.. image:: https://readthedocs.org/projects/wand/badge/
:alt: Documentation Status
:target: http://docs.wand-py.org/en/latest/
Community
---------
Website
http://wand-py.org/
GitHub
https://github.com/dahlia/wand
Package Index (Cheeseshop)
https://pypi.python.org/pypi/Wand
.. image:: https://badge.fury.io/py/Wand.svg?
:alt: Latest PyPI version
:target: https://pypi.python.org/pypi/Wand
Mailing list
wand@librelist.com
List archive
http://librelist.com/browser/wand/
http://dir.gmane.org/gmane.comp.python.wand
Stack Overflow tag (Q&A)
http://stackoverflow.com/questions/tagged/wand
Quora topic (Q&A)
https://www.quora.com/Wand-ImageMagick-binding
IRC
`irc://irc.freenode.net/wand <http://webchat.freenode.net/?channels=wand>`_
Continuous Integration (Travis CI)
https://travis-ci.org/dahlia/wand
.. image:: https://secure.travis-ci.org/dahlia/wand.svg?branch=master
:alt: Build Status
:target: https://travis-ci.org/dahlia/wand
Code Coverage
https://coveralls.io/r/dahlia/wand
.. image:: https://img.shields.io/coveralls/dahlia/wand.svg?style=flat
:alt: Coverage Status
:target: https://coveralls.io/r/dahlia/wand

View file

@@ -1,123 +0,0 @@
Metadata-Version: 2.0
Name: Wand
Version: 0.4.4
Summary: Ctypes-based simple MagickWand API binding for Python
Home-page: http://wand-py.org/
Author: Hong Minhee
Author-email: hongminhee@member.fsf.org
Maintainer: Hong Minhee
Maintainer-email: hongminhee@member.fsf.org
License: MIT License
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: Python :: Implementation :: Stackless
Classifier: Topic :: Multimedia :: Graphics
Provides-Extra: doc
Provides-Extra: doc
Requires-Dist: Sphinx (>=1.0); extra == 'doc'
.. image:: http://docs.wand-py.org/en/latest/_static/wand.png
:width: 120
:height: 120
Wand_
=====
Wand is a simple ``ctypes``-based ImageMagick_ binding for Python.
It does not currently cover all of the functionality of the MagickWand API.
It works on Python 2.6, 2.7, 3.2--3.5, and PyPy.
You can install the package from PyPI_ by using ``pip``:
.. code-block:: console
$ pip install Wand
Or, if you would like to live on the bleeding edge, check out the head
revision of the source code from the `GitHub repository`__:
.. code-block:: console
$ git clone git://github.com/dahlia/wand.git
$ cd wand/
$ python setup.py install
.. _Wand: http://wand-py.org/
.. _ImageMagick: http://www.imagemagick.org/
.. _PyPI: https://pypi.python.org/pypi/Wand
__ https://github.com/dahlia/wand
Docs
----
Recent version
http://docs.wand-py.org/
Development version
http://docs.wand-py.org/en/latest/
.. image:: https://readthedocs.org/projects/wand/badge/
:alt: Documentation Status
:target: http://docs.wand-py.org/en/latest/
Community
---------
Website
http://wand-py.org/
GitHub
https://github.com/dahlia/wand
Package Index (Cheeseshop)
https://pypi.python.org/pypi/Wand
.. image:: https://badge.fury.io/py/Wand.svg?
:alt: Latest PyPI version
:target: https://pypi.python.org/pypi/Wand
Mailing list
wand@librelist.com
List archive
http://librelist.com/browser/wand/
http://dir.gmane.org/gmane.comp.python.wand
Stack Overflow tag (Q&A)
http://stackoverflow.com/questions/tagged/wand
Quora topic (Q&A)
https://www.quora.com/Wand-ImageMagick-binding
IRC
`irc://irc.freenode.net/wand <http://webchat.freenode.net/?channels=wand>`_
Continuous Integration (Travis CI)
https://travis-ci.org/dahlia/wand
.. image:: https://secure.travis-ci.org/dahlia/wand.svg?branch=master
:alt: Build Status
:target: https://travis-ci.org/dahlia/wand
Code Coverage
https://coveralls.io/r/dahlia/wand
.. image:: https://img.shields.io/coveralls/dahlia/wand.svg?style=flat
:alt: Coverage Status
:target: https://coveralls.io/r/dahlia/wand

View file

@@ -1,32 +0,0 @@
../../../README.rst,sha256=1IUFOLw3u6GbfJtY8sPM1n0b-1eImGHtfCod41TbwqE,2135
Wand-0.4.4.dist-info/DESCRIPTION.rst,sha256=rBao1vVM4Y4j2fxBBWNO28H7sVBptBne2f7kVcl8KnU,2137
Wand-0.4.4.dist-info/METADATA,sha256=xgYWnGBDvBMSW2mZDAiE6PX28Qr70LYZbM6OYyrarss,3338
Wand-0.4.4.dist-info/RECORD,,
Wand-0.4.4.dist-info/WHEEL,sha256=0mO7-aKM6K9CHeURc5NDYZyLWH5lmR-r4TtPinHxz7Y,93
Wand-0.4.4.dist-info/metadata.json,sha256=88lH9TYqE4SR-Rmhak1dXLPuCGnWTbgKIP95BavCIhQ,1414
Wand-0.4.4.dist-info/top_level.txt,sha256=uFTymN2uxamdZLu2fxZzaBcGwv7WW9v60YcsATzndig,5
wand/__init__.py,sha256=bEmSKTbdilJXM5PTgsuaqvpliBlmy2of5f77SJMKRh8,202
wand/api.py,sha256=yEyvaZRmI5TGIZYNlxpgcwYw5O7Fg8T3idbYViZz07A,58919
wand/color.py,sha256=WfDWLirgXUHkDSRU0-jvBC-Po89rY2zv0N8AFA-7lHg,9128
wand/compat.py,sha256=I3IuGsec0RHQBgzkfTXvvlwCMT9GmkZfjmQy6QM916Y,3577
wand/display.py,sha256=H92xA0dOScwrcuMP7i_84FwLhAROysY0LvPv55GR3Z0,2338
wand/drawing.py,sha256=EAqY4NWzXih4HRAaWv_JpYdOPyuJOCEhTzXcjhq_feM,77708
wand/exceptions.py,sha256=6JwI8ASWaN1BI3hDfLtmbcQEFfOIGpq5qM7je3YuVjc,3961
wand/font.py,sha256=Vj_TiueYSURmeMBTB8O6CyDDEp4_gdJX9Ya7ybYCYQs,3231
wand/image.py,sha256=eKlYImV6IWMZPrs6F_bwC0b86voR5aaGeAmNGMUAj-c,140070
wand/resource.py,sha256=HhdcQvMhS8PSa6vZbo_IgXD6TUERfw7-nQue95X9ZbE,7050
wand/sequence.py,sha256=ISYLgbm4voH3L29fO6f6gqB9WQemXAxk9BEPpxbs6zo,12444
wand/version.py,sha256=BvZLnbOQocrRnaPAqsmaGmPiun8RYz0QrAsrxCK4yks,8399
Wand-0.4.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
wand/__pycache__/__init__.cpython-36.pyc,,
wand/__pycache__/api.cpython-36.pyc,,
wand/__pycache__/color.cpython-36.pyc,,
wand/__pycache__/compat.cpython-36.pyc,,
wand/__pycache__/display.cpython-36.pyc,,
wand/__pycache__/drawing.cpython-36.pyc,,
wand/__pycache__/exceptions.cpython-36.pyc,,
wand/__pycache__/font.cpython-36.pyc,,
wand/__pycache__/image.cpython-36.pyc,,
wand/__pycache__/resource.cpython-36.pyc,,
wand/__pycache__/sequence.cpython-36.pyc,,
wand/__pycache__/version.cpython-36.pyc,,

View file

@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: cp36-none-any

View file

@@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Stackless", "Topic :: Multimedia :: Graphics"], "extensions": {"python.details": {"contacts": [{"email": "hongminhee@member.fsf.org", "name": "Hong Minhee", "role": "author"}, {"email": "hongminhee@member.fsf.org", "name": "Hong Minhee", "role": "maintainer"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://wand-py.org/"}}}, "extras": ["doc"], "generator": "bdist_wheel (0.30.0)", "license": "MIT License", "metadata_version": "2.0", "name": "Wand", "run_requires": [{"extra": "doc", "requires": ["Sphinx (>=1.0)"]}], "summary": "Ctypes-based simple MagickWand API binding for Python", "test_requires": [{"requires": ["memory-profiler (>=0.27)", "psutil (>=1.0.1)", "pytest (>=2.3.0)", "pytest-xdist (>=1.8)"]}], "version": "0.4.4"}

View file

@@ -1,104 +0,0 @@
Arrow - Better dates & times for Python
=======================================
.. image:: https://travis-ci.org/crsmithdev/arrow.svg
:alt: build status
:target: https://travis-ci.org/crsmithdev/arrow
.. image:: https://codecov.io/github/crsmithdev/arrow/coverage.svg?branch=master
:target: https://codecov.io/github/crsmithdev/arrow
:alt: Codecov
.. image:: https://img.shields.io/pypi/v/arrow.svg
:target: https://pypi.python.org/pypi/arrow
:alt: downloads
Documentation: `arrow.readthedocs.org <http://arrow.readthedocs.org/en/latest/>`_
---------------------------------------------------------------------------------
What?
-----
Arrow is a Python library that offers a sensible, human-friendly approach to creating, manipulating, formatting and converting dates, times, and timestamps. It implements and updates the datetime type, plugging gaps in functionality, and provides an intelligent module API that supports many common creation scenarios. Simply put, it helps you work with dates and times with fewer imports and a lot less code.
Arrow is heavily inspired by `moment.js <https://github.com/timrwood/moment>`_ and `requests <https://github.com/kennethreitz/requests>`_.
Why?
----
Python's standard library and some other low-level modules have near-complete date, time and time zone functionality but don't work very well from a usability perspective:
- Too many modules: datetime, time, calendar, dateutil, pytz and more
- Too many types: date, time, datetime, tzinfo, timedelta, relativedelta, etc.
- Time zones and timestamp conversions are verbose and unpleasant
- Time zone naivety is the norm
- Gaps in functionality: ISO-8601 parsing, timespans, humanization
Features
--------
- Fully implemented, drop-in replacement for datetime
- Supports Python 2.6, 2.7, 3.3, 3.4 and 3.5
- Time zone-aware & UTC by default
- Provides super-simple creation options for many common input scenarios
- Updated .replace method with support for relative offsets, including weeks
- Formats and parses strings automatically
- Partial support for ISO-8601
- Timezone conversion
- Timestamp available as a property
- Generates time spans, ranges, floors and ceilings in timeframes from year to microsecond
- Humanizes and supports a growing list of contributed locales
- Extensible for your own Arrow-derived types
Quick start
-----------
First:
.. code-block:: console
$ pip install arrow
And then:
.. code-block:: pycon
>>> import arrow
>>> utc = arrow.utcnow()
>>> utc
<Arrow [2013-05-11T21:23:58.970460+00:00]>
>>> utc = utc.replace(hours=-1)
>>> utc
<Arrow [2013-05-11T20:23:58.970460+00:00]>
>>> local = utc.to('US/Pacific')
>>> local
<Arrow [2013-05-11T13:23:58.970460-07:00]>
>>> arrow.get('2013-05-11T21:23:58.970460+00:00')
<Arrow [2013-05-11T21:23:58.970460+00:00]>
>>> local.timestamp
1368303838
>>> local.format()
'2013-05-11 13:23:58 -07:00'
>>> local.format('YYYY-MM-DD HH:mm:ss ZZ')
'2013-05-11 13:23:58 -07:00'
>>> local.humanize()
'an hour ago'
>>> local.humanize(locale='ko_kr')
'1시간 전'
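The span, floor and ceiling helpers mentioned above follow the same pattern
(a brief sketch; the ``repr`` output is illustrative):
.. code-block:: pycon
    >>> start = arrow.Arrow(2013, 5, 5, 12, 30)
    >>> start.floor('hour')
    <Arrow [2013-05-05T12:00:00+00:00]>
    >>> start.span('hour')
    (<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T12:59:59.999999+00:00]>)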
Further documentation can be found at `arrow.readthedocs.org <http://arrow.readthedocs.org/en/latest/>`_
Contributing
------------
Contributions are welcome, especially with localization. See `locales.py <https://github.com/crsmithdev/arrow/blob/master/arrow/locales.py>`_ for what's currently supported.

View file

@@ -1,125 +0,0 @@
Metadata-Version: 2.0
Name: arrow
Version: 0.12.1
Summary: Better dates and times for Python
Home-page: https://github.com/crsmithdev/arrow/
Author: Chris Smith
Author-email: crsmithdev@gmail.com
License: Apache 2.0
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Dist: python-dateutil
Requires-Dist: backports.functools-lru-cache (>=1.2.1); python_version=='2.7'
Arrow - Better dates & times for Python
=======================================
.. image:: https://travis-ci.org/crsmithdev/arrow.svg
:alt: build status
:target: https://travis-ci.org/crsmithdev/arrow
.. image:: https://codecov.io/github/crsmithdev/arrow/coverage.svg?branch=master
:target: https://codecov.io/github/crsmithdev/arrow
:alt: Codecov
.. image:: https://img.shields.io/pypi/v/arrow.svg
:target: https://pypi.python.org/pypi/arrow
:alt: downloads
Documentation: `arrow.readthedocs.org <http://arrow.readthedocs.org/en/latest/>`_
---------------------------------------------------------------------------------
What?
-----
Arrow is a Python library that offers a sensible, human-friendly approach to creating, manipulating, formatting and converting dates, times, and timestamps. It implements and updates the datetime type, plugging gaps in functionality, and provides an intelligent module API that supports many common creation scenarios. Simply put, it helps you work with dates and times with fewer imports and a lot less code.
Arrow is heavily inspired by `moment.js <https://github.com/timrwood/moment>`_ and `requests <https://github.com/kennethreitz/requests>`_.
Why?
----
Python's standard library and some other low-level modules have near-complete date, time and time zone functionality but don't work very well from a usability perspective:
- Too many modules: datetime, time, calendar, dateutil, pytz and more
- Too many types: date, time, datetime, tzinfo, timedelta, relativedelta, etc.
- Time zones and timestamp conversions are verbose and unpleasant
- Time zone naivety is the norm
- Gaps in functionality: ISO-8601 parsing, timespans, humanization
Features
--------
- Fully implemented, drop-in replacement for datetime
- Supports Python 2.6, 2.7, 3.3, 3.4 and 3.5
- Time zone-aware & UTC by default
- Provides super-simple creation options for many common input scenarios
- Updated .replace method with support for relative offsets, including weeks
- Formats and parses strings automatically
- Partial support for ISO-8601
- Timezone conversion
- Timestamp available as a property
- Generates time spans, ranges, floors and ceilings in timeframes from year to microsecond
- Humanizes and supports a growing list of contributed locales
- Extensible for your own Arrow-derived types
Quick start
-----------
First:
.. code-block:: console
$ pip install arrow
And then:
.. code-block:: pycon
>>> import arrow
>>> utc = arrow.utcnow()
>>> utc
<Arrow [2013-05-11T21:23:58.970460+00:00]>
>>> utc = utc.replace(hours=-1)
>>> utc
<Arrow [2013-05-11T20:23:58.970460+00:00]>
>>> local = utc.to('US/Pacific')
>>> local
<Arrow [2013-05-11T13:23:58.970460-07:00]>
>>> arrow.get('2013-05-11T21:23:58.970460+00:00')
<Arrow [2013-05-11T21:23:58.970460+00:00]>
>>> local.timestamp
1368303838
>>> local.format()
'2013-05-11 13:23:58 -07:00'
>>> local.format('YYYY-MM-DD HH:mm:ss ZZ')
'2013-05-11 13:23:58 -07:00'
>>> local.humanize()
'an hour ago'
>>> local.humanize(locale='ko_kr')
'1시간 전'
Further documentation can be found at `arrow.readthedocs.org <http://arrow.readthedocs.org/en/latest/>`_
Contributing
------------
Contributions are welcome, especially with localization. See `locales.py <https://github.com/crsmithdev/arrow/blob/master/arrow/locales.py>`_ for what's currently supported.

View file

@@ -1,23 +0,0 @@
arrow/__init__.py,sha256=_q2QJezQr_fCP0jE4ldqzWJwS_yw9unClcjUOdmMOn0,164
arrow/api.py,sha256=C6fxwcwAdd_KKhh7gaQcK_vSP4CRNNnuCUGGSn_8DS4,1066
arrow/arrow.py,sha256=CE7oSKXj9aEYnw7qw1hKA3X92ztiYnAin-QtfuSkROU,36025
arrow/factory.py,sha256=C7Ef9NQ5-hkrTECMM_NI-UIpBuMF5m546IkfZXIzfP4,8569
arrow/formatter.py,sha256=R1b-l3ejp8j8R71tF_fb19BFiyEY4VWrsGyX13g-o1Y,3434
arrow/locales.py,sha256=3iq7JEv8swN7rHrbsyHm2Jp0YMV6Ed88fa6JIiOOgVk,72239
arrow/parser.py,sha256=m_jSHIVZ0MH_pbhwjny6M2_4QpquY9V0gfDK0c6qYMM,11141
arrow/util.py,sha256=jO7c3wY9eN_58hy0KDN9caGi7F5Aip1M3Vc3PQlUixc,1034
arrow-0.12.1.dist-info/DESCRIPTION.rst,sha256=6dtM5fYwnUbulOk5mqLaJvm9T7W4iiJRLbUEBvCtBh0,3467
arrow-0.12.1.dist-info/METADATA,sha256=zqBfbZr4u-hOJ1ElVkP4Q1LuM33UIPD9WMgqrBmy_fE,4284
arrow-0.12.1.dist-info/RECORD,,
arrow-0.12.1.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
arrow-0.12.1.dist-info/metadata.json,sha256=u8k7n0Oh0Nn9pUuujXUsB8mGfdRHqR_Y3HHe81a36t4,996
arrow-0.12.1.dist-info/top_level.txt,sha256=aCBThK2RIB824ctI3l9i6z94l8UYpFF-BC4m3dDzFFo,6
arrow-0.12.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
arrow/__pycache__/__init__.cpython-36.pyc,,
arrow/__pycache__/api.cpython-36.pyc,,
arrow/__pycache__/arrow.cpython-36.pyc,,
arrow/__pycache__/factory.cpython-36.pyc,,
arrow/__pycache__/formatter.cpython-36.pyc,,
arrow/__pycache__/locales.cpython-36.pyc,,
arrow/__pycache__/parser.cpython-36.pyc,,
arrow/__pycache__/util.cpython-36.pyc,,

View file

@@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@@ -1 +0,0 @@
{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "crsmithdev@gmail.com", "name": "Chris Smith", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/crsmithdev/arrow/"}}}, "extras": [], "generator": "bdist_wheel (0.30.0)", "license": "Apache 2.0", "metadata_version": "2.0", "name": "arrow", "run_requires": [{"requires": ["python-dateutil"]}, {"environment": "python_version=='2.7'", "requires": ["backports.functools-lru-cache (>=1.2.1)"]}], "summary": "Better dates and times for Python", "version": "0.12.1"}

View file

@@ -1,8 +0,0 @@
# -*- coding: utf-8 -*-
from .arrow import Arrow
from .factory import ArrowFactory
from .api import get, now, utcnow
__version__ = '0.12.1'
VERSION = __version__

View file

@@ -1,52 +0,0 @@
# -*- coding: utf-8 -*-
'''
Provides the default implementation of :class:`ArrowFactory <arrow.factory.ArrowFactory>`
methods for use as a module API.
'''
from __future__ import absolute_import
from arrow.factory import ArrowFactory
# internal default factory.
_factory = ArrowFactory()
def get(*args, **kwargs):
''' Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``get`` method.
'''
return _factory.get(*args, **kwargs)
def utcnow():
''' Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``utcnow`` method.
'''
return _factory.utcnow()
def now(tz=None):
''' Calls the default :class:`ArrowFactory <arrow.factory.ArrowFactory>` ``now`` method.
'''
return _factory.now(tz)
def factory(type):
''' Returns an :class:`.ArrowFactory` for the specified :class:`Arrow <arrow.arrow.Arrow>`
or derived type.
:param type: the type, :class:`Arrow <arrow.arrow.Arrow>` or derived.
'''
return ArrowFactory(type)
__all__ = ['get', 'utcnow', 'now', 'factory']
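# Usage sketch (illustrative only, not part of the original module): the
# functions above delegate to a shared default factory, while ``factory()``
# builds a new factory for an Arrow subclass. ``CustomArrow`` below is a
# hypothetical subclass used purely for illustration.
#
#     import arrow
#     from arrow.api import factory
#
#     utc_now = arrow.utcnow()             # default factory, current UTC time
#     local_now = arrow.now('US/Pacific')  # default factory, other timezone
#
#     class CustomArrow(arrow.Arrow):
#         pass
#
#     custom_now = factory(CustomArrow).utcnow()   # returns a CustomArrow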

File diff suppressed because it is too large

View file

@@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
"""
Implements the :class:`ArrowFactory <arrow.factory.ArrowFactory>` class,
providing factory methods for common :class:`Arrow <arrow.arrow.Arrow>`
construction scenarios.
"""
from __future__ import absolute_import
from arrow.arrow import Arrow
from arrow import parser
from arrow.util import is_timestamp, isstr
from datetime import datetime, tzinfo, date
from dateutil import tz as dateutil_tz
from time import struct_time
import calendar
class ArrowFactory(object):
''' A factory for generating :class:`Arrow <arrow.arrow.Arrow>` objects.
:param type: (optional) the :class:`Arrow <arrow.arrow.Arrow>`-based class to construct from.
Defaults to :class:`Arrow <arrow.arrow.Arrow>`.
'''
def __init__(self, type=Arrow):
self.type = type
def get(self, *args, **kwargs):
''' Returns an :class:`Arrow <arrow.arrow.Arrow>` object based on flexible inputs.
:param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to
'en_us'.
:param tzinfo: (optional) a :ref:`timezone expression <tz-expr>` or tzinfo object.
Replaces the timezone unless using an input form that is explicitly UTC or specifies
the timezone in a positional argument. Defaults to UTC.
Usage::
>>> import arrow
**No inputs** to get current UTC time::
>>> arrow.get()
<Arrow [2013-05-08T05:51:43.316458+00:00]>
**None** to also get current UTC time::
>>> arrow.get(None)
<Arrow [2013-05-08T05:51:49.016458+00:00]>
**One** :class:`Arrow <arrow.arrow.Arrow>` object, to get a copy.
>>> arw = arrow.utcnow()
>>> arrow.get(arw)
<Arrow [2013-10-23T15:21:54.354846+00:00]>
**One** ``str``, ``float``, or ``int``, convertible to a floating-point timestamp, to get
that timestamp in UTC::
>>> arrow.get(1367992474.293378)
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get(1367992474)
<Arrow [2013-05-08T05:54:34+00:00]>
>>> arrow.get('1367992474.293378')
<Arrow [2013-05-08T05:54:34.293378+00:00]>
>>> arrow.get('1367992474')
<Arrow [2013-05-08T05:54:34+00:00]>
**One** ISO-8601-formatted ``str``, to parse it::
>>> arrow.get('2013-09-29T01:26:43.830580')
<Arrow [2013-09-29T01:26:43.830580+00:00]>
**One** ``tzinfo``, to get the current time **converted** to that timezone::
>>> arrow.get(tz.tzlocal())
<Arrow [2013-05-07T22:57:28.484717-07:00]>
**One** naive ``datetime``, to get that datetime in UTC::
>>> arrow.get(datetime(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**One** aware ``datetime``, to get that datetime::
>>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
<Arrow [2013-05-05T00:00:00-07:00]>
**One** naive ``date``, to get that date in UTC::
>>> arrow.get(date(2013, 5, 5))
<Arrow [2013-05-05T00:00:00+00:00]>
**Two** arguments, a naive or aware ``datetime``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, a naive ``date``, and a replacement
:ref:`timezone expression <tz-expr>`::
>>> arrow.get(date(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
**Two** arguments, both ``str``, to parse the first according to the format of the second::
>>> arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss')
<Arrow [2013-05-05T12:30:45+00:00]>
**Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try::
>>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss'])
<Arrow [2013-05-05T12:30:45+00:00]>
**Three or more** arguments, as for the constructor of a ``datetime``::
>>> arrow.get(2013, 5, 5, 12, 30, 45)
<Arrow [2013-05-05T12:30:45+00:00]>
**One** ``time.struct_time``::
>>> arrow.get(gmtime(0))
<Arrow [1970-01-01T00:00:00+00:00]>
'''
arg_count = len(args)
locale = kwargs.get('locale', 'en_us')
tz = kwargs.get('tzinfo', None)
# () -> now, @ utc.
if arg_count == 0:
if isinstance(tz, tzinfo):
return self.type.now(tz)
return self.type.utcnow()
if arg_count == 1:
arg = args[0]
# (None) -> now, @ utc.
if arg is None:
return self.type.utcnow()
# try (int, float, str(int), str(float)) -> utc, from timestamp.
if is_timestamp(arg):
return self.type.utcfromtimestamp(arg)
# (Arrow) -> from the object's datetime.
if isinstance(arg, Arrow):
return self.type.fromdatetime(arg.datetime)
# (datetime) -> from datetime.
if isinstance(arg, datetime):
return self.type.fromdatetime(arg)
# (date) -> from date.
if isinstance(arg, date):
return self.type.fromdate(arg)
# (tzinfo) -> now, @ tzinfo.
elif isinstance(arg, tzinfo):
return self.type.now(arg)
# (str) -> parse.
elif isstr(arg):
dt = parser.DateTimeParser(locale).parse_iso(arg)
return self.type.fromdatetime(dt)
# (struct_time) -> from struct_time
elif isinstance(arg, struct_time):
return self.type.utcfromtimestamp(calendar.timegm(arg))
else:
raise TypeError('Can\'t parse single argument type of \'{0}\''.format(type(arg)))
elif arg_count == 2:
arg_1, arg_2 = args[0], args[1]
if isinstance(arg_1, datetime):
# (datetime, tzinfo/str) -> fromdatetime replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdatetime(arg_1, arg_2)
else:
raise TypeError('Can\'t parse two arguments of types \'datetime\', \'{0}\''.format(
type(arg_2)))
elif isinstance(arg_1, date):
# (date, tzinfo/str) -> fromdate replace tzinfo.
if isinstance(arg_2, tzinfo) or isstr(arg_2):
return self.type.fromdate(arg_1, tzinfo=arg_2)
else:
raise TypeError('Can\'t parse two arguments of types \'date\', \'{0}\''.format(
type(arg_2)))
# (str, format) -> parse.
elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)):
dt = parser.DateTimeParser(locale).parse(args[0], args[1])
return self.type.fromdatetime(dt, tzinfo=tz)
else:
raise TypeError('Can\'t parse two arguments of types \'{0}\', \'{1}\''.format(
type(arg_1), type(arg_2)))
# 3+ args -> datetime-like via constructor.
else:
return self.type(*args, **kwargs)
def utcnow(self):
'''Returns an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in UTC time.
Usage::
>>> import arrow
>>> arrow.utcnow()
<Arrow [2013-05-08T05:19:07.018993+00:00]>
'''
return self.type.utcnow()
def now(self, tz=None):
'''Returns an :class:`Arrow <arrow.arrow.Arrow>` object, representing "now" in the given
timezone.
:param tz: (optional) A :ref:`timezone expression <tz-expr>`. Defaults to local time.
Usage::
>>> import arrow
>>> arrow.now()
<Arrow [2013-05-07T22:19:11.363410-07:00]>
>>> arrow.now('US/Pacific')
<Arrow [2013-05-07T22:19:15.251821-07:00]>
>>> arrow.now('+02:00')
<Arrow [2013-05-08T07:19:25.618646+02:00]>
>>> arrow.now('local')
<Arrow [2013-05-07T22:19:39.130059-07:00]>
'''
if tz is None:
tz = dateutil_tz.tzlocal()
elif not isinstance(tz, tzinfo):
tz = parser.TzinfoParser.parse(tz)
return self.type.now(tz)
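
A hedged sketch exercising the argument dispatch in ``ArrowFactory.get()`` above; the inputs mirror the docstring examples (assuming arrow and python-dateutil are installed)::

    from datetime import datetime
    from arrow.factory import ArrowFactory

    f = ArrowFactory()
    print(f.get())                                    # no args -> "now" in UTC
    print(f.get(1367992474))                          # timestamp -> UTC
    print(f.get('2013-09-29T01:26:43.830580'))        # ISO-8601 string -> parsed
    print(f.get(datetime(2013, 5, 5), 'US/Pacific'))  # naive datetime + timezone expression
    print(f.now('+02:00'))                            # "now" at a fixed offset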

View file

@@ -1,105 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import calendar
import re
from dateutil import tz as dateutil_tz
from arrow import util, locales
class DateTimeFormatter(object):
_FORMAT_RE = re.compile('(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?dd?d?|HH?|hh?|mm?|ss?|SS?S?S?S?S?|ZZ?|a|A|X)')
def __init__(self, locale='en_us'):
self.locale = locales.get_locale(locale)
def format(cls, dt, fmt):
return cls._FORMAT_RE.sub(lambda m: cls._format_token(dt, m.group(0)), fmt)
def _format_token(self, dt, token):
if token == 'YYYY':
return self.locale.year_full(dt.year)
if token == 'YY':
return self.locale.year_abbreviation(dt.year)
if token == 'MMMM':
return self.locale.month_name(dt.month)
if token == 'MMM':
return self.locale.month_abbreviation(dt.month)
if token == 'MM':
return '{0:02d}'.format(dt.month)
if token == 'M':
return str(dt.month)
if token == 'DDDD':
return '{0:03d}'.format(dt.timetuple().tm_yday)
if token == 'DDD':
return str(dt.timetuple().tm_yday)
if token == 'DD':
return '{0:02d}'.format(dt.day)
if token == 'D':
return str(dt.day)
if token == 'Do':
return self.locale.ordinal_number(dt.day)
if token == 'dddd':
return self.locale.day_name(dt.isoweekday())
if token == 'ddd':
return self.locale.day_abbreviation(dt.isoweekday())
if token == 'd':
return str(dt.isoweekday())
if token == 'HH':
return '{0:02d}'.format(dt.hour)
if token == 'H':
return str(dt.hour)
if token == 'hh':
return '{0:02d}'.format(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12))
if token == 'h':
return str(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12))
if token == 'mm':
return '{0:02d}'.format(dt.minute)
if token == 'm':
return str(dt.minute)
if token == 'ss':
return '{0:02d}'.format(dt.second)
if token == 's':
return str(dt.second)
if token == 'SSSSSS':
return str('{0:06d}'.format(int(dt.microsecond)))
if token == 'SSSSS':
return str('{0:05d}'.format(int(dt.microsecond / 10)))
if token == 'SSSS':
return str('{0:04d}'.format(int(dt.microsecond / 100)))
if token == 'SSS':
return str('{0:03d}'.format(int(dt.microsecond / 1000)))
if token == 'SS':
return str('{0:02d}'.format(int(dt.microsecond / 10000)))
if token == 'S':
return str(int(dt.microsecond / 100000))
if token == 'X':
return str(calendar.timegm(dt.utctimetuple()))
if token in ['ZZ', 'Z']:
separator = ':' if token == 'ZZ' else ''
tz = dateutil_tz.tzutc() if dt.tzinfo is None else dt.tzinfo
total_minutes = int(util.total_seconds(tz.utcoffset(dt)) / 60)
sign = '+' if total_minutes >= 0 else '-'
total_minutes = abs(total_minutes)
hour, minute = divmod(total_minutes, 60)
return '{0}{1:02d}{2}{3:02d}'.format(sign, hour, separator, minute)
if token in ('a', 'A'):
return self.locale.meridian(dt.hour, token)
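
A small sketch of the token substitution above; the format string only uses tokens that ``_format_token`` handles, and a naive ``datetime`` falls back to UTC for ``ZZ`` (a hedged example, assuming arrow is installed)::

    from datetime import datetime
    from arrow.formatter import DateTimeFormatter

    formatter = DateTimeFormatter('en_us')
    print(formatter.format(datetime(2013, 5, 5, 14, 30, 45), 'YYYY-MM-DD hh:mm:ss A ZZ'))
    # -> something like '2013-05-05 02:30:45 PM +00:00'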

File diff suppressed because it is too large

View file

@@ -1,344 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
from dateutil import tz
import re
try:
from functools import lru_cache
except ImportError: # pragma: no cover
from backports.functools_lru_cache import lru_cache # pragma: no cover
from arrow import locales
class ParserError(RuntimeError):
pass
class DateTimeParser(object):
_FORMAT_RE = re.compile('(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|X)')
_ESCAPE_RE = re.compile('\[[^\[\]]*\]')
_ONE_OR_MORE_DIGIT_RE = re.compile('\d+')
_ONE_OR_TWO_DIGIT_RE = re.compile('\d{1,2}')
_FOUR_DIGIT_RE = re.compile('\d{4}')
_TWO_DIGIT_RE = re.compile('\d{2}')
_TZ_RE = re.compile('[+\-]?\d{2}:?(\d{2})?')
_TZ_NAME_RE = re.compile('\w[\w+\-/]+')
_BASE_INPUT_RE_MAP = {
'YYYY': _FOUR_DIGIT_RE,
'YY': _TWO_DIGIT_RE,
'MM': _TWO_DIGIT_RE,
'M': _ONE_OR_TWO_DIGIT_RE,
'DD': _TWO_DIGIT_RE,
'D': _ONE_OR_TWO_DIGIT_RE,
'HH': _TWO_DIGIT_RE,
'H': _ONE_OR_TWO_DIGIT_RE,
'hh': _TWO_DIGIT_RE,
'h': _ONE_OR_TWO_DIGIT_RE,
'mm': _TWO_DIGIT_RE,
'm': _ONE_OR_TWO_DIGIT_RE,
'ss': _TWO_DIGIT_RE,
's': _ONE_OR_TWO_DIGIT_RE,
'X': re.compile('\d+'),
'ZZZ': _TZ_NAME_RE,
'ZZ': _TZ_RE,
'Z': _TZ_RE,
'S': _ONE_OR_MORE_DIGIT_RE,
}
MARKERS = ['YYYY', 'MM', 'DD']
SEPARATORS = ['-', '/', '.']
def __init__(self, locale='en_us', cache_size=0):
self.locale = locales.get_locale(locale)
self._input_re_map = self._BASE_INPUT_RE_MAP.copy()
self._input_re_map.update({
'MMMM': self._choice_re(self.locale.month_names[1:], re.IGNORECASE),
'MMM': self._choice_re(self.locale.month_abbreviations[1:],
re.IGNORECASE),
'Do': re.compile(self.locale.ordinal_day_re),
'dddd': self._choice_re(self.locale.day_names[1:], re.IGNORECASE),
'ddd': self._choice_re(self.locale.day_abbreviations[1:],
re.IGNORECASE),
'd': re.compile(r"[1-7]"),
'a': self._choice_re(
(self.locale.meridians['am'], self.locale.meridians['pm'])
),
# note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to
# ensure backwards compatibility of this token
'A': self._choice_re(self.locale.meridians.values())
})
if cache_size > 0:
self._generate_pattern_re =\
lru_cache(maxsize=cache_size)(self._generate_pattern_re)
def parse_iso(self, string):
has_time = 'T' in string or ' ' in string.strip()
space_divider = ' ' in string.strip()
if has_time:
if space_divider:
date_string, time_string = string.split(' ', 1)
else:
date_string, time_string = string.split('T', 1)
time_parts = re.split('[+-]', time_string, 1)
has_tz = len(time_parts) > 1
has_seconds = time_parts[0].count(':') > 1
has_subseconds = re.search('[.,]', time_parts[0])
if has_subseconds:
formats = ['YYYY-MM-DDTHH:mm:ss%sS' % has_subseconds.group()]
elif has_seconds:
formats = ['YYYY-MM-DDTHH:mm:ss']
else:
formats = ['YYYY-MM-DDTHH:mm']
else:
has_tz = False
# generate required formats: YYYY-MM-DD, YYYY-MM, YYYY
# using various separators: -, /, .
l = len(self.MARKERS)
formats = [separator.join(self.MARKERS[:l-i])
for i in range(l)
for separator in self.SEPARATORS]
if has_time and has_tz:
formats = [f + 'Z' for f in formats]
if space_divider:
formats = [item.replace('T', ' ', 1) for item in formats]
return self._parse_multiformat(string, formats)
def _generate_pattern_re(self, fmt):
# fmt is a string of tokens like 'YYYY-MM-DD'
# we construct a new string by replacing each
# token by its pattern:
# 'YYYY-MM-DD' -> '(?P<YYYY>\d{4})-(?P<MM>\d{2})-(?P<DD>\d{2})'
tokens = []
offset = 0
# Extract the bracketed expressions to be reinserted later.
escaped_fmt = re.sub(self._ESCAPE_RE, "#", fmt)
# Any number of S is the same as one.
escaped_fmt = re.sub('S+', 'S', escaped_fmt)
escaped_data = re.findall(self._ESCAPE_RE, fmt)
fmt_pattern = escaped_fmt
for m in self._FORMAT_RE.finditer(escaped_fmt):
token = m.group(0)
try:
input_re = self._input_re_map[token]
except KeyError:
raise ParserError('Unrecognized token \'{0}\''.format(token))
input_pattern = '(?P<{0}>{1})'.format(token, input_re.pattern)
tokens.append(token)
# a pattern doesn't have the same length as the token
# it replaces! We keep the difference in the offset variable.
# This works because the string is scanned left-to-right and matches
# are returned in the order found by finditer.
fmt_pattern = fmt_pattern[:m.start() + offset] + input_pattern + fmt_pattern[m.end() + offset:]
offset += len(input_pattern) - (m.end() - m.start())
final_fmt_pattern = ""
a = fmt_pattern.split("#")
b = escaped_data
# Due to the way Python splits, 'a' will always be longer
for i in range(len(a)):
final_fmt_pattern += a[i]
if i < len(b):
final_fmt_pattern += b[i][1:-1]
return tokens, re.compile(final_fmt_pattern, flags=re.IGNORECASE)
def parse(self, string, fmt):
if isinstance(fmt, list):
return self._parse_multiformat(string, fmt)
fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt)
match = fmt_pattern_re.search(string)
if match is None:
raise ParserError('Failed to match \'{0}\' when parsing \'{1}\''
.format(fmt_pattern_re.pattern, string))
parts = {}
for token in fmt_tokens:
if token == 'Do':
value = match.group('value')
else:
value = match.group(token)
self._parse_token(token, value, parts)
return self._build_datetime(parts)
def _parse_token(self, token, value, parts):
if token == 'YYYY':
parts['year'] = int(value)
elif token == 'YY':
value = int(value)
parts['year'] = 1900 + value if value > 68 else 2000 + value
elif token in ['MMMM', 'MMM']:
parts['month'] = self.locale.month_number(value.lower())
elif token in ['MM', 'M']:
parts['month'] = int(value)
elif token in ['DD', 'D']:
parts['day'] = int(value)
elif token in ['Do']:
parts['day'] = int(value)
elif token.upper() in ['HH', 'H']:
parts['hour'] = int(value)
elif token in ['mm', 'm']:
parts['minute'] = int(value)
elif token in ['ss', 's']:
parts['second'] = int(value)
elif token == 'S':
# We have the *most significant* digits of an arbitrary-precision integer.
# We want the six most significant digits as an integer, rounded.
# FIXME: add nanosecond support somehow?
value = value.ljust(7, str('0'))
# floating-point (IEEE-754) defaults to half-to-even rounding
seventh_digit = int(value[6])
if seventh_digit == 5:
rounding = int(value[5]) % 2
elif seventh_digit > 5:
rounding = 1
else:
rounding = 0
parts['microsecond'] = int(value[:6]) + rounding
elif token == 'X':
parts['timestamp'] = int(value)
elif token in ['ZZZ', 'ZZ', 'Z']:
parts['tzinfo'] = TzinfoParser.parse(value)
elif token in ['a', 'A']:
if value in (
self.locale.meridians['am'],
self.locale.meridians['AM']
):
parts['am_pm'] = 'am'
elif value in (
self.locale.meridians['pm'],
self.locale.meridians['PM']
):
parts['am_pm'] = 'pm'
@staticmethod
def _build_datetime(parts):
timestamp = parts.get('timestamp')
if timestamp:
tz_utc = tz.tzutc()
return datetime.fromtimestamp(timestamp, tz=tz_utc)
am_pm = parts.get('am_pm')
hour = parts.get('hour', 0)
if am_pm == 'pm' and hour < 12:
hour += 12
elif am_pm == 'am' and hour == 12:
hour = 0
return datetime(year=parts.get('year', 1), month=parts.get('month', 1),
day=parts.get('day', 1), hour=hour, minute=parts.get('minute', 0),
second=parts.get('second', 0), microsecond=parts.get('microsecond', 0),
tzinfo=parts.get('tzinfo'))
def _parse_multiformat(self, string, formats):
_datetime = None
for fmt in formats:
try:
_datetime = self.parse(string, fmt)
break
except ParserError:
pass
if _datetime is None:
raise ParserError('Could not match input to any of {0} on \'{1}\''.format(formats, string))
return _datetime
@staticmethod
def _map_lookup(input_map, key):
try:
return input_map[key]
except KeyError:
raise ParserError('Could not match "{0}" to {1}'.format(key, input_map))
@staticmethod
def _try_timestamp(string):
try:
return float(string)
except:
return None
@staticmethod
def _choice_re(choices, flags=0):
return re.compile('({0})'.format('|'.join(choices)), flags=flags)
class TzinfoParser(object):
_TZINFO_RE = re.compile('([+\-])?(\d\d):?(\d\d)?')
@classmethod
def parse(cls, string):
tzinfo = None
if string == 'local':
tzinfo = tz.tzlocal()
elif string in ['utc', 'UTC']:
tzinfo = tz.tzutc()
else:
iso_match = cls._TZINFO_RE.match(string)
if iso_match:
sign, hours, minutes = iso_match.groups()
if minutes is None:
minutes = 0
seconds = int(hours) * 3600 + int(minutes) * 60
if sign == '-':
seconds *= -1
tzinfo = tz.tzoffset(None, seconds)
else:
tzinfo = tz.gettz(string)
if tzinfo is None:
raise ParserError('Could not parse timezone expression "{0}"'.format(string))
return tzinfo
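
A brief sketch of the parser entry points above, using format tokens and timezone expressions the class already covers (assuming arrow is installed)::

    from arrow.parser import DateTimeParser, TzinfoParser

    parser = DateTimeParser('en_us')
    print(parser.parse('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss'))
    print(parser.parse_iso('2013-09-29T01:26:43.830580'))            # ISO-8601, via _parse_multiformat
    print(parser.parse('05/05/2013', ['YYYY-MM-DD', 'MM/DD/YYYY']))  # list of formats: first match wins
    print(TzinfoParser.parse('+02:00'))                              # -> tzoffset(None, 7200)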

View file

@@ -1,47 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
# python 2.6 / 2.7 definitions for total_seconds function.
def _total_seconds_27(td): # pragma: no cover
return td.total_seconds()
def _total_seconds_26(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
# get version info and assign correct total_seconds function.
version = '{0}.{1}.{2}'.format(*sys.version_info[:3])
if version < '2.7': # pragma: no cover
total_seconds = _total_seconds_26
else: # pragma: no cover
total_seconds = _total_seconds_27
def is_timestamp(value):
if type(value) == bool:
return False
try:
float(value)
return True
except:
return False
# python 2.7 / 3.0+ definitions for isstr function.
try: # pragma: no cover
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: #pragma: no cover
def isstr(s):
return isinstance(s, str)
__all__ = ['total_seconds', 'is_timestamp', 'isstr']
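
The helpers above are small enough to illustrate inline (a hedged sketch, assuming the module is importable as ``arrow.util``)::

    from datetime import timedelta
    from arrow.util import total_seconds, is_timestamp, isstr

    print(total_seconds(timedelta(days=1, seconds=30)))  # 86430.0
    print(is_timestamp('1367992474.293378'))             # True: str convertible to float
    print(is_timestamp(True))                            # False: bools are explicitly rejected
    print(isstr(u'text'), isstr(42))                     # True False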

View file

@@ -1,578 +0,0 @@
======
Bleach
======
.. image:: https://travis-ci.org/mozilla/bleach.svg?branch=master
:target: https://travis-ci.org/mozilla/bleach
.. image:: https://badge.fury.io/py/bleach.svg
:target: http://badge.fury.io/py/bleach
Bleach is an allowed-list-based HTML sanitizing library that escapes or strips
markup and attributes.
Bleach can also linkify text safely, applying filters that Django's ``urlize``
filter cannot, and optionally setting ``rel`` attributes, even on links already
in the text.
Bleach is intended for sanitizing text from *untrusted* sources. If you find
yourself jumping through hoops to allow your site administrators to do lots of
things, you're probably outside the use cases. Either trust those users, or
don't.
Because it relies on html5lib_, Bleach is as good as modern browsers at dealing
with weird, quirky HTML fragments. And *any* of Bleach's methods will fix
unbalanced or mis-nested tags.
The version on GitHub_ is the most up-to-date and contains the latest bug
fixes. You can find full documentation on `ReadTheDocs`_.
:Code: https://github.com/mozilla/bleach
:Documentation: https://bleach.readthedocs.io/
:Issue tracker: https://github.com/mozilla/bleach/issues
:IRC: ``#bleach`` on irc.mozilla.org
:License: Apache License v2; see LICENSE file
Reporting Bugs
==============
For regular bugs, please report them `in our issue tracker
<https://github.com/mozilla/bleach/issues>`_.
If you believe that you've found a security vulnerability, please `file a secure
bug report in our bug tracker
<https://bugzilla.mozilla.org/enter_bug.cgi?assigned_to=nobody%40mozilla.org&product=Webtools&component=Bleach-security&groups=webtools-security>`_
or send an email to *security AT mozilla DOT org*.
For more information on security-related bug disclosure and the PGP key to use
for sending encrypted mail or to verify responses received from that address,
please read our wiki page at
`<https://www.mozilla.org/en-US/security/#For_Developers>`_.
Installing Bleach
=================
Bleach is available on PyPI_, so you can install it with ``pip``::
$ pip install bleach
Or with ``easy_install``::
$ easy_install bleach
Upgrading Bleach
================
.. warning::
Before doing any upgrades, read through `Bleach Changes
<https://bleach.readthedocs.io/en/latest/changes.html>`_ for backwards
incompatible changes, newer versions, etc.
Basic use
=========
The simplest way to use Bleach is:
.. code-block:: python
>>> import bleach
>>> bleach.clean('an <script>evil()</script> example')
u'an &lt;script&gt;evil()&lt;/script&gt; example'
>>> bleach.linkify('an http://example.com url')
u'an <a href="http://example.com" rel="nofollow">http://example.com</a> url'
Security
========
Bleach is a security-related library.
We have a responsible security vulnerability reporting process. Please use
that if you're reporting a security issue.
Security issues are fixed in private. After we land such a fix, we'll do a
release.
For every release, we mark security issues we've fixed in the ``CHANGES`` in
the **Security issues** section. We include relevant CVE links.
Code of conduct
===============
This project and repository is governed by Mozilla's code of conduct and
etiquette guidelines. For more details please see the `Mozilla Community
Participation Guidelines
<https://www.mozilla.org/about/governance/policies/participation/>`_ and
`Developer Etiquette Guidelines
<https://bugzilla.mozilla.org/page.cgi?id=etiquette.html>`_.
.. _html5lib: https://github.com/html5lib/html5lib-python
.. _GitHub: https://github.com/mozilla/bleach
.. _ReadTheDocs: https://bleach.readthedocs.io/
.. _PyPI: http://pypi.python.org/pypi/bleach
Bleach changes
==============
Version 2.1.3 (March 5th, 2018)
-------------------------------
**Security fixes**
* Attributes that have URI values weren't properly sanitized if the
values contained character entities. Using character entities, it
was possible to construct a URI value with a scheme that was not
allowed that would slide through unsanitized.
This security issue was introduced in Bleach 2.1. Anyone using
Bleach 2.1 is highly encouraged to upgrade.
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Fixed some other edge cases for attribute URI value sanitizing and
improved testing of this code.
Version 2.1.2 (December 7th, 2017)
----------------------------------
**Security fixes**
None
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Support html5lib-python 1.0.1. (#337)
* Add deprecation warning for supporting html5lib-python < 1.0.
* Switch to semver.
Version 2.1.1 (October 2nd, 2017)
---------------------------------
**Security fixes**
None
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Fix ``setup.py`` opening files when ``LANG=``. (#324)
Version 2.1 (September 28th, 2017)
----------------------------------
**Security fixes**
* Convert control characters (backspace particularly) to "?" preventing
malicious copy-and-paste situations. (#298)
See `<https://github.com/mozilla/bleach/issues/298>`_ for more details.
This affects all previous versions of Bleach. Check the comments on that
issue for ways to alleviate the issue if you can't upgrade to Bleach 2.1.
**Backwards incompatible changes**
* Redid versioning. ``bleach.VERSION`` is no longer available. Use the string
version at ``bleach.__version__`` and parse it with
``pkg_resources.parse_version``. (#307)
* clean, linkify: linkify and clean should only accept text types; thank you,
Janusz! (#292)
* clean, linkify: accept only unicode or utf-8-encoded str (#176)
**Features**
**Bug fixes**
* ``bleach.clean()`` no longer unescapes entities including ones that are missing
a ``;`` at the end which can happen in urls and other places. (#143)
* linkify: fix http links inside of mailto links; thank you, sedrubal! (#300)
* clarify security policy in docs (#303)
* fix dependency specification for html5lib 1.0b8, 1.0b9, and 1.0b10; thank you,
Zoltán! (#268)
* add Bleach vs. html5lib comparison to README; thank you, Stu Cox! (#278)
* fix KeyError exceptions on tags without href attr; thank you, Alex Defsen!
(#273)
* add test website and scripts to test ``bleach.clean()`` output in browser;
thank you, Greg Guthe!
Version 2.0 (March 8th, 2017)
-----------------------------
**Security fixes**
* None
**Backwards incompatible changes**
* Removed support for Python 2.6. #206
* Removed support for Python 3.2. #224
* Bleach no longer supports html5lib < 0.99999999 (8 9s).
This version is a rewrite to use the new sanitizing API since the old
one was dropped in html5lib 0.99999999 (8 9s).
If you're using 0.9999999 (7 9s) upgrade to 0.99999999 (8 9s) or higher.
If you're using 1.0b8 (equivalent to 0.9999999 (7 9s)), upgrade to 1.0b9
(equivalent to 0.99999999 (8 9s)) or higher.
* ``bleach.clean`` and friends were rewritten
``clean`` was reimplemented as an html5lib filter and happens at a different
step in the HTML parsing -> traversing -> serializing process. Because of
that, there are some differences in clean's output as compared with previous
versions.
Amongst other things, this version will add end tags even if the tag in
question is to be escaped.
* ``bleach.clean`` and friends attribute callables now take three arguments:
tag, attribute name and attribute value. Previously they only took attribute
name and attribute value.
All attribute callables will need to be updated.
* ``bleach.linkify`` was rewritten
``linkify`` was reimplemented as an html5lib Filter. As such, it no longer
accepts a ``tokenizer`` argument.
The callback functions for adjusting link attributes now takes a namespaced
attribute.
Previously you'd do something like this::
def check_protocol(attrs, is_new):
if not attrs.get('href', '').startswith(('http:', 'https:')):
return None
return attrs
Now it's more like this::
def check_protocol(attrs, is_new):
if not attrs.get((None, u'href'), u'').startswith(('http:', 'https:')):
# ^^^^^^^^^^^^^^^
return None
return attrs
Further, you need to make sure you're always using unicode values. If you
don't then html5lib will raise an assertion error that the value is not
unicode.
All linkify filters will need to be updated.
* ``bleach.linkify`` and friends had a ``skip_pre`` argument--that's been
replaced with a more general ``skip_tags`` argument.
Before, you might do::
bleach.linkify(some_text, skip_pre=True)
The equivalent with Bleach 2.0 is::
bleach.linkify(some_text, skip_tags=['pre'])
You can skip other tags, too, like ``style`` or ``script`` or other places
where you don't want linkification happening.
All uses of linkify that use ``skip_pre`` will need to be updated.
**Changes**
* Supports Python 3.6.
* Supports html5lib >= 0.99999999 (8 9s).
* There's a ``bleach.sanitizer.Cleaner`` class that you can instantiate with your
favorite clean settings for easy reuse.
* There's a ``bleach.linkifier.Linker`` class that you can instantiate with your
favorite linkify settings for easy reuse.
* There's a ``bleach.linkifier.LinkifyFilter`` which is an html5lib filter that
you can pass as a filter to ``bleach.sanitizer.Cleaner`` allowing you to clean
and linkify in one pass.
* ``bleach.clean`` and friends can now take a callable as an attributes arg value.
* Tons of bug fixes.
* Cleaned up tests.
* Documentation fixes.
Version 1.5 (November 4th, 2016)
--------------------------------
**Security fixes**
* None
**Backwards incompatible changes**
* clean: The list of ``ALLOWED_PROTOCOLS`` now defaults to http, https and
mailto.
Previously it was a long list of protocols something like ed2k, ftp, http,
https, irc, mailto, news, gopher, nntp, telnet, webcal, xmpp, callto, feed,
urn, aim, rsync, tag, ssh, sftp, rtsp, afs, data. #149
**Changes**
* clean: Added ``protocols`` to arguments list to let you override the list of
allowed protocols. Thank you, Andreas Malecki! #149
* linkify: Fix a bug involving periods at the end of an email address. Thank you,
Lorenz Schori! #219
* linkify: Fix linkification of non-ascii ports. Thank you, Alexandre Macabies!
#207
* linkify: Fix linkify inappropriately removing node tails when dropping nodes.
#132
* Fixed a test that failed periodically. #161
* Switched from nose to py.test. #204
* Add test matrix for all supported Python and html5lib versions. #230
* Limit to html5lib ``>=0.999,!=0.9999,!=0.99999,<0.99999999`` because 0.9999
and 0.99999 are busted.
* Add support for ``python setup.py test``. #97
Version 1.4.3 (May 23rd, 2016)
------------------------------
**Security fixes**
* None
**Changes**
* Limit to html5lib ``>=0.999,<0.99999999`` because of impending change to
sanitizer api. #195
Version 1.4.2 (September 11, 2015)
----------------------------------
**Changes**
* linkify: Fix hang in linkify with ``parse_email=True``. #124
* linkify: Fix crash in linkify when removing a link that is a first-child. #136
* Updated TLDs.
* linkify: Don't remove exterior brackets when linkifying. #146
Version 1.4.1 (December 15, 2014)
---------------------------------
**Changes**
* Consistent order of attributes in output.
* Python 3.4 support.
Version 1.4 (January 12, 2014)
------------------------------
**Changes**
* linkify: Update linkify to use etree type Treewalker instead of simpletree.
* Updated html5lib to version ``>=0.999``.
* Update all code to be compatible with Python 3 and 2 using six.
* Switch to Apache License.
Version 1.3
-----------
* Used by Python 3-only fork.
Version 1.2.2 (May 18, 2013)
----------------------------
* Pin html5lib to version 0.95 for now due to major API break.
Version 1.2.1 (February 19, 2013)
---------------------------------
* ``clean()`` no longer considers ``feed:`` an acceptable protocol due to
inconsistencies in browser behavior.
Version 1.2 (January 28, 2013)
------------------------------
* ``linkify()`` has changed considerably. Many keyword arguments have been
replaced with a single callbacks list. Please see the documentation for more
information.
* Bleach will no longer consider unacceptable protocols when linkifying.
* ``linkify()`` now takes a tokenizer argument that allows it to skip
sanitization.
* ``delinkify()`` is gone.
* Removed exception handling from ``_render``. ``clean()`` and ``linkify()`` may
now throw.
* ``linkify()`` correctly ignores case for protocols and domain names.
* ``linkify()`` correctly handles markup within an <a> tag.
Version 1.1.5
-------------
Version 1.1.4
-------------
Version 1.1.3 (July 10, 2012)
-----------------------------
* Fix parsing bare URLs when parse_email=True.
Version 1.1.2 (June 1, 2012)
----------------------------
* Fix hang in style attribute sanitizer. (#61)
* Allow ``/`` in style attribute values.
Version 1.1.1 (February 17, 2012)
---------------------------------
* Fix tokenizer for html5lib 0.9.5.
Version 1.1.0 (October 24, 2011)
--------------------------------
* ``linkify()`` now understands port numbers. (#38)
* Documented character encoding behavior. (#41)
* Add an optional target argument to ``linkify()``.
* Add ``delinkify()`` method. (#45)
* Support subdomain whitelist for ``delinkify()``. (#47, #48)
Version 1.0.4 (September 2, 2011)
---------------------------------
* Switch to SemVer git tags.
* Make ``linkify()`` smarter about trailing punctuation. (#30)
* Pass ``exc_info`` to logger during rendering issues.
* Add wildcard key for attributes. (#19)
* Make ``linkify()`` use the ``HTMLSanitizer`` tokenizer. (#36)
* Fix URLs wrapped in parentheses. (#23)
* Make ``linkify()`` UTF-8 safe. (#33)
Version 1.0.3 (June 14, 2011)
-----------------------------
* ``linkify()`` works with 3rd level domains. (#24)
* ``clean()`` supports vendor prefixes in style values. (#31, #32)
* Fix ``linkify()`` email escaping.
Version 1.0.2 (June 6, 2011)
----------------------------
* ``linkify()`` supports email addresses.
* ``clean()`` supports callables in attributes filter.
Version 1.0.1 (April 12, 2011)
------------------------------
* ``linkify()`` doesn't drop trailing slashes. (#21)
* ``linkify()`` won't linkify 'libgl.so.1'. (#22)

View file

@@ -1,605 +0,0 @@
Metadata-Version: 2.0
Name: bleach
Version: 2.1.3
Summary: An easy safelist-based HTML-sanitizing tool.
Home-page: http://github.com/mozilla/bleach
Author: Will Kahn-Greene
Author-email: willkg@mozilla.com
License: Apache Software License
Description-Content-Type: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Dist: six
Requires-Dist: html5lib (!=1.0b1,!=1.0b2,!=1.0b3,!=1.0b4,!=1.0b5,!=1.0b6,!=1.0b7,!=1.0b8,>=0.99999999pre)
======
Bleach
======
.. image:: https://travis-ci.org/mozilla/bleach.svg?branch=master
:target: https://travis-ci.org/mozilla/bleach
.. image:: https://badge.fury.io/py/bleach.svg
:target: http://badge.fury.io/py/bleach
Bleach is an allowed-list-based HTML sanitizing library that escapes or strips
markup and attributes.
Bleach can also linkify text safely, applying filters that Django's ``urlize``
filter cannot, and optionally setting ``rel`` attributes, even on links already
in the text.
Bleach is intended for sanitizing text from *untrusted* sources. If you find
yourself jumping through hoops to allow your site administrators to do lots of
things, you're probably outside the use cases. Either trust those users, or
don't.
Because it relies on html5lib_, Bleach is as good as modern browsers at dealing
with weird, quirky HTML fragments. And *any* of Bleach's methods will fix
unbalanced or mis-nested tags.
The version on GitHub_ is the most up-to-date and contains the latest bug
fixes. You can find full documentation on `ReadTheDocs`_.
:Code: https://github.com/mozilla/bleach
:Documentation: https://bleach.readthedocs.io/
:Issue tracker: https://github.com/mozilla/bleach/issues
:IRC: ``#bleach`` on irc.mozilla.org
:License: Apache License v2; see LICENSE file
Reporting Bugs
==============
For regular bugs, please report them `in our issue tracker
<https://github.com/mozilla/bleach/issues>`_.
If you believe that you've found a security vulnerability, please `file a secure
bug report in our bug tracker
<https://bugzilla.mozilla.org/enter_bug.cgi?assigned_to=nobody%40mozilla.org&product=Webtools&component=Bleach-security&groups=webtools-security>`_
or send an email to *security AT mozilla DOT org*.
For more information on security-related bug disclosure and the PGP key to use
for sending encrypted mail or to verify responses received from that address,
please read our wiki page at
`<https://www.mozilla.org/en-US/security/#For_Developers>`_.
Installing Bleach
=================
Bleach is available on PyPI_, so you can install it with ``pip``::
$ pip install bleach
Or with ``easy_install``::
$ easy_install bleach
Upgrading Bleach
================
.. warning::
Before doing any upgrades, read through `Bleach Changes
<https://bleach.readthedocs.io/en/latest/changes.html>`_ for backwards
incompatible changes, newer versions, etc.
Basic use
=========
The simplest way to use Bleach is:
.. code-block:: python
>>> import bleach
>>> bleach.clean('an <script>evil()</script> example')
u'an &lt;script&gt;evil()&lt;/script&gt; example'
>>> bleach.linkify('an http://example.com url')
u'an <a href="http://example.com" rel="nofollow">http://example.com</a> url'
Security
========
Bleach is a security-related library.
We have a responsible security vulnerability reporting process. Please use
that if you're reporting a security issue.
Security issues are fixed in private. After we land such a fix, we'll do a
release.
For every release, we mark security issues we've fixed in the ``CHANGES`` in
the **Security issues** section. We include relevant CVE links.
Code of conduct
===============
This project and repository is governed by Mozilla's code of conduct and
etiquette guidelines. For more details please see the `Mozilla Community
Participation Guidelines
<https://www.mozilla.org/about/governance/policies/participation/>`_ and
`Developer Etiquette Guidelines
<https://bugzilla.mozilla.org/page.cgi?id=etiquette.html>`_.
.. _html5lib: https://github.com/html5lib/html5lib-python
.. _GitHub: https://github.com/mozilla/bleach
.. _ReadTheDocs: https://bleach.readthedocs.io/
.. _PyPI: http://pypi.python.org/pypi/bleach
Bleach changes
==============
Version 2.1.3 (March 5th, 2018)
-------------------------------
**Security fixes**
* Attributes that have URI values weren't properly sanitized if the
values contained character entities. Using character entities, it
was possible to construct a URI value with a scheme that was not
allowed that would slide through unsanitized.
This security issue was introduced in Bleach 2.1. Anyone using
Bleach 2.1 is highly encouraged to upgrade.
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Fixed some other edge cases for attribute URI value sanitizing and
improved testing of this code.
Version 2.1.2 (December 7th, 2017)
----------------------------------
**Security fixes**
None
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Support html5lib-python 1.0.1. (#337)
* Add deprecation warning for supporting html5lib-python < 1.0.
* Switch to semver.
Version 2.1.1 (October 2nd, 2017)
---------------------------------
**Security fixes**
None
**Backwards incompatible changes**
None
**Features**
None
**Bug fixes**
* Fix ``setup.py`` opening files when ``LANG=``. (#324)
Version 2.1 (September 28th, 2017)
----------------------------------
**Security fixes**
* Convert control characters (backspace particularly) to "?" preventing
malicious copy-and-paste situations. (#298)
See `<https://github.com/mozilla/bleach/issues/298>`_ for more details.
This affects all previous versions of Bleach. Check the comments on that
issue for ways to alleviate the issue if you can't upgrade to Bleach 2.1.
**Backwards incompatible changes**
* Redid versioning. ``bleach.VERSION`` is no longer available. Use the string
version at ``bleach.__version__`` and parse it with
``pkg_resources.parse_version``. (#307)
* clean, linkify: linkify and clean should only accept text types; thank you,
Janusz! (#292)
* clean, linkify: accept only unicode or utf-8-encoded str (#176)
**Features**
**Bug fixes**
* ``bleach.clean()`` no longer unescapes entities including ones that are missing
a ``;`` at the end which can happen in urls and other places. (#143)
* linkify: fix http links inside of mailto links; thank you, sedrubal! (#300)
* clarify security policy in docs (#303)
* fix dependency specification for html5lib 1.0b8, 1.0b9, and 1.0b10; thank you,
Zoltán! (#268)
* add Bleach vs. html5lib comparison to README; thank you, Stu Cox! (#278)
* fix KeyError exceptions on tags without href attr; thank you, Alex Defsen!
(#273)
* add test website and scripts to test ``bleach.clean()`` output in browser;
thank you, Greg Guthe!
Version 2.0 (March 8th, 2017)
-----------------------------
**Security fixes**
* None
**Backwards incompatible changes**
* Removed support for Python 2.6. #206
* Removed support for Python 3.2. #224
* Bleach no longer supports html5lib < 0.99999999 (8 9s).
This version is a rewrite to use the new sanitizing API since the old
one was dropped in html5lib 0.99999999 (8 9s).
If you're using 0.9999999 (7 9s) upgrade to 0.99999999 (8 9s) or higher.
If you're using 1.0b8 (equivalent to 0.9999999 (7 9s)), upgrade to 1.0b9
(equivalent to 0.99999999 (8 9s)) or higher.
* ``bleach.clean`` and friends were rewritten
``clean`` was reimplemented as an html5lib filter and happens at a different
step in the HTML parsing -> traversing -> serializing process. Because of
that, there are some differences in clean's output as compared with previous
versions.
Amongst other things, this version will add end tags even if the tag in
question is to be escaped.
* ``bleach.clean`` and friends attribute callables now take three arguments:
tag, attribute name and attribute value. Previously they only took attribute
name and attribute value.
All attribute callables will need to be updated.
* ``bleach.linkify`` was rewritten
``linkify`` was reimplemented as an html5lib Filter. As such, it no longer
accepts a ``tokenizer`` argument.
The callback functions for adjusting link attributes now takes a namespaced
attribute.
Previously you'd do something like this::
def check_protocol(attrs, is_new):
if not attrs.get('href', '').startswith(('http:', 'https:')):
return None
return attrs
Now it's more like this::
def check_protocol(attrs, is_new):
if not attrs.get((None, u'href'), u'').startswith(('http:', 'https:')):
# ^^^^^^^^^^^^^^^
return None
return attrs
Further, you need to make sure you're always using unicode values. If you
don't then html5lib will raise an assertion error that the value is not
unicode.
All linkify filters will need to be updated.
* ``bleach.linkify`` and friends had a ``skip_pre`` argument--that's been
replaced with a more general ``skip_tags`` argument.
Before, you might do::
bleach.linkify(some_text, skip_pre=True)
The equivalent with Bleach 2.0 is::
bleach.linkify(some_text, skip_tags=['pre'])
You can skip other tags, too, like ``style`` or ``script`` or other places
where you don't want linkification happening.
All uses of linkify that use ``skip_pre`` will need to be updated.
**Changes**
* Supports Python 3.6.
* Supports html5lib >= 0.99999999 (8 9s).
* There's a ``bleach.sanitizer.Cleaner`` class that you can instantiate with your
favorite clean settings for easy reuse.
* There's a ``bleach.linkifier.Linker`` class that you can instantiate with your
favorite linkify settings for easy reuse.
* There's a ``bleach.linkifier.LinkifyFilter`` which is an html5lib filter that
you can pass as a filter to ``bleach.sanitizer.Cleaner`` allowing you to clean
and linkify in one pass.
* ``bleach.clean`` and friends can now take a callable as an attributes arg value.
* Tons of bug fixes.
* Cleaned up tests.
* Documentation fixes.
Version 1.5 (November 4th, 2016)
--------------------------------
**Security fixes**
* None
**Backwards incompatible changes**
* clean: The list of ``ALLOWED_PROTOCOLS`` now defaults to http, https and
mailto.
Previously it was a long list of protocols something like ed2k, ftp, http,
https, irc, mailto, news, gopher, nntp, telnet, webcal, xmpp, callto, feed,
urn, aim, rsync, tag, ssh, sftp, rtsp, afs, data. #149
**Changes**
* clean: Added ``protocols`` to arguments list to let you override the list of
allowed protocols. Thank you, Andreas Malecki! #149
* linkify: Fix a bug involving periods at the end of an email address. Thank you,
Lorenz Schori! #219
* linkify: Fix linkification of non-ascii ports. Thank you, Alexandre Macabies!
#207
* linkify: Fix linkify inappropriately removing node tails when dropping nodes.
#132
* Fixed a test that failed periodically. #161
* Switched from nose to py.test. #204
* Add test matrix for all supported Python and html5lib versions. #230
* Limit to html5lib ``>=0.999,!=0.9999,!=0.99999,<0.99999999`` because 0.9999
and 0.99999 are busted.
* Add support for ``python setup.py test``. #97
Version 1.4.3 (May 23rd, 2016)
------------------------------
**Security fixes**
* None
**Changes**
* Limit to html5lib ``>=0.999,<0.99999999`` because of impending change to
sanitizer api. #195
Version 1.4.2 (September 11, 2015)
----------------------------------
**Changes**
* linkify: Fix hang in linkify with ``parse_email=True``. #124
* linkify: Fix crash in linkify when removing a link that is a first-child. #136
* Updated TLDs.
* linkify: Don't remove exterior brackets when linkifying. #146
Version 1.4.1 (December 15, 2014)
---------------------------------
**Changes**
* Consistent order of attributes in output.
* Python 3.4 support.
Version 1.4 (January 12, 2014)
------------------------------
**Changes**
* linkify: Update linkify to use etree type Treewalker instead of simpletree.
* Updated html5lib to version ``>=0.999``.
* Update all code to be compatible with Python 3 and 2 using six.
* Switch to Apache License.
Version 1.3
-----------
* Used by Python 3-only fork.
Version 1.2.2 (May 18, 2013)
----------------------------
* Pin html5lib to version 0.95 for now due to major API break.
Version 1.2.1 (February 19, 2013)
---------------------------------
* ``clean()`` no longer considers ``feed:`` an acceptable protocol due to
inconsistencies in browser behavior.
Version 1.2 (January 28, 2013)
------------------------------
* ``linkify()`` has changed considerably. Many keyword arguments have been
replaced with a single callbacks list. Please see the documentation for more
information.
* Bleach will no longer consider unacceptable protocols when linkifying.
* ``linkify()`` now takes a tokenizer argument that allows it to skip
sanitization.
* ``delinkify()`` is gone.
* Removed exception handling from ``_render``. ``clean()`` and ``linkify()`` may
now throw.
* ``linkify()`` correctly ignores case for protocols and domain names.
* ``linkify()`` correctly handles markup within an <a> tag.
Version 1.1.5
-------------
Version 1.1.4
-------------
Version 1.1.3 (July 10, 2012)
-----------------------------
* Fix parsing bare URLs when parse_email=True.
Version 1.1.2 (June 1, 2012)
----------------------------
* Fix hang in style attribute sanitizer. (#61)
* Allow ``/`` in style attribute values.
Version 1.1.1 (February 17, 2012)
---------------------------------
* Fix tokenizer for html5lib 0.9.5.
Version 1.1.0 (October 24, 2011)
--------------------------------
* ``linkify()`` now understands port numbers. (#38)
* Documented character encoding behavior. (#41)
* Add an optional target argument to ``linkify()``.
* Add ``delinkify()`` method. (#45)
* Support subdomain whitelist for ``delinkify()``. (#47, #48)
Version 1.0.4 (September 2, 2011)
---------------------------------
* Switch to SemVer git tags.
* Make ``linkify()`` smarter about trailing punctuation. (#30)
* Pass ``exc_info`` to logger during rendering issues.
* Add wildcard key for attributes. (#19)
* Make ``linkify()`` use the ``HTMLSanitizer`` tokenizer. (#36)
* Fix URLs wrapped in parentheses. (#23)
* Make ``linkify()`` UTF-8 safe. (#33)
Version 1.0.3 (June 14, 2011)
-----------------------------
* ``linkify()`` works with 3rd level domains. (#24)
* ``clean()`` supports vendor prefixes in style values. (#31, #32)
* Fix ``linkify()`` email escaping.
Version 1.0.2 (June 6, 2011)
----------------------------
* ``linkify()`` supports email addresses.
* ``clean()`` supports callables in attributes filter.
Version 1.0.1 (April 12, 2011)
------------------------------
* ``linkify()`` doesn't drop trailing slashes. (#21)
* ``linkify()`` won't linkify 'libgl.so.1'. (#22)

View file

@@ -1,17 +0,0 @@
bleach/__init__.py,sha256=nz4CT9t1nEUf1kSlNuRECzJSYONGYWSw34o3Z8qXobA,4238
bleach/callbacks.py,sha256=SRPRKUioBvShXqHUi1zE2Qdvm60wciab5i5lnu3cnng,804
bleach/linkifier.py,sha256=vCoS9fyKgK01O9fPc30e3qs0Ix9v7spie4xZFOKpiKc,19297
bleach/sanitizer.py,sha256=mlv66r0LUEhkiBJWFixjj265qVSb9cUiU0mxVsj2s0s,24390
bleach/utils.py,sha256=xehkBK0bogKXZ26LFFT6zeAJo0sZ4TZKQGZq7SBAFUI,1115
bleach-2.1.3.dist-info/DESCRIPTION.rst,sha256=3_RB5azZGUQRG1nBwLJpQnGFvqObmSQ8ippMtF8ZCW4,14589
bleach-2.1.3.dist-info/METADATA,sha256=fWQImy7FTRWXzxWVOMehkw6LcOfdx-JnEXupxG1-SjY,15721
bleach-2.1.3.dist-info/RECORD,,
bleach-2.1.3.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
bleach-2.1.3.dist-info/metadata.json,sha256=y66CizKN_Y09-DH1UVAF_pAd2UXDRIsOef3FZkqJAOI,1293
bleach-2.1.3.dist-info/top_level.txt,sha256=dcv0wKIySB0zMjAEXLwY4V0-3IN9UZQGAT1wDmfQICY,7
bleach-2.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
bleach/__pycache__/__init__.cpython-36.pyc,,
bleach/__pycache__/callbacks.cpython-36.pyc,,
bleach/__pycache__/linkifier.cpython-36.pyc,,
bleach/__pycache__/sanitizer.cpython-36.pyc,,
bleach/__pycache__/utils.cpython-36.pyc,,

View file

@@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "willkg@mozilla.com", "name": "Will Kahn-Greene", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mozilla/bleach"}}}, "extras": [], "generator": "bdist_wheel (0.30.0)", "license": "Apache Software License", "metadata_version": "2.0", "name": "bleach", "run_requires": [{"requires": ["html5lib (!=1.0b1,!=1.0b2,!=1.0b3,!=1.0b4,!=1.0b5,!=1.0b6,!=1.0b7,!=1.0b8,>=0.99999999pre)", "six"]}], "summary": "An easy safelist-based HTML-sanitizing tool.", "test_requires": [{"requires": ["pytest (>=3.0.0)"]}], "version": "2.1.3"}

View file

@@ -1,146 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from pkg_resources import parse_version
from bleach.linkifier import (
DEFAULT_CALLBACKS,
Linker,
)
from bleach.sanitizer import (
ALLOWED_ATTRIBUTES,
ALLOWED_PROTOCOLS,
ALLOWED_STYLES,
ALLOWED_TAGS,
Cleaner,
)
import html5lib
try:
_html5lib_version = html5lib.__version__.split('.')
if len(_html5lib_version) < 2:
_html5lib_version = _html5lib_version + ['0']
except Exception:
_html5lib_version = ['unknown', 'unknown']
# Bleach 3.0.0 won't support html5lib-python < 1.0.0.
if _html5lib_version < ['1', '0'] or 'b' in _html5lib_version[1]:
warnings.warn('Support for html5lib-python < 1.0.0 is deprecated.', DeprecationWarning)
# yyyymmdd
__releasedate__ = '20180305'
# x.y.z or x.y.z.dev0 -- semver
__version__ = '2.1.3'
VERSION = parse_version(__version__)
__all__ = ['clean', 'linkify']
def clean(text, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES,
styles=ALLOWED_STYLES, protocols=ALLOWED_PROTOCOLS, strip=False,
strip_comments=True):
"""Clean an HTML fragment of malicious content and return it
This function is a security-focused function whose sole purpose is to
remove malicious content from a string such that it can be displayed as
content in a web page.
This function is not designed to transform content for use in non-web-page contexts.
Example::
import bleach
better_text = bleach.clean(yucky_text)
.. Note::
If you're cleaning a lot of text and passing the same argument values or
you want more configurability, consider using a
:py:class:`bleach.sanitizer.Cleaner` instance.
:arg str text: the text to clean
:arg list tags: allowed list of tags; defaults to
``bleach.sanitizer.ALLOWED_TAGS``
:arg dict attributes: allowed attributes; can be a callable, list or dict;
defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES``
:arg list styles: allowed list of css styles; defaults to
``bleach.sanitizer.ALLOWED_STYLES``
:arg list protocols: allowed list of protocols for links; defaults
to ``bleach.sanitizer.ALLOWED_PROTOCOLS``
:arg bool strip: whether or not to strip disallowed elements
:arg bool strip_comments: whether or not to strip HTML comments
:returns: cleaned text as unicode
"""
cleaner = Cleaner(
tags=tags,
attributes=attributes,
styles=styles,
protocols=protocols,
strip=strip,
strip_comments=strip_comments,
)
return cleaner.clean(text)
def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_tags=None, parse_email=False):
"""Convert URL-like strings in an HTML fragment to links
This function converts strings that look like URLs, domain names and email
addresses in text that may be an HTML fragment to links, while preserving:
1. links already in the string
2. urls found in attributes
3. email addresses
linkify does a best-effort approach and tries to recover from bad
situations due to crazy text.
.. Note::
If you're linking a lot of text and passing the same argument values or
you want more configurability, consider using a
:py:class:`bleach.linkifier.Linker` instance.
.. Note::
If you have text that you want to clean and then linkify, consider using
the :py:class:`bleach.linkifier.LinkifyFilter` as a filter in the clean
pass. That way you're not parsing the HTML twice.
:arg str text: the text to linkify
:arg list callbacks: list of callbacks to run when adjusting tag attributes;
defaults to ``bleach.linkifier.DEFAULT_CALLBACKS``
:arg list skip_tags: list of tags that you don't want to linkify the
contents of; for example, you could set this to ``['pre']`` to skip
linkifying contents of ``pre`` tags
:arg bool parse_email: whether or not to linkify email addresses
:returns: linkified text as unicode
"""
linker = Linker(
callbacks=callbacks,
skip_tags=skip_tags,
parse_email=parse_email
)
return linker.linkify(text)
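
The docstring notes above recommend reusing configured instances over the module-level helpers when settings repeat; a minimal sketch of that pattern (assuming bleach 2.1.3 and its dependencies are installed)::

    from bleach.sanitizer import Cleaner
    from bleach.linkifier import Linker

    cleaner = Cleaner(tags=['a', 'p'], strip=True)
    linker = Linker(skip_tags=['pre'])

    print(cleaner.clean('<p onclick="evil()">hi <script>x</script></p>'))
    print(linker.linkify('docs live at http://example.com/page'))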

View file

@@ -1,33 +0,0 @@
"""A set of basic callbacks for bleach.linkify."""
from __future__ import unicode_literals
def nofollow(attrs, new=False):
href_key = (None, u'href')
if href_key not in attrs:
return attrs
if attrs[href_key].startswith(u'mailto:'):
return attrs
rel_key = (None, u'rel')
rel_values = [val for val in attrs.get(rel_key, u'').split(u' ') if val]
if u'nofollow' not in [rel_val.lower() for rel_val in rel_values]:
rel_values.append(u'nofollow')
attrs[rel_key] = u' '.join(rel_values)
return attrs
def target_blank(attrs, new=False):
href_key = (None, u'href')
if href_key not in attrs:
return attrs
if attrs[href_key].startswith(u'mailto:'):
return attrs
attrs[(None, u'target')] = u'_blank'
return attrs
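
Both callbacks follow the ``(attrs, new)`` signature that ``bleach.linkify`` expects; a short, hedged sketch of passing them in (assuming bleach is installed)::

    import bleach
    from bleach.callbacks import nofollow, target_blank

    html = bleach.linkify('see http://example.com',
                          callbacks=[nofollow, target_blank])
    print(html)  # the generated <a> tag carries rel="nofollow" and target="_blank"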

View file

@@ -1,541 +0,0 @@
from __future__ import unicode_literals
import re
import six
import html5lib
from html5lib.filters.base import Filter
from html5lib.filters.sanitizer import allowed_protocols
from html5lib.serializer import HTMLSerializer
from bleach import callbacks as linkify_callbacks
from bleach.utils import alphabetize_attributes, force_unicode
#: List of default callbacks
DEFAULT_CALLBACKS = [linkify_callbacks.nofollow]
TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az
ba bb bd be bf bg bh bi biz bj bm bn bo br bs bt bv bw by bz ca cat
cc cd cf cg ch ci ck cl cm cn co com coop cr cu cv cx cy cz de dj dk
dm do dz ec edu ee eg er es et eu fi fj fk fm fo fr ga gb gd ge gf gg
gh gi gl gm gn gov gp gq gr gs gt gu gw gy hk hm hn hr ht hu id ie il
im in info int io iq ir is it je jm jo jobs jp ke kg kh ki km kn kp
kr kw ky kz la lb lc li lk lr ls lt lu lv ly ma mc md me mg mh mil mk
ml mm mn mo mobi mp mq mr ms mt mu museum mv mw mx my mz na name nc ne
net nf ng ni nl no np nr nu nz om org pa pe pf pg ph pk pl pm pn post
pr pro ps pt pw py qa re ro rs ru rw sa sb sc sd se sg sh si sj sk sl
sm sn so sr ss st su sv sx sy sz tc td tel tf tg th tj tk tl tm tn to
tp tr travel tt tv tw tz ua ug uk us uy uz va vc ve vg vi vn vu wf ws
xn xxx ye yt yu za zm zw""".split()
# Make sure that .com doesn't get matched by .co first
TLDS.reverse()
def build_url_re(tlds=TLDS, protocols=allowed_protocols):
"""Builds the url regex used by linkifier
If you want a different set of tlds or allowed protocols, pass those in
and stomp on the existing ``url_re``::
from bleach import linkifier
my_url_re = linkifier.build_url_re(my_tlds_list, my_protocols)
linker = LinkifyFilter(url_re=my_url_re)
"""
return re.compile(
r"""\(* # Match any opening parentheses.
\b(?<![@.])(?:(?:{0}):/{{0,3}}(?:(?:\w+:)?\w+@)?)? # http://
([\w-]+\.)+(?:{1})(?:\:[0-9]+)?(?!\.\w)\b # xx.yy.tld(:##)?
(?:[/?][^\s\{{\}}\|\\\^\[\]`<>"]*)?
# /path/zz (excluding "unsafe" chars from RFC 1738,
# except for # and ~, which happen in practice)
""".format('|'.join(protocols), '|'.join(tlds)),
re.IGNORECASE | re.VERBOSE | re.UNICODE)
URL_RE = build_url_re()
PROTO_RE = re.compile(r'^[\w-]+:/{0,3}', re.IGNORECASE)
EMAIL_RE = re.compile(
r"""(?<!//)
(([-!#$%&'*+/=?^_`{}|~0-9A-Z]+
(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)* # dot-atom
|^"([\001-\010\013\014\016-\037!#-\[\]-\177]
|\\[\001-\011\013\014\016-\177])*" # quoted-string
)@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}) # domain
""",
re.IGNORECASE | re.MULTILINE | re.VERBOSE)
class Linker(object):
"""Convert URL-like strings in an HTML fragment to links
This class converts strings that look like URLs, domain names, and email
addresses in text that may be an HTML fragment to links, while preserving:
1. links already in the string
2. urls found in attributes
3. email addresses
linkify takes a best-effort approach and tries to recover gracefully from
malformed or messy input.
"""
def __init__(self, callbacks=DEFAULT_CALLBACKS, skip_tags=None, parse_email=False,
url_re=URL_RE, email_re=EMAIL_RE):
"""Creates a Linker instance
:arg list callbacks: list of callbacks to run when adjusting tag attributes;
defaults to ``bleach.linkifier.DEFAULT_CALLBACKS``
:arg list skip_tags: list of tags that you don't want to linkify the
contents of; for example, you could set this to ``['pre']`` to skip
linkifying contents of ``pre`` tags
:arg bool parse_email: whether or not to linkify email addresses
:arg re url_re: url matching regex
:arg re email_re: email matching regex
:returns: linkified text as unicode
"""
self.callbacks = callbacks
self.skip_tags = skip_tags
self.parse_email = parse_email
self.url_re = url_re
self.email_re = email_re
self.parser = html5lib.HTMLParser(namespaceHTMLElements=False)
self.walker = html5lib.getTreeWalker('etree')
self.serializer = HTMLSerializer(
quote_attr_values='always',
omit_optional_tags=False,
# linkify does not sanitize
sanitize=False,
# linkify alphabetizes
alphabetical_attributes=False,
)
def linkify(self, text):
"""Linkify specified text
:arg str text: the text to add links to
:returns: linkified text as unicode
:raises TypeError: if ``text`` is not a text type
"""
if not isinstance(text, six.string_types):
raise TypeError('argument must be of text type')
text = force_unicode(text)
if not text:
return u''
dom = self.parser.parseFragment(text)
filtered = LinkifyFilter(
source=self.walker(dom),
callbacks=self.callbacks,
skip_tags=self.skip_tags,
parse_email=self.parse_email,
url_re=self.url_re,
email_re=self.email_re,
)
return self.serializer.render(filtered)
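# --- Illustrative usage sketch (not part of the original module) ---------
# ``build_url_re`` above lets callers narrow the recognised TLDs and
# protocols; the tld/protocol lists and skipped tag here are made-up
# examples.
if __name__ == '__main__':
    my_url_re = build_url_re(tlds=['com', 'org'], protocols=['https'])
    linker = Linker(url_re=my_url_re, skip_tags=['pre'])
    # linker.linkify(text) would now only auto-link addresses ending in
    # .com / .org, leaving anything inside <pre> untouched.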
class LinkifyFilter(Filter):
"""html5lib filter that linkifies text
This will do the following:
* convert email addresses into links
* convert urls into links
* edit existing links by running them through callbacks--the default is to
add a ``rel="nofollow"``
This filter can be used anywhere html5lib filters can be used.
"""
def __init__(self, source, callbacks=None, skip_tags=None, parse_email=False,
url_re=URL_RE, email_re=EMAIL_RE):
"""Creates a LinkifyFilter instance
:arg TreeWalker source: stream
:arg list callbacks: list of callbacks to run when adjusting tag attributes;
defaults to ``bleach.linkifier.DEFAULT_CALLBACKS``
:arg list skip_tags: list of tags that you don't want to linkify the
contents of; for example, you could set this to ``['pre']`` to skip
linkifying contents of ``pre`` tags
:arg bool parse_email: whether or not to linkify email addresses
:arg re url_re: url matching regex
:arg re email_re: email matching regex
"""
super(LinkifyFilter, self).__init__(source)
self.callbacks = callbacks or []
self.skip_tags = skip_tags or []
self.parse_email = parse_email
self.url_re = url_re
self.email_re = email_re
def apply_callbacks(self, attrs, is_new):
"""Given an attrs dict and an is_new bool, runs through callbacks
Callbacks can return an adjusted attrs dict or ``None``. If a callback
returns ``None``, we stop running callbacks, return ``None``, and the link
gets dropped.
:arg dict attrs: map of ``(namespace, name)`` -> ``value``
:arg bool is_new: whether or not this link was added by linkify
:returns: adjusted attrs dict or ``None``
"""
for cb in self.callbacks:
attrs = cb(attrs, is_new)
if attrs is None:
return None
return attrs
def extract_character_data(self, token_list):
"""Extracts and squashes character sequences in a token stream"""
# FIXME(willkg): This is a terrible idea. What it does is drop all the
# tags from the token list and merge the Characters and SpaceCharacters
# tokens into a single text.
#
# So something like this::
#
# "<span>" "<b>" "some text" "</b>" "</span>"
#
# gets converted to "some text".
#
# This gets used to figure out the ``_text`` fauxttribute value for
# linkify callables.
#
# I'm not really sure how else to support that ``_text`` fauxttribute and
# maintain some modicum of backwards compatibility with previous versions
# of Bleach.
out = []
for token in token_list:
token_type = token['type']
if token_type in ['Characters', 'SpaceCharacters']:
out.append(token['data'])
return u''.join(out)
def handle_email_addresses(self, src_iter):
"""Handle email addresses in character tokens"""
for token in src_iter:
if token['type'] == 'Characters':
text = token['data']
new_tokens = []
end = 0
# For each email address we find in the text
for match in self.email_re.finditer(text):
if match.start() > end:
new_tokens.append(
{u'type': u'Characters', u'data': text[end:match.start()]}
)
# Run attributes through the callbacks to see what we
# should do with this match
attrs = {
(None, u'href'): u'mailto:%s' % match.group(0),
u'_text': match.group(0)
}
attrs = self.apply_callbacks(attrs, True)
if attrs is None:
# Just add the text--but not as a link
new_tokens.append(
{u'type': u'Characters', u'data': match.group(0)}
)
else:
# Add an "a" tag for the new link
_text = attrs.pop(u'_text', '')
attrs = alphabetize_attributes(attrs)
new_tokens.extend([
{u'type': u'StartTag', u'name': u'a', u'data': attrs},
{u'type': u'Characters', u'data': force_unicode(_text)},
{u'type': u'EndTag', u'name': 'a'}
])
end = match.end()
if new_tokens:
# Yield the adjusted set of tokens and then continue
# through the loop
if end < len(text):
new_tokens.append({u'type': u'Characters', u'data': text[end:]})
for new_token in new_tokens:
yield new_token
continue
yield token
def strip_non_url_bits(self, fragment):
"""Strips non-url bits from the url
This accounts for over-eager matching by the regex.
"""
prefix = suffix = ''
while fragment:
# Try removing ( from the beginning and, if it's balanced, from the
# end, too
if fragment.startswith(u'('):
prefix = prefix + u'('
fragment = fragment[1:]
if fragment.endswith(u')'):
suffix = u')' + suffix
fragment = fragment[:-1]
continue
# Now try extraneous things from the end. For example, sometimes we
# pick up ) at the end of a url, but the url is in a parenthesized
# phrase like:
#
# "i looked at the site (at http://example.com)"
if fragment.endswith(u')') and u'(' not in fragment:
fragment = fragment[:-1]
suffix = u')' + suffix
continue
# Handle commas
if fragment.endswith(u','):
fragment = fragment[:-1]
suffix = u',' + suffix
continue
# Handle periods
if fragment.endswith(u'.'):
fragment = fragment[:-1]
suffix = u'.' + suffix
continue
# Nothing matched, so we're done
break
return fragment, prefix, suffix
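# Illustrative trace (not executed here): given the over-eager match
# "(http://example.com).", the loop above peels the punctuation and returns
# ("http://example.com", "(", ")."), so only the URL itself is wrapped in a
# link while the surrounding characters are re-emitted as plain text.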
def handle_links(self, src_iter):
"""Handle links in character tokens"""
in_a = False  # True while inside an existing <a> tag, e.g. one emitted by handle_email_addresses when parse_email=True
for token in src_iter:
if in_a:
if token['type'] == 'EndTag' and token['name'] == 'a':
in_a = False
yield token
continue
elif token['type'] == 'StartTag' and token['name'] == 'a':
in_a = True
yield token
continue
if token['type'] == 'Characters':
text = token['data']
new_tokens = []
end = 0
for match in self.url_re.finditer(text):
if match.start() > end:
new_tokens.append(
{u'type': u'Characters', u'data': text[end:match.start()]}
)
url = match.group(0)
prefix = suffix = ''
# Sometimes we pick up too much in the url match, so look for
# bits we should drop and remove them from the match
url, prefix, suffix = self.strip_non_url_bits(url)
# If there's no protocol, add one
if PROTO_RE.search(url):
href = url
else:
href = u'http://%s' % url
attrs = {
(None, u'href'): href,
u'_text': url
}
attrs = self.apply_callbacks(attrs, True)
if attrs is None:
# Just add the text
new_tokens.append(
{u'type': u'Characters', u'data': prefix + url + suffix}
)
else:
# Add the "a" tag!
if prefix:
new_tokens.append(
{u'type': u'Characters', u'data': prefix}
)
_text = attrs.pop(u'_text', '')
attrs = alphabetize_attributes(attrs)
new_tokens.extend([
{u'type': u'StartTag', u'name': u'a', u'data': attrs},
{u'type': u'Characters', u'data': force_unicode(_text)},
{u'type': u'EndTag', u'name': 'a'},
])
if suffix:
new_tokens.append(
{u'type': u'Characters', u'data': suffix}
)
end = match.end()
if new_tokens:
# Yield the adjusted set of tokens and then continue
# through the loop
if end < len(text):
new_tokens.append({u'type': u'Characters', u'data': text[end:]})
for new_token in new_tokens:
yield new_token
continue
yield token
def handle_a_tag(self, token_buffer):
"""Handle the "a" tag
This could adjust the link or drop it altogether depending on what the
callbacks return.
This yields the new set of tokens.
"""
a_token = token_buffer[0]
if a_token['data']:
attrs = a_token['data']
else:
attrs = {}
text = self.extract_character_data(token_buffer)
attrs['_text'] = text
attrs = self.apply_callbacks(attrs, False)
if attrs is None:
# We're dropping the "a" tag and everything else and replacing
# it with character data. So emit that token.
yield {'type': 'Characters', 'data': text}
else:
new_text = attrs.pop('_text', '')
a_token['data'] = alphabetize_attributes(attrs)
if text == new_text:
# The callbacks didn't change the text, so we yield the new "a"
# token, then whatever else was there, then the end "a" token
yield a_token
for mem in token_buffer[1:]:
yield mem
else:
# If the callbacks changed the text, then we're going to drop
# all the tokens between the start and end "a" tags and replace
# it with the new text
yield a_token
yield {'type': 'Characters', 'data': force_unicode(new_text)}
yield token_buffer[-1]
def __iter__(self):
in_a = False
in_skip_tag = None
token_buffer = []
for token in super(LinkifyFilter, self).__iter__():
if in_a:
# Handle the case where we're in an "a" tag--we want to buffer tokens
# until we hit an end "a" tag.
if token['type'] == 'EndTag' and token['name'] == 'a':
# Add the end tag to the token buffer and then handle them
# and yield anything returned
token_buffer.append(token)
for new_token in self.handle_a_tag(token_buffer):
yield new_token
# Clear "a" related state and continue since we've yielded all
# the tokens we're going to yield
in_a = False
token_buffer = []
continue
else:
token_buffer.append(token)
continue
elif token['type'] in ['StartTag', 'EmptyTag']:
if token['name'] in self.skip_tags:
# Skip tags start a "special mode" where we don't linkify
# anything until the end tag.
in_skip_tag = token['name']
elif token['name'] == 'a':
# The "a" tag is special--we switch to a slurp mode and
# slurp all the tokens until the end "a" tag and then
# figure out what to do with them there.
in_a = True
token_buffer.append(token)
# We buffer the start tag, so we don't want to yield it,
# yet
continue
elif in_skip_tag and self.skip_tags:
# NOTE(willkg): We put this clause here since in_a and
# switching in and out of in_a takes precedence.
if token['type'] == 'EndTag' and token['name'] == in_skip_tag:
in_skip_tag = None
elif not in_a and not in_skip_tag and token['type'] == 'Characters':
new_stream = iter([token])
if self.parse_email:
new_stream = self.handle_email_addresses(new_stream)
new_stream = self.handle_links(new_stream)
for token in new_stream:
yield token
# We've already yielded this token, so continue
continue
yield token
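# --- Illustrative usage sketch (not part of the original module) ---------
# LinkifyFilter can be dropped into a plain html5lib pipeline, mirroring
# what Linker.linkify does above; the fragment is a made-up example.
if __name__ == '__main__':
    parser = html5lib.HTMLParser(namespaceHTMLElements=False)
    walker = html5lib.getTreeWalker('etree')
    serializer = HTMLSerializer(quote_attr_values='always', omit_optional_tags=False)
    dom = parser.parseFragment('the docs live at example.com/docs')
    filtered = LinkifyFilter(source=walker(dom))
    print(serializer.render(filtered))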

View file

@ -1,771 +0,0 @@
from __future__ import unicode_literals
from itertools import chain
import re
import string
import six
from six.moves.urllib.parse import urlparse
from xml.sax.saxutils import unescape
import html5lib
from html5lib.constants import (
entities,
namespaces,
prefixes,
tokenTypes,
)
try:
from html5lib.constants import ReparseException
except ImportError:
# html5lib-python 1.0 changed the name
from html5lib.constants import _ReparseException as ReparseException
from html5lib.filters.base import Filter
from html5lib.filters import sanitizer
from html5lib.serializer import HTMLSerializer
from html5lib._tokenizer import HTMLTokenizer
from html5lib._trie import Trie
from bleach.utils import alphabetize_attributes, force_unicode
#: Map of entity name to expanded entity
ENTITIES = entities
#: Trie of html entity string -> character representation
ENTITIES_TRIE = Trie(ENTITIES)
#: List of allowed tags
ALLOWED_TAGS = [
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'strong',
'ul',
]
#: Map of allowed attributes by tag
ALLOWED_ATTRIBUTES = {
'a': ['href', 'title'],
'abbr': ['title'],
'acronym': ['title'],
}
#: List of allowed styles
ALLOWED_STYLES = []
#: List of allowed protocols
ALLOWED_PROTOCOLS = ['http', 'https', 'mailto']
AMP_SPLIT_RE = re.compile('(&)')
#: Invisible characters--0 to and including 31 except 9 (tab), 10 (lf), and 13 (cr)
INVISIBLE_CHARACTERS = ''.join([chr(c) for c in chain(range(0, 9), range(11, 13), range(14, 32))])
#: Regexp for characters that are invisible
INVISIBLE_CHARACTERS_RE = re.compile(
'[' + INVISIBLE_CHARACTERS + ']',
re.UNICODE
)
#: String to replace invisible characters with. This can be a character, a
#: string, or even a function that takes a Python re matchobj
INVISIBLE_REPLACEMENT_CHAR = '?'
def convert_entity(value):
"""Convert an entity (minus the & and ; part) into what it represents
This handles numeric, hex, and text entities.
:arg value: the string (minus the ``&`` and ``;`` part) to convert
:returns: unicode character
"""
if value[0] == '#':
if value[1] in ('x', 'X'):
return six.unichr(int(value[2:], 16))
return six.unichr(int(value[1:], 10))
return ENTITIES[value]
def convert_entities(text):
"""Converts all found entities in the text
:arg text: the text to convert entities in
:returns: unicode text with converted entities
"""
if '&' not in text:
return text
new_text = []
for part in next_possible_entity(text):
if not part:
continue
if part.startswith('&'):
entity = match_entity(part)
if entity is not None:
new_text.append(convert_entity(entity))
remainder = part[len(entity) + 2:]
if part:
new_text.append(remainder)
continue
new_text.append(part)
return u''.join(new_text)
class BleachHTMLTokenizer(HTMLTokenizer):
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# We don't want to consume and convert entities, so this overrides the
# html5lib tokenizer's consumeEntity so that it's now a no-op.
#
# However, when that gets called, it's consumed an &, so we put that in
# the stream.
if fromAttribute:
self.currentToken['data'][-1][1] += '&'
else:
self.tokenQueue.append({"type": tokenTypes['Characters'], "data": '&'})
class BleachHTMLParser(html5lib.HTMLParser):
def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
# Override HTMLParser so we can swap out the tokenizer for our own.
self.innerHTMLMode = innerHTML
self.container = container
self.scripting = scripting
self.tokenizer = BleachHTMLTokenizer(stream, parser=self, **kwargs)
self.reset()
try:
self.mainLoop()
except ReparseException:
self.reset()
self.mainLoop()
class Cleaner(object):
"""Cleaner for cleaning HTML fragments of malicious content
This cleaner is a security-focused function whose sole purpose is to remove
malicious content from a string such that it can be displayed as content in
a web page.
This cleaner is not designed to transform content for use in non-web-page
contexts.
To use::
from bleach.sanitizer import Cleaner
cleaner = Cleaner()
for text in all_the_yucky_things:
sanitized = cleaner.clean(text)
"""
def __init__(self, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES,
styles=ALLOWED_STYLES, protocols=ALLOWED_PROTOCOLS, strip=False,
strip_comments=True, filters=None):
"""Initializes a Cleaner
:arg list tags: allowed list of tags; defaults to
``bleach.sanitizer.ALLOWED_TAGS``
:arg dict attributes: allowed attributes; can be a callable, list or dict;
defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES``
:arg list styles: allowed list of css styles; defaults to
``bleach.sanitizer.ALLOWED_STYLES``
:arg list protocols: allowed list of protocols for links; defaults
to ``bleach.sanitizer.ALLOWED_PROTOCOLS``
:arg bool strip: whether or not to strip disallowed elements
:arg bool strip_comments: whether or not to strip HTML comments
:arg list filters: list of html5lib Filter classes to pass streamed content through
.. seealso:: http://html5lib.readthedocs.io/en/latest/movingparts.html#filters
.. Warning::
Using filters changes the output of ``bleach.Cleaner.clean``.
Make sure the way the filters change the output are secure.
"""
self.tags = tags
self.attributes = attributes
self.styles = styles
self.protocols = protocols
self.strip = strip
self.strip_comments = strip_comments
self.filters = filters or []
self.parser = BleachHTMLParser(namespaceHTMLElements=False)
self.walker = html5lib.getTreeWalker('etree')
self.serializer = BleachHTMLSerializer(
quote_attr_values='always',
omit_optional_tags=False,
escape_lt_in_attrs=True,
# We want to leave entities as they are without escaping or
# resolving or expanding
resolve_entities=False,
# Bleach has its own sanitizer, so don't use the html5lib one
sanitize=False,
# Bleach sanitizer alphabetizes already, so don't use the html5lib one
alphabetical_attributes=False,
)
def clean(self, text):
"""Cleans text and returns sanitized result as unicode
:arg str text: text to be cleaned
:returns: sanitized text as unicode
:raises TypeError: if ``text`` is not a text type
"""
if not isinstance(text, six.string_types):
message = "argument cannot be of '{name}' type, must be of text type".format(
name=text.__class__.__name__)
raise TypeError(message)
if not text:
return u''
text = force_unicode(text)
dom = self.parser.parseFragment(text)
filtered = BleachSanitizerFilter(
source=self.walker(dom),
# Bleach-sanitizer-specific things
attributes=self.attributes,
strip_disallowed_elements=self.strip,
strip_html_comments=self.strip_comments,
# html5lib-sanitizer things
allowed_elements=self.tags,
allowed_css_properties=self.styles,
allowed_protocols=self.protocols,
allowed_svg_properties=[],
)
# Apply any filters after the BleachSanitizerFilter
for filter_class in self.filters:
filtered = filter_class(source=filtered)
return self.serializer.render(filtered)
def attribute_filter_factory(attributes):
"""Generates attribute filter function for the given attributes value
The attributes value can take one of several shapes. This returns a filter
function appropriate to the attributes value. One nice thing about this is
that there are fewer if/then shenanigans in the ``allow_token`` method.
"""
if callable(attributes):
return attributes
if isinstance(attributes, dict):
def _attr_filter(tag, attr, value):
if tag in attributes:
attr_val = attributes[tag]
if callable(attr_val):
return attr_val(tag, attr, value)
if attr in attr_val:
return True
if '*' in attributes:
attr_val = attributes['*']
if callable(attr_val):
return attr_val(tag, attr, value)
return attr in attr_val
return False
return _attr_filter
if isinstance(attributes, list):
def _attr_filter(tag, attr, value):
return attr in attributes
return _attr_filter
raise ValueError('attributes needs to be a callable, a list or a dict')
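# --- Illustrative usage sketch (not part of the original module) ---------
# ``attributes`` may be a list, a dict, or a callable; the tag/attribute
# names and the callable below are made-up examples.
if __name__ == '__main__':
    def img_filter(tag, name, value):
        # hypothetical callable: keep only src/alt on <img>
        return name in ('src', 'alt')

    attr_filter = attribute_filter_factory({'a': ['href', 'title'], 'img': img_filter})
    print(attr_filter('a', 'href', 'http://example.com'))   # True
    print(attr_filter('a', 'onclick', 'alert(1)'))          # False
    print(attr_filter('img', 'src', '/logo.png'))           # True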
def match_entity(stream):
"""Returns first entity in stream or None if no entity exists
Note: For Bleach purposes, entities must start with a "&" and end with
a ";".
:arg stream: the character stream
:returns: ``None`` or the entity string without "&" or ";"
"""
# Nix the & at the beginning
if stream[0] != '&':
raise ValueError('Stream should begin with "&"')
stream = stream[1:]
stream = list(stream)
possible_entity = ''
end_characters = '<&=;' + string.whitespace
# Handle number entities
if stream and stream[0] == '#':
possible_entity = '#'
stream.pop(0)
if stream and stream[0] in ('x', 'X'):
allowed = '0123456789abcdefABCDEF'
possible_entity += stream.pop(0)
else:
allowed = '0123456789'
# FIXME(willkg): Do we want to make sure these are valid number
# entities? This doesn't do that currently.
while stream and stream[0] not in end_characters:
c = stream.pop(0)
if c not in allowed:
break
possible_entity += c
if possible_entity and stream and stream[0] == ';':
return possible_entity
return None
# Handle character entities
while stream and stream[0] not in end_characters:
c = stream.pop(0)
if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity):
break
possible_entity += c
if possible_entity and stream and stream[0] == ';':
return possible_entity
return None
def next_possible_entity(text):
"""Takes a text and generates a list of possible entities
:arg text: the text to look at
:returns: generator where each part (except the first) starts with an
"&"
"""
for i, part in enumerate(AMP_SPLIT_RE.split(text)):
if i == 0:
yield part
elif i % 2 == 0:
yield '&' + part
class BleachSanitizerFilter(sanitizer.Filter):
"""html5lib Filter that sanitizes text
This filter can be used anywhere html5lib filters can be used.
"""
def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,
strip_disallowed_elements=False, strip_html_comments=True,
**kwargs):
"""Creates a BleachSanitizerFilter instance
:arg Treewalker source: stream
:arg list tags: allowed list of tags; defaults to
``bleach.sanitizer.ALLOWED_TAGS``
:arg dict attributes: allowed attributes; can be a callable, list or dict;
defaults to ``bleach.sanitizer.ALLOWED_ATTRIBUTES``
:arg list styles: allowed list of css styles; defaults to
``bleach.sanitizer.ALLOWED_STYLES``
:arg list protocols: allowed list of protocols for links; defaults
to ``bleach.sanitizer.ALLOWED_PROTOCOLS``
:arg bool strip_disallowed_elements: whether or not to strip disallowed
elements
:arg bool strip_html_comments: whether or not to strip HTML comments
"""
self.attr_filter = attribute_filter_factory(attributes)
self.strip_disallowed_elements = strip_disallowed_elements
self.strip_html_comments = strip_html_comments
return super(BleachSanitizerFilter, self).__init__(source, **kwargs)
def __iter__(self):
for token in Filter.__iter__(self):
ret = self.sanitize_token(token)
if not ret:
continue
if isinstance(ret, list):
for subtoken in ret:
yield subtoken
else:
yield ret
def sanitize_token(self, token):
"""Sanitize a token either by HTML-encoding or dropping.
Unlike sanitizer.Filter, allowed_attributes can be a dict of {'tag':
['attribute', 'pairs'], 'tag': callable}.
Here callable is a function that takes the tag, attribute name, and
attribute value, and returns True or False.
Also gives the option to strip tags instead of encoding.
:arg dict token: token to sanitize
:returns: token or list of tokens
"""
token_type = token['type']
if token_type in ['StartTag', 'EndTag', 'EmptyTag']:
if token['name'] in self.allowed_elements:
return self.allow_token(token)
elif self.strip_disallowed_elements:
return None
else:
if 'data' in token:
# Alphabetize the attributes before calling .disallowed_token()
# so that the resulting string is stable
token['data'] = alphabetize_attributes(token['data'])
return self.disallowed_token(token)
elif token_type == 'Comment':
if not self.strip_html_comments:
return token
else:
return None
elif token_type == 'Characters':
return self.sanitize_characters(token)
else:
return token
def sanitize_characters(self, token):
"""Handles Characters tokens
Our overridden tokenizer doesn't do anything with entities. However,
that means that the serializer will convert all ``&`` in Characters
tokens to ``&amp;``.
Since we don't want that, we extract entities here and convert them to
Entity tokens so the serializer will let them be.
:arg token: the Characters token to work on
:returns: a list of tokens
"""
data = token.get('data', '')
if not data:
return token
data = INVISIBLE_CHARACTERS_RE.sub(INVISIBLE_REPLACEMENT_CHAR, data)
token['data'] = data
# If there isn't a & in the data, we can return now
if '&' not in data:
return token
new_tokens = []
# For each possible entity that starts with a "&", we try to extract an
# actual entity and re-tokenize accordingly
for part in next_possible_entity(data):
if not part:
continue
if part.startswith('&'):
entity = match_entity(part)
if entity is not None:
new_tokens.append({'type': 'Entity', 'name': entity})
# Length of the entity plus 2--one for & at the beginning
# and one for ; at the end
remainder = part[len(entity) + 2:]
if remainder:
new_tokens.append({'type': 'Characters', 'data': remainder})
continue
new_tokens.append({'type': 'Characters', 'data': part})
return new_tokens
def sanitize_uri_value(self, value, allowed_protocols):
"""Checks a uri value to see if it's allowed
:arg value: the uri value to sanitize
:arg allowed_protocols: list of allowed protocols
:returns: allowed value or None
"""
# NOTE(willkg): This transforms the value into one that's easier to
# match and verify, but shouldn't get returned since it's vastly
# different than the original value.
# Convert all character entities in the value
new_value = convert_entities(value)
# Nix backtick, space characters, and control characters
new_value = re.sub(
"[`\000-\040\177-\240\s]+",
'',
new_value
)
# Remove REPLACEMENT characters
new_value = new_value.replace('\ufffd', '')
# Lowercase it--this breaks the value, but makes it easier to match
# against
new_value = new_value.lower()
# Drop attributes with uri values that have protocols that aren't
# allowed
parsed = urlparse(new_value)
if parsed.scheme:
# If urlparse found a scheme, check that
if parsed.scheme in allowed_protocols:
return value
else:
# Allow uris that are just an anchor
if new_value.startswith('#'):
return value
# Handle protocols that urlparse doesn't recognize like "myprotocol"
if ':' in new_value and new_value.split(':')[0] in allowed_protocols:
return value
# If there's no protocol/scheme specified, then assume it's "http"
# and see if that's allowed
if 'http' in allowed_protocols:
return value
return None
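# Illustrative behaviour (not executed here): with the default protocol
# list, a value such as "https://example.com/page" is returned unchanged,
# "JaVaScRiPt:alert(1)" yields None so the attribute is dropped, and a bare
# "#fragment" anchor is allowed through.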
def allow_token(self, token):
"""Handles the case where we're allowing the tag"""
if 'data' in token:
# Loop through all the attributes and drop the ones that are not
# allowed, are unsafe or break other rules. Additionally, fix
# attribute values that need fixing.
#
# At the end of this loop, we have the final set of attributes
# we're keeping.
attrs = {}
for namespaced_name, val in token['data'].items():
namespace, name = namespaced_name
# Drop attributes that are not explicitly allowed
#
# NOTE(willkg): We pass in the attribute name--not a namespaced
# name.
if not self.attr_filter(token['name'], name, val):
continue
# Drop attributes with uri values that use a disallowed protocol
# Sanitize attributes with uri values
if namespaced_name in self.attr_val_is_uri:
new_value = self.sanitize_uri_value(val, self.allowed_protocols)
if new_value is None:
continue
val = new_value
# Drop values in svg attrs with non-local IRIs
if namespaced_name in self.svg_attr_val_allows_ref:
new_val = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(val))
new_val = new_val.strip()
if not new_val:
continue
else:
# Replace the val with the unescaped version because
# it's an IRI
val = new_val
# Drop href and xlink:href attr for svg elements with non-local IRIs
if (None, token['name']) in self.svg_allow_local_href:
if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]:
if re.search(r'^\s*[^#\s]', val):
continue
# If it's a style attribute, sanitize it
if namespaced_name == (None, u'style'):
val = self.sanitize_css(val)
# At this point, we want to keep the attribute, so add it in
attrs[namespaced_name] = val
token['data'] = alphabetize_attributes(attrs)
return token
def disallowed_token(self, token):
token_type = token["type"]
if token_type == "EndTag":
token["data"] = "</%s>" % token["name"]
elif token["data"]:
assert token_type in ("StartTag", "EmptyTag")
attrs = []
for (ns, name), v in token["data"].items():
attrs.append(' %s="%s"' % (
name if ns is None else "%s:%s" % (prefixes[ns], name),
# NOTE(willkg): HTMLSerializer escapes attribute values
# already, so if we do it here (like HTMLSerializer does),
# then we end up double-escaping.
v)
)
token["data"] = "<%s%s>" % (token["name"], ''.join(attrs))
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
token["type"] = "Characters"
del token["name"]
return token
def sanitize_css(self, style):
"""Sanitizes css in style tags"""
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
# Validate the css in the style tag and if it's not valid, then drop
# the whole thing.
parts = style.split(';')
gauntlet = re.compile(
r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$"""
)
for part in parts:
if not gauntlet.match(part):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall('([-\w]+)\s*:\s*([^:;]*)', style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
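# Illustrative behaviour (not executed here): if allowed_css_properties
# were ['color'], a style of "color: red; position: fixed" would come back
# as "color: red;" -- the disallowed declaration is silently dropped.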
class BleachHTMLSerializer(HTMLSerializer):
"""Wraps the HTMLSerializer and undoes & -> &amp; in attributes"""
def escape_base_amp(self, stoken):
"""Escapes bare & in HTML attribute values"""
# First, undo what the HTMLSerializer did
stoken = stoken.replace('&amp;', '&')
# Then, escape any bare &
for part in next_possible_entity(stoken):
if not part:
continue
if part.startswith('&'):
entity = match_entity(part)
if entity is not None:
yield '&' + entity + ';'
# Length of the entity plus 2--one for & at the beginning
# and one for ; at the end
part = part[len(entity) + 2:]
if part:
yield part
continue
yield part.replace('&', '&amp;')
def serialize(self, treewalker, encoding=None):
"""Wrap HTMLSerializer.serialize and escape bare & in attributes"""
in_tag = False
after_equals = False
for stoken in super(BleachHTMLSerializer, self).serialize(treewalker, encoding):
if in_tag:
if stoken == '>':
in_tag = False
elif after_equals:
if stoken != '"':
for part in self.escape_base_amp(stoken):
yield part
after_equals = False
continue
elif stoken == '=':
after_equals = True
yield stoken
else:
if stoken.startswith('<'):
in_tag = True
yield stoken
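# --- Illustrative usage sketch (not part of the original module) ---------
# End-to-end use of the Cleaner defined above; the markup is a made-up
# example.
if __name__ == '__main__':
    cleaner = Cleaner(tags=['a', 'b'], attributes={'a': ['href']}, strip=True)
    dirty = '<b onclick="evil()">hi</b> <a href="javascript:x">link</a>'
    # The disallowed onclick attribute is dropped, and the javascript: href
    # is removed because its protocol is not in the allowed list.
    print(cleaner.clean(dirty))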

View file

@ -1,44 +0,0 @@
from collections import OrderedDict
import six
def _attr_key(attr):
"""Returns appropriate key for sorting attribute names
Attribute names are a tuple of ``(namespace, name)`` where namespace can be
``None`` or a string. These can't be compared in Python 3, so we convert the
``None`` to an empty string.
"""
key = (attr[0][0] or ''), attr[0][1]
return key
def alphabetize_attributes(attrs):
"""Takes a dict of attributes (or None) and returns them alphabetized"""
if not attrs:
return attrs
return OrderedDict(
[(k, v) for k, v in sorted(attrs.items(), key=_attr_key)]
)
def force_unicode(text):
"""Takes a text (Python 2: str/unicode; Python 3: unicode) and converts to unicode
:arg str/unicode text: the text in question
:returns: text as unicode
:raises UnicodeDecodeError: if the text was a Python 2 str and isn't in
utf-8
"""
# If it's already unicode, then return it
if isinstance(text, six.text_type):
return text
# If not, convert it
return six.text_type(text, 'utf-8', 'strict')
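# --- Illustrative usage sketch (not part of the original module) ---------
# Attribute keys are (namespace, name) tuples, matching how the html5lib
# tree walkers used elsewhere in bleach represent attributes.
if __name__ == '__main__':
    attrs = {(None, u'title'): u'Example', (None, u'href'): u'http://example.com'}
    print(alphabetize_attributes(attrs))   # href sorts before title
    print(force_unicode(b'caf\xc3\xa9'))   # utf-8 bytes decoded to text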

View file

@ -1,48 +0,0 @@
Certifi: Python SSL Certificates
================================
`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
Enjoy!
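A common way to put the bundle to work (a sketch, not part of ``certifi``
itself) is to hand it to the standard library's ``ssl`` module::

    import ssl
    import certifi

    # Build a default TLS context that verifies against certifi's CA bundle
    context = ssl.create_default_context(cafile=certifi.where())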
1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~
Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.
In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed. To assist in migrating old
code, the function ``certifi.old_where()`` continues to exist as an alias of
``certifi.where()``. Please update your code to use ``certifi.where()``
instead. ``certifi.old_where()`` will be removed in 2018.
.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/

View file

@ -1,21 +0,0 @@
This package contains a modified version of ca-bundle.crt:
ca-bundle.crt -- Bundle of CA Root Certificates
Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011
This is a bundle of X.509 certificates of public Certificate Authorities
(CA). These were automatically extracted from Mozilla's root certificates
file (certdata.txt). This file can be found in the mozilla source tree:
http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1
It contains the certificates in PEM format and therefore
can be directly used with curl / libcurl / php_curl, or with
an Apache+mod_ssl webserver for SSL client authentication.
Just configure this file as the SSLCACertificateFile.
***** BEGIN LICENSE BLOCK *****
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
one at http://mozilla.org/MPL/2.0/.
***** END LICENSE BLOCK *****
@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $

View file

@ -1,71 +0,0 @@
Metadata-Version: 2.0
Name: certifi
Version: 2018.4.16
Summary: Python package for providing Mozilla's CA Bundle.
Home-page: http://certifi.io/
Author: Kenneth Reitz
Author-email: me@kennethreitz.com
License: MPL-2.0
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
Classifier: Natural Language :: English
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Certifi: Python SSL Certificates
================================
`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.
Installation
------------
``certifi`` is available on PyPI. Simply install it with ``pip``::
$ pip install certifi
Usage
-----
To reference the installed certificate authority (CA) bundle, you can use the
built-in function::
>>> import certifi
>>> certifi.where()
'/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'
Enjoy!
1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~
Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.
In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
to intentionally re-add the 1024-bit roots back into your bundle. This was not
recommended in production and therefore was removed. To assist in migrating old
code, the function ``certifi.old_where()`` continues to exist as an alias of
``certifi.where()``. Please update your code to use ``certifi.where()``
instead. ``certifi.old_where()`` will be removed in 2018.
.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/

View file

@ -1,15 +0,0 @@
certifi/__init__.py,sha256=KHDlQtQQTRmOG0TJi12ZIE5WWq2tYHM5ax30EX6UJ04,63
certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41
certifi/cacert.pem,sha256=0lwMLbfi4umzDdOmdLMdrNkgZxw-5y6PCE10PrnJy-k,268839
certifi/core.py,sha256=xPQDdG_siy5A7BfqGWa7RJhcA61xXEqPiSrw9GNyhHE,836
certifi-2018.4.16.dist-info/DESCRIPTION.rst,sha256=jXrtxvB2mFIsHbuK8aP8RXrMx5yecyAIMZ2cn8Xb_ro,1679
certifi-2018.4.16.dist-info/LICENSE.txt,sha256=anCkv2sBABbVmmS4rkrY3H9e8W8ftFPMLs13HFo0ETE,1048
certifi-2018.4.16.dist-info/METADATA,sha256=uYCLBFPwRU0XfEULiHO8iLo1QELisMwd9CSJ_Bw4DIc,2570
certifi-2018.4.16.dist-info/RECORD,,
certifi-2018.4.16.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
certifi-2018.4.16.dist-info/metadata.json,sha256=ayQwq1S2ID9f_MxGU0ZEouhzp5UoCwVtNT3ZLM23p7g,1006
certifi-2018.4.16.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi-2018.4.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
certifi/__pycache__/__init__.cpython-36.pyc,,
certifi/__pycache__/__main__.cpython-36.pyc,,
certifi/__pycache__/core.cpython-36.pyc,,

View file

@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0.a0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@ -1 +0,0 @@
{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "extensions": {"python.details": {"contacts": [{"email": "me@kennethreitz.com", "name": "Kenneth Reitz", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://certifi.io/"}}}, "generator": "bdist_wheel (0.30.0.a0)", "license": "MPL-2.0", "metadata_version": "2.0", "name": "certifi", "summary": "Python package for providing Mozilla's CA Bundle.", "version": "2018.4.16"}

View file

@ -1,3 +0,0 @@
from .core import where, old_where
__version__ = "2018.04.16"

View file

@ -1,2 +0,0 @@
from certifi import where
print(where())

File diff suppressed because it is too large

View file

@ -1,37 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certifi.py
~~~~~~~~~~
This module returns the installation location of cacert.pem.
"""
import os
import warnings
class DeprecatedBundleWarning(DeprecationWarning):
"""
The weak security bundle is being deprecated. Please bother your service
provider to get them to stop using cross-signed roots.
"""
def where():
f = os.path.dirname(__file__)
return os.path.join(f, 'cacert.pem')
def old_where():
warnings.warn(
"The weak security bundle has been removed. certifi.old_where() is now an alias "
"of certifi.where(). Please update your code to use certifi.where() instead. "
"certifi.old_where() will be removed in 2018.",
DeprecatedBundleWarning
)
return where()
if __name__ == '__main__':
print(where())

View file

@ -1,70 +0,0 @@
Chardet: The Universal Character Encoding Detector
--------------------------------------------------
.. image:: https://img.shields.io/travis/chardet/chardet/stable.svg
:alt: Build status
:target: https://travis-ci.org/chardet/chardet
.. image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg
:target: https://coveralls.io/r/chardet/chardet
.. image:: https://img.shields.io/pypi/v/chardet.svg
:target: https://warehouse.python.org/project/chardet/
:alt: Latest version on PyPI
.. image:: https://img.shields.io/pypi/l/chardet.svg
:alt: License
Detects
- ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants)
- Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese)
- EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese)
- EUC-KR, ISO-2022-KR (Korean)
- KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic)
- ISO-8859-5, windows-1251 (Bulgarian)
- ISO-8859-1, windows-1252 (Western European languages)
- ISO-8859-7, windows-1253 (Greek)
- ISO-8859-8, windows-1255 (Visual and Logical Hebrew)
- TIS-620 (Thai)
.. note::
Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily
disabled until we can retrain the models.
Requires Python 2.6, 2.7, or 3.3+.
Installation
------------
Install from `PyPI <https://pypi.python.org/pypi/chardet>`_::
pip install chardet
Documentation
-------------
For users, docs are now available at https://chardet.readthedocs.io/.
Command-line Tool
-----------------
chardet comes with a command-line script which reports on the encodings of one
or more files::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
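The same detection is available from Python; a minimal sketch using the
package's ``detect`` helper (the sample bytes are made up)::

    import chardet

    result = chardet.detect(b'\xc3\xa9chantillon de texte en fran\xc3\xa7ais')
    # result is a dict, e.g. {'encoding': 'utf-8', 'confidence': 0.87, ...}
    print(result['encoding'], result['confidence'])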
About
-----
This is a continuation of Mark Pilgrim's excellent chardet. Previously, two
versions needed to be maintained: one that supported python 2.x and one that
supported python 3.x. We've recently merged with `Ian Cordasco <https://github.com/sigmavirus24>`_'s
`charade <https://github.com/sigmavirus24/charade>`_ fork, so now we have one
coherent version that works for Python 2.6+.
:maintainer: Dan Blanchard

View file

@ -1,96 +0,0 @@
Metadata-Version: 2.0
Name: chardet
Version: 3.0.4
Summary: Universal encoding detector for Python 2 and 3
Home-page: https://github.com/chardet/chardet
Author: Daniel Blanchard
Author-email: dan.blanchard@gmail.com
License: LGPL
Keywords: encoding,i18n,xml
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Linguistic
Chardet: The Universal Character Encoding Detector
--------------------------------------------------
.. image:: https://img.shields.io/travis/chardet/chardet/stable.svg
:alt: Build status
:target: https://travis-ci.org/chardet/chardet
.. image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg
:target: https://coveralls.io/r/chardet/chardet
.. image:: https://img.shields.io/pypi/v/chardet.svg
:target: https://warehouse.python.org/project/chardet/
:alt: Latest version on PyPI
.. image:: https://img.shields.io/pypi/l/chardet.svg
:alt: License
Detects
- ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants)
- Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese)
- EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese)
- EUC-KR, ISO-2022-KR (Korean)
- KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic)
- ISO-8859-5, windows-1251 (Bulgarian)
- ISO-8859-1, windows-1252 (Western European languages)
- ISO-8859-7, windows-1253 (Greek)
- ISO-8859-8, windows-1255 (Visual and Logical Hebrew)
- TIS-620 (Thai)
.. note::
Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily
disabled until we can retrain the models.
Requires Python 2.6, 2.7, or 3.3+.
Installation
------------
Install from `PyPI <https://pypi.python.org/pypi/chardet>`_::
pip install chardet
Documentation
-------------
For users, docs are now available at https://chardet.readthedocs.io/.
Command-line Tool
-----------------
chardet comes with a command-line script which reports on the encodings of one
or more files::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
About
-----
This is a continuation of Mark Pilgrim's excellent chardet. Previously, two
versions needed to be maintained: one that supported python 2.x and one that
supported python 3.x. We've recently merged with `Ian Cordasco <https://github.com/sigmavirus24>`_'s
`charade <https://github.com/sigmavirus24/charade>`_ fork, so now we have one
coherent version that works for Python 2.6+.
:maintainer: Dan Blanchard

View file

@ -1,91 +0,0 @@
chardet/__init__.py,sha256=YsP5wQlsHJ2auF1RZJfypiSrCA7_bQiRm3ES_NI76-Y,1559
chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254
chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757
chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411
chardet/charsetgroupprober.py,sha256=6bDu8YIiRuScX4ca9Igb0U69TA2PGXXDej6Cc4_9kO4,3787
chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110
chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590
chardet/compat.py,sha256=PKTzHkSbtbHDqS9PyujMbX74q1a8mMpeQTDVsQhZMRw,1134
chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855
chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661
chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950
chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510
chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749
chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546
chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748
chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621
chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747
chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715
chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754
chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838
chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777
chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643
chardet/langbulgarianmodel.py,sha256=1HqQS9Pbtnj1xQgxitJMvw8X6kKr5OockNCZWfEQrPE,12839
chardet/langcyrillicmodel.py,sha256=LODajvsetH87yYDDQKA2CULXUH87tI223dhfjh9Zx9c,17948
chardet/langgreekmodel.py,sha256=8YAW7bU8YwSJap0kIJSbPMw1BEqzGjWzqcqf0WgUKAA,12688
chardet/langhebrewmodel.py,sha256=JSnqmE5E62tDLTPTvLpQsg5gOMO4PbdWRvV7Avkc0HA,11345
chardet/langhungarianmodel.py,sha256=RhapYSG5l0ZaO-VV4Fan5sW0WRGQqhwBM61yx3yxyOA,12592
chardet/langthaimodel.py,sha256=8l0173Gu_W6G8mxmQOTEF4ls2YdE7FxWf3QkSxEGXJQ,11290
chardet/langturkishmodel.py,sha256=W22eRNJsqI6uWAfwXSKVWWnCerYqrI8dZQTm_M0lRFk,11102
chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370
chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413
chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012
chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481
chardet/sbcharsetprober.py,sha256=LDSpCldDCFlYwUkGkwD2oFxLlPWIWXT09akH_2PiY74,5657
chardet/sbcsgroupprober.py,sha256=1IprcCB_k1qfmnxGC6MBbxELlKqD3scW6S8YIwdeyXA,3546
chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774
chardet/universaldetector.py,sha256=qL0174lSZE442eB21nnktT9_VcAye07laFWUeUrjttY,12485
chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766
chardet/version.py,sha256=sp3B08mrDXB-pf3K9fqJ_zeDHOCLC8RrngQyDFap_7g,242
chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
chardet/cli/chardetect.py,sha256=YBO8L4mXo0WR6_-Fjh_8QxPBoEBNqB9oNxNrdc54AQs,2738
chardet-3.0.4.dist-info/DESCRIPTION.rst,sha256=PQ4sBsMyKFZkjC6QpmbpLn0UtCNyeb-ZqvCGEgyZMGk,2174
chardet-3.0.4.dist-info/METADATA,sha256=RV_2I4B1Z586DL8oVO5Kp7X5bUdQ5EuKAvNoAEF8wSw,3239
chardet-3.0.4.dist-info/RECORD,,
chardet-3.0.4.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
chardet-3.0.4.dist-info/entry_points.txt,sha256=fAMmhu5eJ-zAJ-smfqQwRClQ3-nozOCmvJ6-E8lgGJo,60
chardet-3.0.4.dist-info/metadata.json,sha256=0htbRM18ujyGZDdfowgAqj6Hq2eQtwzwyhaEveKntgo,1375
chardet-3.0.4.dist-info/top_level.txt,sha256=AowzBbZy4x8EirABDdJSLJZMkJ_53iIag8xfKR6D7kI,8
../../../bin/chardetect,sha256=9D2GN7QHdXNcdjpmgCfbrwLfqvUeisEIzvB9b2L-ECI,270
chardet-3.0.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
chardet/cli/__pycache__/__init__.cpython-36.pyc,,
chardet/cli/__pycache__/chardetect.cpython-36.pyc,,
chardet/__pycache__/__init__.cpython-36.pyc,,
chardet/__pycache__/big5freq.cpython-36.pyc,,
chardet/__pycache__/big5prober.cpython-36.pyc,,
chardet/__pycache__/chardistribution.cpython-36.pyc,,
chardet/__pycache__/charsetgroupprober.cpython-36.pyc,,
chardet/__pycache__/charsetprober.cpython-36.pyc,,
chardet/__pycache__/codingstatemachine.cpython-36.pyc,,
chardet/__pycache__/compat.cpython-36.pyc,,
chardet/__pycache__/cp949prober.cpython-36.pyc,,
chardet/__pycache__/enums.cpython-36.pyc,,
chardet/__pycache__/escprober.cpython-36.pyc,,
chardet/__pycache__/escsm.cpython-36.pyc,,
chardet/__pycache__/eucjpprober.cpython-36.pyc,,
chardet/__pycache__/euckrfreq.cpython-36.pyc,,
chardet/__pycache__/euckrprober.cpython-36.pyc,,
chardet/__pycache__/euctwfreq.cpython-36.pyc,,
chardet/__pycache__/euctwprober.cpython-36.pyc,,
chardet/__pycache__/gb2312freq.cpython-36.pyc,,
chardet/__pycache__/gb2312prober.cpython-36.pyc,,
chardet/__pycache__/hebrewprober.cpython-36.pyc,,
chardet/__pycache__/jisfreq.cpython-36.pyc,,
chardet/__pycache__/jpcntx.cpython-36.pyc,,
chardet/__pycache__/langbulgarianmodel.cpython-36.pyc,,
chardet/__pycache__/langcyrillicmodel.cpython-36.pyc,,
chardet/__pycache__/langgreekmodel.cpython-36.pyc,,
chardet/__pycache__/langhebrewmodel.cpython-36.pyc,,
chardet/__pycache__/langhungarianmodel.cpython-36.pyc,,
chardet/__pycache__/langthaimodel.cpython-36.pyc,,
chardet/__pycache__/langturkishmodel.cpython-36.pyc,,
chardet/__pycache__/latin1prober.cpython-36.pyc,,
chardet/__pycache__/mbcharsetprober.cpython-36.pyc,,
chardet/__pycache__/mbcsgroupprober.cpython-36.pyc,,
chardet/__pycache__/mbcssm.cpython-36.pyc,,
chardet/__pycache__/sbcharsetprober.cpython-36.pyc,,
chardet/__pycache__/sbcsgroupprober.cpython-36.pyc,,
chardet/__pycache__/sjisprober.cpython-36.pyc,,
chardet/__pycache__/universaldetector.cpython-36.pyc,,
chardet/__pycache__/utf8prober.cpython-36.pyc,,
chardet/__pycache__/version.cpython-36.pyc,,

View file

@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.29.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

View file

@ -1,3 +0,0 @@
[console_scripts]
chardetect = chardet.cli.chardetect:main

View file

@ -1 +0,0 @@
{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Linguistic"], "extensions": {"python.commands": {"wrap_console": {"chardetect": "chardet.cli.chardetect:main"}}, "python.details": {"contacts": [{"email": "dan.blanchard@gmail.com", "name": "Daniel Blanchard", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/chardet/chardet"}}, "python.exports": {"console_scripts": {"chardetect": "chardet.cli.chardetect:main"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["encoding", "i18n", "xml"], "license": "LGPL", "metadata_version": "2.0", "name": "chardet", "summary": "Universal encoding detector for Python 2 and 3", "test_requires": [{"requires": ["hypothesis", "pytest"]}], "version": "3.0.4"}

Some files were not shown because too many files have changed in this diff