| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
sunqb/oa_qian
|
refs/heads/master
|
flask/Lib/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py
|
2930
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
lnielsen/cookiecutter-invenio-module
|
refs/heads/master
|
docs/conf.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, see <http://www.gnu.org/licenses>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import print_function
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
#sys.path.insert(0, os.path.abspath(os.path.join('_ext')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cookiecutter - Invenio Module Template'
copyright = u'2015 CERN'
author = u'Invenio Software Collaboration'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only set the theme when we are not on RTD
if not on_rtd:
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
print("`sphinx_rtd_theme` not found, pip install it", file=sys.stderr)
html_theme = 'alabaster'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'cookiecutter-invenio-moduledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cookiecutter-invenio-module.tex', u'cookiecutter-invenio-module Documentation',
u'Invenio Software Collaboration', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cookiecutter-invenio-module', u'Cookiecutter - Invenio Module Template Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cookiecutter-invenio-module', u'Cookiecutter - Invenio Module Template Documentation',
author, 'cookiecutter-invenio-module', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
}
# Autodoc configuration.
autoclass_content = 'both'
|
KitKatXperience/platform_external_chromium_org
|
refs/heads/kk
|
tools/json_schema_compiler/highlighters/__init__.py
|
12133432
| |
virtool/virtool
|
refs/heads/master
|
tests/groups/__init__.py
|
12133432
| |
levilucio/SyVOLT
|
refs/heads/master
|
ECore_Copier_MM/properties/positive/himesis/__init__.py
|
12133432
| |
zhangjunli177/sahara
|
refs/heads/master
|
sahara/tests/unit/service/edp/workflow_creator/__init__.py
|
12133432
| |
Fansion/crawltwitter
|
refs/heads/master
|
crawltwitter/controllers/__init__.py
|
12133432
| |
DONIKAN/django
|
refs/heads/master
|
django/contrib/gis/serializers/__init__.py
|
12133432
| |
valkjsaaa/sl4a
|
refs/heads/master
|
python/src/Lib/test/test_normalization.py
|
55
|
from test.test_support import run_unittest, open_urlresource
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest" + os.extsep + "txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
if os.path.exists(TESTDATAFILE):
f = open(TESTDATAFILE)
l = f.readline()
f.close()
if not unidata_version in l:
os.unlink(TESTDATAFILE)
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return u"".join([unichr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part1_data = {}
for line in open_urlresource(TESTDATAURL):
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
if part == "@Part3":
# XXX we don't support PRI #29 yet, so skip these tests for now
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
                # try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.failUnless(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.failUnless(c4 == NFC(c4) == NFC(c5), line)
self.failUnless(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.failUnless(c5 == NFD(c4) == NFD(c5), line)
self.failUnless(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.failUnless(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = unichr(c)
if X in part1_data:
continue
self.failUnless(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', u'\ud55c\uae00')
def test_main():
# Hit the exception early
open_urlresource(TESTDATAURL)
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
|
ZECTBynmo/notnode-gyp
|
refs/heads/master
|
gyp/test/mac/gyptest-depend-on-bundle.py
|
303
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a dependency on a bundle causes the whole bundle to be built.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='depend-on-bundle')
test.build('test.gyp', 'dependent_on_bundle', chdir='depend-on-bundle')
# Binary itself.
test.built_file_must_exist('dependent_on_bundle', chdir='depend-on-bundle')
# Bundle dependency.
test.built_file_must_exist(
'my_bundle.framework/Versions/A/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # package_framework
'my_bundle.framework/my_bundle',
chdir='depend-on-bundle')
test.built_file_must_exist( # plist
'my_bundle.framework/Versions/A/Resources/Info.plist',
chdir='depend-on-bundle')
test.built_file_must_exist(
'my_bundle.framework/Versions/A/Resources/English.lproj/' # Resources
'InfoPlist.strings',
chdir='depend-on-bundle')
test.pass_test()
|
robinro/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_command.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_command
short_description: Run arbitrary command on F5 devices.
description:
  - Sends an arbitrary command to a BIG-IP node and returns the results
read from the device. This module includes an argument that will cause
the module to wait for a specific condition before returning or timing
out if the condition is not met.
version_added: "2.4"
options:
commands:
description:
- The commands to send to the remote BIG-IP device over the
configured provider. The resulting output from the command
        is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
        the number of retries has expired.
- The I(commands) argument also accepts an alternative form
that allows for complex values that specify the command
to run and the output format to return. This can be done
on a command by command basis. The complex argument supports
the keywords C(command) and C(output) where C(command) is the
command to run and C(output) is 'text' or 'one-line'.
required: True
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
default: 1
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
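# Illustrative sketch only (hypothetical values): the I(commands) option also
# accepts the complex per-command form described above. After the module parses
# such input, each entry is a dict carrying the C(command) and C(output) keys,
# roughly like this:
_EXAMPLE_COMPLEX_COMMANDS = [
    {'command': 'show sys version', 'output': 'text'},
    {'command': 'list ltm virtual', 'output': 'one-line'},
]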
EXAMPLES = '''
- name: run show version on remote devices
bigip_command:
commands: show sys version
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: run show version and check to see if output contains BIG-IP
bigip_command:
commands: show sys version
wait_for: result[0] contains BIG-IP
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: run multiple commands on remote nodes
bigip_command:
commands:
- show sys version
- list ltm virtual
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: run multiple commands and evaluate the output
bigip_command:
commands:
- show sys version
- list ltm virtual
wait_for:
- result[0] contains BIG-IP
- result[1] contains my-vs
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: tmsh prefixes will automatically be handled
bigip_command:
commands:
- show sys version
- tmsh list ltm virtual
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
'''
RETURN = '''
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
'''
import time
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
from ansible.module_utils.netcli import FailedConditionsError
from ansible.module_utils.six import string_types
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.network_common import to_list
from collections import deque
class Parameters(AnsibleF5Parameters):
returnables = ['stdout', 'stdout_lines', 'warnings']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def commands(self):
commands = deque(self._values['commands'])
commands.appendleft(
'tmsh modify cli preference pager disabled'
)
commands = map(self._ensure_tmsh_prefix, list(commands))
return list(commands)
def _ensure_tmsh_prefix(self, cmd):
cmd = cmd.strip()
if cmd[0:5] != 'tmsh ':
cmd = 'tmsh ' + cmd.strip()
return cmd
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _to_lines(self, stdout):
lines = list()
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
lines.append(item)
return lines
def _is_valid_mode(self, cmd):
valid_configs = [
'tmsh list', 'tmsh show',
'tmsh modify cli preference pager disabled'
]
if any(cmd.startswith(x) for x in valid_configs):
return True
return False
def exec_module(self):
result = dict()
try:
self.execute()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.changes.to_return())
result.update(dict(changed=True))
return result
def execute(self):
warnings = list()
commands = self.parse_commands(warnings)
wait_for = self.want.wait_for or list()
retries = self.want.retries
conditionals = [Conditional(c) for c in wait_for]
if self.client.check_mode:
return
while retries > 0:
responses = self.execute_on_device(commands)
for item in list(conditionals):
if item(responses):
if self.want.match == 'any':
return item
conditionals.remove(item)
if not conditionals:
break
time.sleep(self.want.interval)
retries -= 1
else:
failed_conditions = [item.raw for item in conditionals]
errmsg = 'One or more conditional statements have not been satisfied'
raise FailedConditionsError(errmsg, failed_conditions)
self.changes = Parameters({
'stdout': responses,
'stdout_lines': self._to_lines(responses),
'warnings': warnings
})
def parse_commands(self, warnings):
results = []
commands = list(deque(set(self.want.commands)))
spec = dict(
command=dict(key=True),
output=dict(
default='text',
choices=['text', 'one-line']
),
)
transform = ComplexList(spec, self.client.module)
commands = transform(commands)
for index, item in enumerate(commands):
if not self._is_valid_mode(item['command']):
warnings.append(
'Using "write" commands is not idempotent. You should use '
'a module that is specifically made for that. If such a '
'module does not exist, then please file a bug. The command '
'in question is "%s..."' % item['command'][0:40]
)
if item['output'] == 'one-line' and 'one-line' not in item['command']:
item['command'] += ' one-line'
elif item['output'] == 'text' and 'one-line' in item['command']:
item['command'] = item['command'].replace('one-line', '')
results.append(item)
return results
def execute_on_device(self, commands):
responses = []
for item in to_list(commands):
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(item['command'])
)
if hasattr(output, 'commandResult'):
responses.append(str(output.commandResult))
return responses
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
commands=dict(
type='list',
required=True
),
wait_for=dict(
type='list',
aliases=['waitfor']
),
match=dict(
default='all',
choices=['any', 'all']
),
retries=dict(
default=10,
type='int'
),
interval=dict(
default=1,
type='int'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except (FailedConditionsError, AttributeError) as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
dgarros/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/homebrew.py
|
60
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
#
# Based on macports (Jimmy Tang <jcftang@gmail.com>)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
- "Andrew Dunham (@andrew-d)"
requirements:
- "python >= 2.6"
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: false
default: None
aliases: ['pkg', 'package', 'formula']
path:
description:
- >
        ':' separated list of paths to search for 'brew' executable. Since a package (I(formula) in homebrew parlance) location is prefixed
        relative to the actual path of the I(brew) command, providing an alternative I(brew) path enables managing a different set of packages in an
        alternative location on the system.
required: false
default: '/usr/local/bin'
state:
description:
- state of the package
choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
required: false
default: present
update_homebrew:
description:
- update homebrew itself first
required: false
default: no
choices: [ "yes", "no" ]
aliases: ['update-brew']
upgrade_all:
description:
- upgrade all homebrew packages
required: false
default: no
choices: [ "yes", "no" ]
aliases: ['upgrade']
install_options:
description:
      - option flags to install a package
required: false
default: null
aliases: ['options']
version_added: "1.4"
notes: []
'''
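# Illustrative sketch only (hypothetical directories): C(path) is a ':'-separated
# string that the module splits into the directory list passed to
# AnsibleModule.get_bin_path() when locating brew, and each C(install_options)
# entry is prefixed with '--' before being appended to the brew command line.
_EXAMPLE_OPT_DIRS = '/usr/local/bin:/opt/homebrew/bin'.split(':')
_EXAMPLE_INSTALL_OPTIONS = ['--{0}'.format(opt) for opt in ('with-baz', 'enable-debug')]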
EXAMPLES = '''
# Install formula foo with 'brew' in default path (C(/usr/local/bin))
- homebrew:
name: foo
state: present
# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
- homebrew:
name: foo
path: /my/other/location/bin
state: present
# Update homebrew first and install formula foo with 'brew' in default path
- homebrew:
name: foo
state: present
update_homebrew: yes
# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
- homebrew:
name: foo
state: latest
update_homebrew: yes
# Update homebrew and upgrade all packages
- homebrew:
update_homebrew: yes
upgrade_all: yes
# Miscellaneous other examples
- homebrew:
name: foo
state: head
- homebrew:
name: foo
state: linked
- homebrew:
name: foo
state: absent
- homebrew:
name: foo,bar
state: absent
- homebrew:
name: foo
state: present
install_options: with-baz,enable-debug
'''
import os.path
import re
from ansible.module_utils.six import iteritems
# exceptions -------------------------------------------------------------- {{{
class HomebrewException(Exception):
pass
# /exceptions ------------------------------------------------------------- }}}
# utils ------------------------------------------------------------------- {{{
def _create_regex_group(s):
lines = (line.strip() for line in s.split('\n') if line.strip())
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
# /utils ------------------------------------------------------------------ }}}
class Homebrew(object):
'''A class to manage Homebrew packages.'''
# class regexes ------------------------------------------------ {{{
VALID_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
: # colons
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_BREW_PATH_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
\s # spaces
{sep} # the OS-specific path separator
. # dots
- # dashes
'''.format(sep=os.path.sep)
VALID_PACKAGE_CHARS = r'''
\w # alphanumeric characters (i.e., [a-zA-Z0-9_])
. # dots
/ # slash (for taps)
\+ # plusses
- # dashes
: # colons (for URLs)
@ # at-sign
'''
INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
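    # For instance (hypothetical name): 'homebrew/core/wget@1.21' contains only
    # characters listed in VALID_PACKAGE_CHARS, so INVALID_PACKAGE_REGEX.search()
    # finds nothing and valid_package() accepts it; a name containing a space or
    # another excluded character would be rejected.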
# /class regexes ----------------------------------------------- }}}
# class validations -------------------------------------------- {{{
@classmethod
def valid_path(cls, path):
'''
`path` must be one of:
- list of paths
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- colons
- os.path.sep
'''
if isinstance(path, basestring):
return not cls.INVALID_PATH_REGEX.search(path)
try:
iter(path)
except TypeError:
return False
else:
paths = path
return all(cls.valid_brew_path(path_) for path_ in paths)
@classmethod
def valid_brew_path(cls, brew_path):
'''
`brew_path` must be one of:
- None
- a string containing only:
- alphanumeric characters
- dashes
- dots
- spaces
- os.path.sep
'''
if brew_path is None:
return True
return (
isinstance(brew_path, basestring)
and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
)
@classmethod
def valid_package(cls, package):
        '''A valid package is either None or a string containing only characters from VALID_PACKAGE_CHARS.'''
if package is None:
return True
return (
isinstance(package, basestring)
and not cls.INVALID_PACKAGE_REGEX.search(package)
)
@classmethod
def valid_state(cls, state):
'''
A valid state is one of:
- None
- installed
- upgraded
- head
- linked
- unlinked
- absent
'''
if state is None:
return True
else:
return (
isinstance(state, basestring)
and state.lower() in (
'installed',
'upgraded',
'head',
'linked',
'unlinked',
'absent',
)
)
@classmethod
def valid_module(cls, module):
'''A valid module is an instance of AnsibleModule.'''
return isinstance(module, AnsibleModule)
# /class validations ------------------------------------------- }}}
# class properties --------------------------------------------- {{{
@property
def module(self):
return self._module
@module.setter
def module(self, module):
if not self.valid_module(module):
self._module = None
self.failed = True
self.message = 'Invalid module: {0}.'.format(module)
raise HomebrewException(self.message)
else:
self._module = module
return module
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not self.valid_path(path):
self._path = []
self.failed = True
self.message = 'Invalid path: {0}.'.format(path)
raise HomebrewException(self.message)
else:
if isinstance(path, basestring):
self._path = path.split(':')
else:
self._path = path
return path
@property
def brew_path(self):
return self._brew_path
@brew_path.setter
def brew_path(self, brew_path):
if not self.valid_brew_path(brew_path):
self._brew_path = None
self.failed = True
self.message = 'Invalid brew_path: {0}.'.format(brew_path)
raise HomebrewException(self.message)
else:
self._brew_path = brew_path
return brew_path
@property
def params(self):
return self._params
@params.setter
def params(self, params):
self._params = self.module.params
return self._params
@property
def current_package(self):
return self._current_package
@current_package.setter
def current_package(self, package):
if not self.valid_package(package):
self._current_package = None
self.failed = True
self.message = 'Invalid package: {0}.'.format(package)
raise HomebrewException(self.message)
else:
self._current_package = package
return package
# /class properties -------------------------------------------- }}}
def __init__(self, module, path, packages=None, state=None,
update_homebrew=False, upgrade_all=False,
install_options=None):
if not install_options:
install_options = list()
self._setup_status_vars()
self._setup_instance_vars(module=module, path=path, packages=packages,
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all,
install_options=install_options, )
self._prep()
# prep --------------------------------------------------------- {{{
def _setup_status_vars(self):
self.failed = False
self.changed = False
self.changed_count = 0
self.unchanged_count = 0
self.message = ''
def _setup_instance_vars(self, **kwargs):
for key, val in iteritems(kwargs):
setattr(self, key, val)
def _prep(self):
self._prep_brew_path()
def _prep_brew_path(self):
if not self.module:
self.brew_path = None
self.failed = True
self.message = 'AnsibleModule not set.'
raise HomebrewException(self.message)
self.brew_path = self.module.get_bin_path(
'brew',
required=True,
opt_dirs=self.path,
)
if not self.brew_path:
self.brew_path = None
self.failed = True
self.message = 'Unable to locate homebrew executable.'
raise HomebrewException('Unable to locate homebrew executable.')
return self.brew_path
def _status(self):
return (self.failed, self.changed, self.message)
# /prep -------------------------------------------------------- }}}
def run(self):
try:
self._run()
except HomebrewException:
pass
if not self.failed and (self.changed_count + self.unchanged_count > 1):
self.message = "Changed: %d, Unchanged: %d" % (
self.changed_count,
self.unchanged_count,
)
(failed, changed, message) = self._status()
return (failed, changed, message)
# checks ------------------------------------------------------- {{{
def _current_package_is_installed(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
cmd = [
"{brew_path}".format(brew_path=self.brew_path),
"info",
self.current_package,
]
rc, out, err = self.module.run_command(cmd)
for line in out.split('\n'):
if (
re.search(r'Built from source', line)
or re.search(r'Poured from bottle', line)
):
return True
return False
def _current_package_is_outdated(self):
if not self.valid_package(self.current_package):
return False
rc, out, err = self.module.run_command([
self.brew_path,
'outdated',
self.current_package,
])
return rc != 0
def _current_package_is_installed_from_head(self):
if not Homebrew.valid_package(self.current_package):
return False
elif not self._current_package_is_installed():
return False
rc, out, err = self.module.run_command([
self.brew_path,
'info',
self.current_package,
])
try:
version_info = [line for line in out.split('\n') if line][0]
except IndexError:
return False
return version_info.split(' ')[-1] == 'HEAD'
# /checks ------------------------------------------------------ }}}
# commands ----------------------------------------------------- {{{
def _run(self):
if self.update_homebrew:
self._update_homebrew()
if self.upgrade_all:
self._upgrade_all()
if self.packages:
if self.state == 'installed':
return self._install_packages()
elif self.state == 'upgraded':
return self._upgrade_packages()
elif self.state == 'head':
return self._install_packages()
elif self.state == 'linked':
return self._link_packages()
elif self.state == 'unlinked':
return self._unlink_packages()
elif self.state == 'absent':
return self._uninstall_packages()
# updated -------------------------------- {{{
def _update_homebrew(self):
rc, out, err = self.module.run_command([
self.brew_path,
'update',
])
if rc == 0:
if out and isinstance(out, basestring):
already_updated = any(
re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
for s in out.split('\n')
if s
)
if not already_updated:
self.changed = True
self.message = 'Homebrew updated successfully.'
else:
self.message = 'Homebrew already up-to-date.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /updated ------------------------------- }}}
# _upgrade_all --------------------------- {{{
def _upgrade_all(self):
rc, out, err = self.module.run_command([
self.brew_path,
'upgrade',
])
if rc == 0:
if not out:
self.message = 'Homebrew packages already upgraded.'
else:
self.changed = True
self.message = 'Homebrew upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
# /_upgrade_all -------------------------- }}}
# installed ------------------------------ {{{
def _install_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self._current_package_is_installed():
self.unchanged_count += 1
self.message = 'Package already installed: {0}'.format(
self.current_package,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be installed: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
if self.state == 'head':
head = '--HEAD'
else:
head = None
opts = (
[self.brew_path, 'install']
+ self.install_options
+ [self.current_package, head]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_package_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Package installed: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _install_packages(self):
for package in self.packages:
self.current_package = package
self._install_current_package()
return True
# /installed ----------------------------- }}}
# upgraded ------------------------------- {{{
def _upgrade_current_package(self):
command = 'upgrade'
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
command = 'install'
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.message = 'Package is already upgraded: {0}'.format(
self.current_package,
)
self.unchanged_count += 1
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be upgraded: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, command]
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if self._current_package_is_installed() and not self._current_package_is_outdated():
self.changed_count += 1
self.changed = True
self.message = 'Package upgraded: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_all_packages(self):
opts = (
[self.brew_path, 'upgrade']
+ self.install_options
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed = True
self.message = 'All packages upgraded.'
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _upgrade_packages(self):
if not self.packages:
self._upgrade_all_packages()
else:
for package in self.packages:
self.current_package = package
self._upgrade_current_package()
return True
# /upgraded ------------------------------ }}}
# uninstalled ---------------------------- {{{
def _uninstall_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.unchanged_count += 1
self.message = 'Package already uninstalled: {0}'.format(
self.current_package,
)
return True
if self.module.check_mode:
self.changed = True
self.message = 'Package would be uninstalled: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'uninstall']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if not self._current_package_is_installed():
self.changed_count += 1
self.changed = True
self.message = 'Package uninstalled: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = err.strip()
raise HomebrewException(self.message)
def _uninstall_packages(self):
for package in self.packages:
self.current_package = package
self._uninstall_current_package()
return True
# /uninstalled ----------------------------- }}}
# linked --------------------------------- {{{
def _link_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.failed = True
self.message = 'Package not installed: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
self.message = 'Package would be linked: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'link']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed_count += 1
self.changed = True
self.message = 'Package linked: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = 'Package could not be linked: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
def _link_packages(self):
for package in self.packages:
self.current_package = package
self._link_current_package()
return True
# /linked -------------------------------- }}}
# unlinked ------------------------------- {{{
def _unlink_current_package(self):
if not self.valid_package(self.current_package):
self.failed = True
self.message = 'Invalid package: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if not self._current_package_is_installed():
self.failed = True
self.message = 'Package not installed: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
if self.module.check_mode:
self.changed = True
self.message = 'Package would be unlinked: {0}'.format(
self.current_package
)
raise HomebrewException(self.message)
opts = (
[self.brew_path, 'unlink']
+ self.install_options
+ [self.current_package]
)
cmd = [opt for opt in opts if opt]
rc, out, err = self.module.run_command(cmd)
if rc == 0:
self.changed_count += 1
self.changed = True
self.message = 'Package unlinked: {0}'.format(self.current_package)
return True
else:
self.failed = True
self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
raise HomebrewException(self.message)
def _unlink_packages(self):
for package in self.packages:
self.current_package = package
self._unlink_current_package()
return True
# /unlinked ------------------------------ }}}
# /commands ---------------------------------------------------- }}}
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(
aliases=["pkg", "package", "formula"],
required=False,
type='list',
),
path=dict(
default="/usr/local/bin",
required=False,
type='path',
),
state=dict(
default="present",
choices=[
"present", "installed",
"latest", "upgraded", "head",
"linked", "unlinked",
"absent", "removed", "uninstalled",
],
),
update_homebrew=dict(
default=False,
aliases=["update-brew"],
type='bool',
),
upgrade_all=dict(
default=False,
aliases=["upgrade"],
type='bool',
),
install_options=dict(
default=None,
aliases=['options'],
type='list',
)
),
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
p = module.params
if p['name']:
packages = p['name']
else:
packages = None
path = p['path']
if path:
path = path.split(':')
state = p['state']
if state in ('present', 'installed'):
state = 'installed'
if state in ('head', ):
state = 'head'
if state in ('latest', 'upgraded'):
state = 'upgraded'
if state == 'linked':
state = 'linked'
if state == 'unlinked':
state = 'unlinked'
if state in ('absent', 'removed', 'uninstalled'):
state = 'absent'
update_homebrew = p['update_homebrew']
upgrade_all = p['upgrade_all']
p['install_options'] = p['install_options'] or []
install_options = ['--{0}'.format(install_option)
for install_option in p['install_options']]
brew = Homebrew(module=module, path=path, packages=packages,
state=state, update_homebrew=update_homebrew,
upgrade_all=upgrade_all, install_options=install_options)
(failed, changed, message) = brew.run()
if failed:
module.fail_json(msg=message)
else:
module.exit_json(changed=changed, msg=message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
lym/allura-git
|
refs/heads/master
|
ForgeChat/forgechat/command.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import logging
import socket
import asyncore
import asynchat
import random
from urlparse import urljoin
from datetime import datetime, timedelta
import tg
from paste.script import command
from paste.deploy.converters import asint
from ming.orm import ThreadLocalORMSession
import allura
from allura.command import base
from allura.lib import helpers as h
from allura.lib import search, security
from allura import model as M
from forgechat import model as CM
class IRCBotCommand(allura.command.Command):
min_args = 1
max_args = 1
usage = '<ini file>'
summary = 'For the ForgeChat tool. Connect to all configured IRC servers and relay messages'
parser = command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--context', dest='context',
help=('The context of the message (path to the project'
                              ' and/or tool)'))
def command(self):
self.basic_setup()
base.log.info('IRCBot starting up...')
while True:
try:
IRCBot(
tg.config.get('forgechat.host', 'irc.freenode.net'),
asint(tg.config.get('forgechat.port', '6667')))
asyncore.loop()
except Exception:
base.log.exception(
'Error in ircbot asyncore.loop(), restart in 5s')
time.sleep(5)
class IRCBot(asynchat.async_chat):
TIME_BETWEEN_CONFIGS = timedelta(minutes=1)
def __init__(self, host, port, nick=None):
if nick is None:
nick = tg.config.get('ircbot.nick', 'allurabot')
self.logger = logging.getLogger(__name__)
self.host = host
self.port = port
self.nick = nick
sock = socket.socket()
sock.connect((host, port))
asynchat.async_chat.__init__(self, sock)
self.set_terminator('\r\n')
self.data = []
self.channels = {}
self.set_nick('000')
self.say('USER {nick} {host} {host} :{nick} 0.0'.format(
nick=self.nick,
host=self.host))
self.configure()
def set_nick(self, suffix=None):
if suffix is None:
suffix = '%.3d' % random.randint(0, 999)
nick = '%s-%s' % (self.nick, suffix)
self.say('NICK ' + nick)
def collect_incoming_data(self, data):
self.data.append(data)
def found_terminator(self):
request = ''.join(self.data)
self.logger.debug('RECV %s', request)
self.data = []
if request.startswith(':'):
sender, cmd, rest = request[1:].split(' ', 2)
sender = sender.split('!', 1)
else:
sender = ('', '')
cmd, rest = request.split(' ', 1)
self.handle_command(sender, cmd, rest)
def configure(self):
new_channels = dict(
(ch.channel, ch) for ch in CM.ChatChannel.query.find())
for channel in new_channels:
if channel not in self.channels and channel:
self.say('JOIN %s' % channel)
for channel in self.channels:
if channel not in new_channels and channel:
self.say('LEAVE %s' % channel)
self.channels = new_channels
self.last_configured = datetime.utcnow()
def check_configure(self):
if (datetime.utcnow() - self.last_configured
> self.TIME_BETWEEN_CONFIGS):
self.configure()
def say(self, s):
s = s.encode('utf-8')
self.logger.debug('SAYING %s', s)
self.push(s + '\r\n')
def notice(self, out, message):
self.say('NOTICE %s :%s' % (out, message))
CM.ChatMessage(
sender=self.nick,
channel=out,
text=message)
ThreadLocalORMSession.flush_all()
def handle_command(self, sender, cmd, rest):
if cmd == 'NOTICE':
pass
elif cmd == '433':
self.set_nick()
self.channels = {}
self.configure()
elif cmd == 'PING':
self.say('PONG ' + rest)
elif cmd in ('NOTICE', 'PRIVMSG'):
rcpt, msg = rest.split(' ', 1)
if not self.set_context(rcpt):
return
if msg.startswith(':'):
msg = msg[1:]
self.log_channel(sender, cmd, rcpt, msg)
if cmd == 'NOTICE':
return
for lnk in search.find_shortlinks(msg):
self.handle_shortlink(lnk, sender, rcpt)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
self.check_configure()
ThreadLocalORMSession.close_all()
def set_context(self, rcpt):
if rcpt == self.nick:
return False
chan = self.channels.get(rcpt, None)
if not chan:
return False
h.set_context(chan.project_id,
app_config_id=chan.app_config_id)
return True
def handle_shortlink(self, lnk, sender, rcpt):
art = lnk.ref.artifact
if security.has_access(art, 'read', user=M.User.anonymous())():
index = art.index()
text = index['snippet_s'] or h.get_first(index, 'title')
url = urljoin(
tg.config['base_url'], index['url_s'])
self.notice(rcpt, '[%s] - [%s](%s)' % (lnk.link, text, url))
def log_channel(self, sender, cmd, rcpt, rest):
if cmd not in ('NOTICE', 'PRIVMSG'):
self.logger.debug('IGN: %s %s %s %s', sender, cmd, rcpt, rest)
return
if cmd == 'NOTICE':
text = '--' + rest
else:
text = rest
CM.ChatMessage(
sender='!'.join(sender),
channel=rcpt,
text=text)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
|
okfn/ckanext-project
|
refs/heads/master
|
ckanext/project/logic/converters.py
|
4
|
import ckan.model as model
import ckan.lib.navl.dictization_functions as df
from ckan.common import _
def convert_package_name_or_id_to_title_or_name(package_name_or_id, context):
'''
Return the package title, or name if no title, for the given package name
or id.
    :returns: the title, or name if no title, of the package with the given
        name or id
:rtype: string
:raises: ckan.lib.navl.dictization_functions.Invalid if there is no
package with the given name or id
'''
session = context['session']
result = session.query(model.Package).filter_by(
id=package_name_or_id).first()
if not result:
result = session.query(model.Package).filter_by(
name=package_name_or_id).first()
if not result:
raise df.Invalid('%s: %s' % (_('Not found'), _('Dataset')))
return result.title or result.name
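# Illustrative usage sketch only (hypothetical package name): the converter takes
# a context dict carrying the active SQLAlchemy session and returns the package
# title, falling back to the name; df.Invalid is raised when nothing matches.
def _example_usage():
    context = {'session': model.Session}
    return convert_package_name_or_id_to_title_or_name('my-dataset', context)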
|
windyuuy/opera
|
refs/heads/master
|
chromium/src/third_party/python_26/Tools/pynche/ChipViewer.py
|
94
|
"""Chip viewer and widget.
In the lower left corner of the main Pynche window, you will see two
ChipWidgets, one for the selected color and one for the nearest color. The
selected color is the actual RGB value expressed as an X11 #COLOR name. The
nearest color is the named color from the X11 database that is closest to the
selected color in 3D space. There may be other colors equally close, but the
nearest one is the first one found.
Clicking on the nearest color chip selects that named color.
The ChipViewer class includes the entire lower left quadrant; i.e. both the
selected and nearest ChipWidgets.
"""
from Tkinter import *
import ColorDB
class ChipWidget:
_WIDTH = 150
_HEIGHT = 80
def __init__(self,
master = None,
width = _WIDTH,
height = _HEIGHT,
text = 'Color',
initialcolor = 'blue',
presscmd = None,
releasecmd = None):
# create the text label
self.__label = Label(master, text=text)
self.__label.grid(row=0, column=0)
# create the color chip, implemented as a frame
self.__chip = Frame(master, relief=RAISED, borderwidth=2,
width=width,
height=height,
background=initialcolor)
self.__chip.grid(row=1, column=0)
# create the color name
self.__namevar = StringVar()
self.__namevar.set(initialcolor)
self.__name = Entry(master, textvariable=self.__namevar,
relief=FLAT, justify=CENTER, state=DISABLED,
font=self.__label['font'])
self.__name.grid(row=2, column=0)
# create the message area
self.__msgvar = StringVar()
self.__name = Entry(master, textvariable=self.__msgvar,
relief=FLAT, justify=CENTER, state=DISABLED,
font=self.__label['font'])
self.__name.grid(row=3, column=0)
# set bindings
if presscmd:
self.__chip.bind('<ButtonPress-1>', presscmd)
if releasecmd:
self.__chip.bind('<ButtonRelease-1>', releasecmd)
def set_color(self, color):
self.__chip.config(background=color)
def get_color(self):
return self.__chip['background']
def set_name(self, colorname):
self.__namevar.set(colorname)
def set_message(self, message):
self.__msgvar.set(message)
def press(self):
self.__chip.configure(relief=SUNKEN)
def release(self):
self.__chip.configure(relief=RAISED)
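# Illustrative sketch only (hypothetical values, not used by Pynche itself): a
# single ChipWidget can be exercised on its own inside any Tk container.
def _demo_chip_widget():
    root = Tk()
    frame = Frame(root)
    frame.grid()
    chip = ChipWidget(frame, text='Demo', initialcolor='red')
    chip.set_name('#ff0000')
    chip.set_message('standalone demo chip')
    return root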
class ChipViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
self.__frame.grid(row=3, column=0, ipadx=5, sticky='NSEW')
# create the chip that will display the currently selected color
# exactly
self.__sframe = Frame(self.__frame)
self.__sframe.grid(row=0, column=0)
self.__selected = ChipWidget(self.__sframe, text='Selected')
# create the chip that will display the nearest real X11 color
# database color name
self.__nframe = Frame(self.__frame)
self.__nframe.grid(row=0, column=1)
self.__nearest = ChipWidget(self.__nframe, text='Nearest',
presscmd = self.__buttonpress,
releasecmd = self.__buttonrelease)
def update_yourself(self, red, green, blue):
# Selected always shows the #rrggbb name of the color, nearest always
# shows the name of the nearest color in the database. BAW: should
# an exact match be indicated in some way?
#
# Always use the #rrggbb style to actually set the color, since we may
# not be using X color names (e.g. "web-safe" names)
colordb = self.__sb.colordb()
rgbtuple = (red, green, blue)
rrggbb = ColorDB.triplet_to_rrggbb(rgbtuple)
# find the nearest
nearest = colordb.nearest(red, green, blue)
nearest_tuple = colordb.find_byname(nearest)
nearest_rrggbb = ColorDB.triplet_to_rrggbb(nearest_tuple)
self.__selected.set_color(rrggbb)
self.__nearest.set_color(nearest_rrggbb)
# set the name and messages areas
self.__selected.set_name(rrggbb)
if rrggbb == nearest_rrggbb:
self.__selected.set_message(nearest)
else:
self.__selected.set_message('')
self.__nearest.set_name(nearest_rrggbb)
self.__nearest.set_message(nearest)
def __buttonpress(self, event=None):
self.__nearest.press()
def __buttonrelease(self, event=None):
self.__nearest.release()
rrggbb = self.__nearest.get_color()
red, green, blue = ColorDB.rrggbb_to_triplet(rrggbb)
self.__sb.update_views(red, green, blue)
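# --- A minimal usage sketch (not part of the original Pynche module). ---
# It exercises a single ChipWidget on its own, without the Switchboard the
# full application provides; it assumes a display is available and that the
# Tkinter import above works.
if __name__ == '__main__':
    root = Tk()
    demo_frame = Frame(root)
    demo_frame.grid()
    chip = ChipWidget(demo_frame, text='Demo chip', initialcolor='red')
    chip.set_color('#336699')         # update the swatch
    chip.set_name('#336699')          # update the name entry
    chip.set_message('example message')
    root.mainloop()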
|
zachriggle/pwndbg
|
refs/heads/master
|
pwndbg/commands/config.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dumps all pwndbg-specific configuration points.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pwndbg.commands
import pwndbg.config
from pwndbg.color import light_yellow
from pwndbg.color import ljust_colored
from pwndbg.color import strip
def print_row(name, value, default, docstring, ljust_optname, ljust_value, empty_space=6):
name = ljust_colored(name, ljust_optname + empty_space)
defval = extend_value_with_default(value, default)
defval = ljust_colored(defval, ljust_value + empty_space)
result = ' '.join((name, defval, docstring))
print(result)
return result
def extend_value_with_default(value, default):
if strip(value) != strip(default):
return '%s (%s)' % (value, default)
return value
@pwndbg.commands.Command
def config():
"""Shows pwndbg-specific configuration points"""
values = [v for k, v in pwndbg.config.__dict__.items()
if isinstance(v, pwndbg.config.Parameter) and v.scope == 'config']
longest_optname = max(map(len, [v.optname for v in values]))
longest_value = max(map(len, [extend_value_with_default(repr(v.value), repr(v.default)) for v in values]))
header = print_row('Name', 'Value', 'Def', 'Documentation', longest_optname, longest_value)
print('-' * (len(header)))
for v in sorted(values):
print_row(v.optname, repr(v.value), repr(v.default), v.docstring, longest_optname, longest_value)
    print(light_yellow('You can set a config variable with `set <config-var> <value>`'))
    print(light_yellow('You can generate a configuration file using `configfile` '
                       '- then put it in your .gdbinit after initializing pwndbg'))
@pwndbg.commands.Command
def configfile(show_all=False):
"""Generates a configuration file for the current Pwndbg options"""
configfile_print_scope('config', show_all)
@pwndbg.commands.Command
def themefile(show_all=False):
"""Generates a configuration file for the current Pwndbg theme options"""
configfile_print_scope('theme', show_all)
def configfile_print_scope(scope, show_all=False):
params = pwndbg.config.get_params(scope)
if not show_all:
params = list(filter(lambda p: p.is_changed, params))
if params:
if not show_all:
print(light_yellow('Showing only changed values:'))
for p in params:
print('# %s: %s' % (p.optname, p.docstring))
print('# default: %s' % p.native_default)
print('set %s %s' % (p.optname, p.native_value))
print()
else:
print(light_yellow('No changed values. To see current values use `%s`.' % scope))
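# --- Illustration (not part of pwndbg) of how the helpers above compose. ---
# extend_value_with_default() appends the default only when the color-stripped
# value differs from the color-stripped default, so a changed option renders
# as "value (default)" while an unchanged option renders as just "value".
# For example, with hypothetical values:
#     extend_value_with_default("'red'", "'blue'")  -> "'red' ('blue')"
#     extend_value_with_default("'blue'", "'blue'") -> "'blue'"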
|
errbotio/errbot
|
refs/heads/master
|
tests/poller_test.py
|
1
|
import time
from os import path
CURRENT_FILE_DIR = path.dirname(path.realpath(__file__))
extra_plugin_dir = path.join(CURRENT_FILE_DIR, "poller_plugin")
def test_delayed_hello(testbot):
assert "Hello, world!" in testbot.exec_command("!hello")
time.sleep(1)
delayed_msg = "Hello world! was sent 5 seconds ago"
assert delayed_msg in testbot.pop_message(timeout=1)
# Assert that only one message has been enqueued
assert testbot.bot.outgoing_message_queue.empty()
def test_delayed_hello_loop(testbot):
assert "Hello, world!" in testbot.exec_command("!hello_loop")
time.sleep(1)
delayed_msg = "Hello world! was sent 5 seconds ago"
assert delayed_msg in testbot.pop_message(timeout=1)
# Assert that only one message has been enqueued
assert testbot.bot.outgoing_message_queue.empty()
|
bambuste/qgis-vfk-plugin
|
refs/heads/master
|
vfkDocument.py
|
2
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
vfkPluginDialog
A QGIS plugin
Plugin umoznujici praci s daty katastru nemovitosti
-------------------
begin : 2015-06-11
git sha : $Format:%H$
copyright : (C) 2015 by Stepan Bambula
email : stepan.bambula@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from abc import ABCMeta, abstractmethod
class TPair(object):
def __init__(self, first=u'', second=u''):
self.first = first
self.second = second
class VfkDocument:
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def header(self):
pass
@abstractmethod
def footer(self):
pass
@abstractmethod
def heading1(self, text):
pass
@abstractmethod
def heading2(self, text):
pass
@abstractmethod
def heading3(self, text):
pass
@abstractmethod
def beginItemize(self):
pass
@abstractmethod
def endItemize(self):
pass
@abstractmethod
def beginItem(self):
pass
@abstractmethod
def endItem(self):
pass
@abstractmethod
def item(self, text):
pass
@abstractmethod
def beginTable(self):
pass
@abstractmethod
def endTable(self):
pass
@abstractmethod
def tableHeader(self, columns):
pass
@abstractmethod
def tableRow(self, columns):
pass
@abstractmethod
def tableRowOneColumnSpan(self, text):
pass
@abstractmethod
def link(self, href, text):
pass
@abstractmethod
def superScript(self, text):
pass
@abstractmethod
def newLine(self):
pass
@abstractmethod
def keyValueTable(self, content):
pass
@abstractmethod
def paragraph(self, text):
pass
@abstractmethod
def table(self, content, header):
pass
@abstractmethod
def text(self, text):
pass
@abstractmethod
def discardLastBeginTable(self):
pass
@abstractmethod
def isLastTableEmpty(self):
pass
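# --- A partial sketch (not part of the plugin) of a concrete VfkDocument. ---
# It shows the intended contract of the abstract interface above by rendering
# a few of the hooks as plain text.  Every remaining abstract method would
# also have to be implemented before the class could be instantiated.
class PlainTextDocumentSketch(VfkDocument):
    def __init__(self):
        VfkDocument.__init__(self)
        self.parts = []
    def header(self):
        self.parts.append(u'=== VFK report ===')
    def footer(self):
        self.parts.append(u'=== end of report ===')
    def heading1(self, text):
        self.parts.append(u'# ' + text)
    def paragraph(self, text):
        self.parts.append(text)
    def text(self, text):
        self.parts.append(text)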
|
deepakkv07/Implementation-of-UDP-Lite-in-ns-3
|
refs/heads/master
|
src/lte/bindings/callbacks_list.py
|
3
|
callback_classes = [
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::Address const&', 'ns3::Address const&', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'ns3::Ptr<ns3::SpectrumValue>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::UlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'std::list<ns3::Ptr<ns3::LteControlMessage>, std::allocator<ns3::Ptr<ns3::LteControlMessage> > >', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
maximmaxim345/Sheep-it-blender-plugin
|
refs/heads/master
|
selenium/webdriver/firefox/firefox_profile.py
|
24
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
EXTENSION_NAME = "fxdriver@googlecode.com"
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
if not FirefoxProfile.DEFAULT_PREFERENCES:
with open(os.path.join(os.path.dirname(__file__),
WEBDRIVER_PREFERENCES)) as default_prefs:
FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
self.native_events_enabled = True
self.profile_dir = profile_directory
self.tempfolder = None
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
self.tempfolder = tempfile.mkdtemp()
newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
# Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
self.default_preferences[key] = value
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
# Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
try:
port = int(port)
if port < 1 or port > 65535:
raise WebDriverException("Port number must be in the range 1..65535")
except (ValueError, TypeError):
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
self.update_preferences()
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
        if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except Exception:
warnings.warn("(skipping) failed to json.loads existing preference: " +
matches.group(1) + matches.group(2))
except Exception:
            # The profile given hasn't had any changes made, i.e. no user.js
pass
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir)
def _addon_details(self, addon_path):
"""
Returns a dictionary of details about the addon.
:param addon_path: path to the add-on directory or XPI
Returns::
{'id': u'rainbow@colors.org', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
                        # If the namespace is not the default one, remove the 'xmlns:' prefix
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
try:
if zipfile.is_zipfile(addon_path):
# Bug 944361 - We cannot use 'with' together with zipFile because
# it will cause an exception thrown in Python 2.6.
try:
compressed_file = zipfile.ZipFile(addon_path, 'r')
manifest = compressed_file.read('install.rdf')
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
description = doc.getElementsByTagName(rdf + 'Description').item(0)
if description is None:
description = doc.getElementsByTagName('Description').item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
if details.get('id') is None:
for i in range(description.attributes.length):
attribute = description.attributes.item(i)
if attribute.name == em + 'id':
details.update({'id': attribute.value})
except Exception as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
# turn unpack into a true/false value
if isinstance(details['unpack'], str):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
return details
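# --- A short usage sketch (not part of the selenium source). ---
# It only exercises the preference plumbing; it assumes webdriver_prefs.json
# ships next to this module, as the constants above expect.
if __name__ == '__main__':
    profile = FirefoxProfile()
    profile.set_preference("browser.download.folderList", 2)
    profile.accept_untrusted_certs = True
    profile.update_preferences()        # writes user.js into profile.path
    print(profile.path)                 # the temporary profile directory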
|
jonathanwcrane/netaddr
|
refs/heads/rel-0.7.x
|
docs/source/conf.py
|
4
|
# -*- coding: utf-8 -*-
#
# netaddr documentation build configuration file, created by
# sphinx-quickstart on Sun May 27 22:23:51 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'netaddr'
copyright = u'2008-2015, David P. D. Moss. All rights reserved'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.15'
# The full version, including alpha/beta/rc tags.
release = '0.7.15'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'netaddrdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'netaddr.tex', u'netaddr Documentation',
u'David P. D. Moss', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'netaddr', u'netaddr Documentation',
[u'David P. D. Moss'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'netaddr',
u'netaddr Documentation',
u'David P. D. Moss',
'netaddr',
'a comprehensive network address library for Python',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
Fusion-Rom/android_external_chromium_org
|
refs/heads/lp5.1
|
chrome/common/extensions/docs/server2/compiled_file_system.py
|
26
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from docs_server_utils import ToUnicode
from file_system import FileNotFoundError
from future import Future
from path_util import AssertIsDirectory, AssertIsFile, ToDirectory
from third_party.json_schema_compiler import json_parse
from third_party.json_schema_compiler.memoize import memoize
from third_party.motemplate import Motemplate
_CACHEABLE_FUNCTIONS = set()
_SINGLE_FILE_FUNCTIONS = set()
def _GetUnboundFunction(fn):
'''Functions bound to an object are separate from the unbound
  definition. This causes issues when checking for cache membership,
so always get the unbound function, if possible.
'''
return getattr(fn, 'im_func', fn)
def Cache(fn):
'''A decorator which can be applied to the compilation function
passed to CompiledFileSystem.Create, indicating that file/list data
should be cached.
This decorator should be listed first in any list of decorators, along
with the SingleFile decorator below.
'''
_CACHEABLE_FUNCTIONS.add(_GetUnboundFunction(fn))
return fn
def SingleFile(fn):
'''A decorator which can be optionally applied to the compilation function
passed to CompiledFileSystem.Create, indicating that the function only
needs access to the file which is given in the function's callback. When
this is the case some optimisations can be done.
Note that this decorator must be listed first in any list of decorators to
have any effect.
'''
_SINGLE_FILE_FUNCTIONS.add(_GetUnboundFunction(fn))
return fn
def Unicode(fn):
'''A decorator which can be optionally applied to the compilation function
passed to CompiledFileSystem.Create, indicating that the function processes
the file's data as Unicode text.
'''
# The arguments passed to fn can be (self, path, data) or (path, data). In
# either case the last argument is |data|, which should be converted to
# Unicode.
def convert_args(args):
args = list(args)
args[-1] = ToUnicode(args[-1])
return args
return lambda *args: fn(*convert_args(args))
class _CacheEntry(object):
def __init__(self, cache_data, version):
self.cache_data = cache_data
self.version = version
class CompiledFileSystem(object):
'''This class caches FileSystem data that has been processed.
'''
class Factory(object):
'''A class to build a CompiledFileSystem backed by |file_system|.
'''
def __init__(self, object_store_creator):
self._object_store_creator = object_store_creator
def Create(self, file_system, compilation_function, cls, category=None):
'''Creates a CompiledFileSystem view over |file_system| that populates
its cache by calling |compilation_function| with (path, data), where
|data| is the data that was fetched from |path| in |file_system|.
The namespace for the compiled file system is derived similar to
ObjectStoreCreator: from |cls| along with an optional |category|.
'''
assert isinstance(cls, type)
assert not cls.__name__[0].islower() # guard against non-class types
full_name = [cls.__name__, file_system.GetIdentity()]
if category is not None:
full_name.append(category)
def create_object_store(my_category):
# The read caches can start populated (start_empty=False) because file
# updates are picked up by the stat - but only if the compilation
# function is affected by a single file. If the compilation function is
# affected by other files (e.g. compiling a list of APIs available to
# extensions may be affected by both a features file and the list of
# files in the API directory) then this optimisation won't work.
return self._object_store_creator.Create(
CompiledFileSystem,
category='/'.join(full_name + [my_category]),
start_empty=compilation_function not in _SINGLE_FILE_FUNCTIONS)
return CompiledFileSystem(file_system,
compilation_function,
create_object_store('file'),
create_object_store('list'))
@memoize
def ForJson(self, file_system):
'''A CompiledFileSystem specifically for parsing JSON configuration data.
These are memoized over file systems tied to different branches.
'''
return self.Create(file_system,
Cache(SingleFile(lambda _, data:
json_parse.Parse(ToUnicode(data)))),
CompiledFileSystem,
category='json')
@memoize
def ForTemplates(self, file_system):
'''Creates a CompiledFileSystem for parsing templates.
'''
return self.Create(
file_system,
SingleFile(lambda path, text: Motemplate(ToUnicode(text), name=path)),
CompiledFileSystem)
@memoize
def ForUnicode(self, file_system):
'''Creates a CompiledFileSystem for Unicode text processing.
'''
return self.Create(
file_system,
SingleFile(lambda _, text: ToUnicode(text)),
CompiledFileSystem,
category='text')
def __init__(self,
file_system,
compilation_function,
file_object_store,
list_object_store):
self._file_system = file_system
self._compilation_function = compilation_function
self._file_object_store = file_object_store
self._list_object_store = list_object_store
def _Get(self, store, key):
if _GetUnboundFunction(self._compilation_function) in _CACHEABLE_FUNCTIONS:
return store.Get(key)
return Future(value=None)
def _Set(self, store, key, value):
if _GetUnboundFunction(self._compilation_function) in _CACHEABLE_FUNCTIONS:
store.Set(key, value)
def _RecursiveList(self, path):
'''Returns a Future containing the recursive directory listing of |path| as
a flat list of paths.
'''
def split_dirs_from_files(paths):
'''Returns a tuple (dirs, files) where |dirs| contains the directory
names in |paths| and |files| contains the files.
'''
result = [], []
for path in paths:
result[0 if path.endswith('/') else 1].append(path)
return result
def add_prefix(prefix, paths):
return [prefix + path for path in paths]
# Read in the initial list of files. Do this eagerly (i.e. not part of the
# asynchronous Future contract) because there's a greater chance to
# parallelise fetching with the second layer (can fetch multiple paths).
try:
first_layer_dirs, first_layer_files = split_dirs_from_files(
self._file_system.ReadSingle(path).Get())
except FileNotFoundError:
return Future(exc_info=sys.exc_info())
if not first_layer_dirs:
return Future(value=first_layer_files)
def get_from_future_listing(listings):
'''Recursively lists files from directory listing |futures|.
'''
dirs, files = [], []
for dir_name, listing in listings.iteritems():
new_dirs, new_files = split_dirs_from_files(listing)
# |dirs| are paths for reading. Add the full prefix relative to
# |path| so that |file_system| can find the files.
dirs += add_prefix(dir_name, new_dirs)
# |files| are not for reading, they are for returning to the caller.
# This entire function set (i.e. GetFromFileListing) is defined to
# not include the fetched-path in the result, however, |dir_name|
# will be prefixed with |path|. Strip it.
assert dir_name.startswith(path)
files += add_prefix(dir_name[len(path):], new_files)
if dirs:
files += self._file_system.Read(dirs).Then(
get_from_future_listing).Get()
return files
return self._file_system.Read(add_prefix(path, first_layer_dirs)).Then(
lambda results: first_layer_files + get_from_future_listing(results))
def GetFromFile(self, path, skip_not_found=False):
'''Calls |compilation_function| on the contents of the file at |path|.
If |skip_not_found| is True, then None is passed to |compilation_function|.
'''
AssertIsFile(path)
try:
version = self._file_system.Stat(path).version
except FileNotFoundError:
if skip_not_found:
version = None
else:
return Future(exc_info=sys.exc_info())
cache_entry = self._Get(self._file_object_store, path).Get()
if (cache_entry is not None) and (version == cache_entry.version):
return Future(value=cache_entry.cache_data)
def compile_(files):
cache_data = self._compilation_function(path, files)
self._Set(self._file_object_store, path, _CacheEntry(cache_data, version))
return cache_data
return self._file_system.ReadSingle(
path, skip_not_found=skip_not_found).Then(compile_)
def GetFromFileListing(self, path):
'''Calls |compilation_function| on the listing of the files at |path|.
Assumes that the path given is to a directory.
'''
AssertIsDirectory(path)
try:
version = self._file_system.Stat(path).version
except FileNotFoundError:
return Future(exc_info=sys.exc_info())
cache_entry = self._Get(self._list_object_store, path).Get()
if (cache_entry is not None) and (version == cache_entry.version):
return Future(value=cache_entry.cache_data)
def compile_(files):
cache_data = self._compilation_function(path, files)
self._Set(self._list_object_store, path, _CacheEntry(cache_data, version))
return cache_data
return self._RecursiveList(path).Then(compile_)
# _GetFileVersionFromCache and _GetFileListingVersionFromCache are exposed
# *only* so that ChainedCompiledFileSystem can optimise its caches. *Do not*
# use these methods otherwise, they don't do what you want. Use
# FileSystem.Stat on the FileSystem that this CompiledFileSystem uses.
def _GetFileVersionFromCache(self, path):
cache_entry = self._Get(self._file_object_store, path).Get()
if cache_entry is not None:
return Future(value=cache_entry.version)
stat_future = self._file_system.StatAsync(path)
return Future(callback=lambda: stat_future.Get().version)
def _GetFileListingVersionFromCache(self, path):
path = ToDirectory(path)
cache_entry = self._Get(self._list_object_store, path).Get()
if cache_entry is not None:
return Future(value=cache_entry.version)
stat_future = self._file_system.StatAsync(path)
return Future(callback=lambda: stat_future.Get().version)
def GetIdentity(self):
return self._file_system.GetIdentity()
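# --- A wiring sketch (not from the Chromium source) of how callers typically
# use the factory above.  |object_store_creator| and |file_system| are
# placeholders for the objects the docs server provides.
#
#   factory = CompiledFileSystem.Factory(object_store_creator)
#   parse_json = Cache(SingleFile(lambda _, data: json_parse.Parse(ToUnicode(data))))
#   compiled = factory.Create(file_system, parse_json, CompiledFileSystem,
#                             category='json')
#   manifest = compiled.GetFromFile('manifest.json').Get()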
|
printerpam/Stormy
|
refs/heads/master
|
desktop_gui/Stormy/controllers/helpController.py
|
2
|
__author__ = 'phpgeek'
import webbrowser
class HelpController():
def __init__(self, mainWidget):
self.mainWidget = mainWidget
def showHelp(self):
        # check for another installation in progress; will show a popup and stop execution
if self.mainWidget.otherInstallationOnProgress() :
return
        # show the help widget
self.mainWidget.stack.setCurrentWidget( self.mainWidget.helpWidget )
def openInWebBrowser(self, url):
webbrowser.open( url )
|
shasha79/pysolr
|
refs/heads/master
|
tests/client.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import sys
from pysolr import (Solr, Results, SolrError, unescape_html, safe_urlencode,
force_unicode, force_bytes, sanitize, json, ET, IS_PY3,
clean_xml_string)
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from urllib.parse import unquote_plus
except ImportError:
from urllib import unquote_plus
if IS_PY3:
from io import StringIO
else:
from StringIO import StringIO
class UtilsTestCase(unittest.TestCase):
def test_unescape_html(self):
self.assertEqual(unescape_html('Hello • world'), 'Hello \x95 world')
self.assertEqual(unescape_html('Hello d world'), 'Hello d world')
self.assertEqual(unescape_html('Hello & ☃'), 'Hello & ☃')
self.assertEqual(unescape_html('Hello &doesnotexist; world'), 'Hello &doesnotexist; world')
def test_safe_urlencode(self):
self.assertEqual(force_unicode(unquote_plus(safe_urlencode({'test': 'Hello ☃! Helllo world!'}))), 'test=Hello ☃! Helllo world!')
self.assertEqual(force_unicode(unquote_plus(safe_urlencode({'test': ['Hello ☃!', 'Helllo world!']}, True))), "test=Hello \u2603!&test=Helllo world!")
self.assertEqual(force_unicode(unquote_plus(safe_urlencode({'test': ('Hello ☃!', 'Helllo world!')}, True))), "test=Hello \u2603!&test=Helllo world!")
def test_sanitize(self):
        self.assertEqual(sanitize('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19h\x1ae\x1bl\x1cl\x1do\x1e\x1f'), 'hello')
def test_force_unicode(self):
self.assertEqual(force_unicode(b'Hello \xe2\x98\x83'), 'Hello ☃')
# Don't mangle, it's already Unicode.
self.assertEqual(force_unicode('Hello ☃'), 'Hello ☃')
self.assertEqual(force_unicode(1), '1', "force_unicode() should convert ints")
self.assertEqual(force_unicode(1.0), '1.0', "force_unicode() should convert floats")
self.assertEqual(force_unicode(None), 'None', 'force_unicode() should convert None')
def test_force_bytes(self):
self.assertEqual(force_bytes('Hello ☃'), b'Hello \xe2\x98\x83')
# Don't mangle, it's already a bytestring.
self.assertEqual(force_bytes(b'Hello \xe2\x98\x83'), b'Hello \xe2\x98\x83')
def test_clean_xml_string(self):
self.assertEqual(clean_xml_string('\x00\x0b\x0d\uffff'), '\x0d')
class ResultsTestCase(unittest.TestCase):
def test_init(self):
default_results = Results([{'id': 1}, {'id': 2}], 2)
self.assertEqual(default_results.docs, [{'id': 1}, {'id': 2}])
self.assertEqual(default_results.hits, 2)
self.assertEqual(default_results.highlighting, {})
self.assertEqual(default_results.facets, {})
self.assertEqual(default_results.spellcheck, {})
self.assertEqual(default_results.stats, {})
self.assertEqual(default_results.qtime, None)
self.assertEqual(default_results.debug, {})
self.assertEqual(default_results.grouped, {})
full_results = Results(
docs=[{'id': 1}, {'id': 2}, {'id': 3}],
hits=3,
# Fake data just to check assignments.
highlighting='hi',
facets='fa',
spellcheck='sp',
stats='st',
qtime='0.001',
debug=True,
grouped=['a']
)
self.assertEqual(full_results.docs, [{'id': 1}, {'id': 2}, {'id': 3}])
self.assertEqual(full_results.hits, 3)
self.assertEqual(full_results.highlighting, 'hi')
self.assertEqual(full_results.facets, 'fa')
self.assertEqual(full_results.spellcheck, 'sp')
self.assertEqual(full_results.stats, 'st')
self.assertEqual(full_results.qtime, '0.001')
self.assertEqual(full_results.debug, True)
self.assertEqual(full_results.grouped, ['a'])
def test_len(self):
small_results = Results([{'id': 1}, {'id': 2}], 2)
self.assertEqual(len(small_results), 2)
wrong_hits_results = Results([{'id': 1}, {'id': 2}, {'id': 3}], 7)
self.assertEqual(len(wrong_hits_results), 3)
def test_iter(self):
long_results = Results([{'id': 1}, {'id': 2}, {'id': 3}], 3)
to_iter = list(long_results)
self.assertEqual(to_iter[0], {'id': 1})
self.assertEqual(to_iter[1], {'id': 2})
self.assertEqual(to_iter[2], {'id': 3})
class SolrTestCase(unittest.TestCase):
def setUp(self):
super(SolrTestCase, self).setUp()
self.default_solr = Solr('http://localhost:8983/solr/core0')
# Short timeouts.
self.solr = Solr('http://localhost:8983/solr/core0', timeout=2)
self.docs = [
{
'id': 'doc_1',
'title': 'Example doc 1',
'price': 12.59,
'popularity': 10,
},
{
'id': 'doc_2',
'title': 'Another example ☃ doc 2',
'price': 13.69,
'popularity': 7,
},
{
'id': 'doc_3',
'title': 'Another thing',
'price': 2.35,
'popularity': 8,
},
{
'id': 'doc_4',
'title': 'doc rock',
'price': 99.99,
'popularity': 10,
},
{
'id': 'doc_5',
'title': 'Boring',
'price': 1.12,
'popularity': 2,
},
]
# Clear it.
self.solr.delete(q='*:*')
# Index our docs. Yes, this leans on functionality we're going to test
# later & if it's broken, everything will catastrophically fail.
# Such is life.
self.solr.add(self.docs)
def tearDown(self):
self.solr.delete(q='*:*')
super(SolrTestCase, self).tearDown()
def test_init(self):
self.assertEqual(self.default_solr.url, 'http://localhost:8983/solr/core0')
self.assertTrue(isinstance(self.default_solr.decoder, json.JSONDecoder))
self.assertEqual(self.default_solr.timeout, 60)
self.assertEqual(self.solr.url, 'http://localhost:8983/solr/core0')
self.assertTrue(isinstance(self.solr.decoder, json.JSONDecoder))
self.assertEqual(self.solr.timeout, 2)
def test__create_full_url(self):
# Nada.
self.assertEqual(self.solr._create_full_url(path=''), 'http://localhost:8983/solr/core0')
# Basic path.
self.assertEqual(self.solr._create_full_url(path='pysolr_tests'), 'http://localhost:8983/solr/core0/pysolr_tests')
# Leading slash (& making sure we don't touch the trailing slash).
self.assertEqual(self.solr._create_full_url(path='/pysolr_tests/select/?whatever=/'), 'http://localhost:8983/solr/core0/pysolr_tests/select/?whatever=/')
def test__send_request(self):
# Test a valid request.
resp_body = self.solr._send_request('GET', 'select/?q=doc&wt=json')
self.assertTrue('"numFound":3' in resp_body)
# Test a lowercase method & a body.
xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee! ☃</field></doc></add>'
resp_body = self.solr._send_request('POST', 'update/?commit=true', body=xml_body, headers={
'Content-type': 'text/xml; charset=utf-8',
})
self.assertTrue('<int name="status">0</int>' in resp_body)
# Test a non-existent URL.
old_url = self.solr.url
self.solr.url = 'http://127.0.0.1:567898/wahtever'
self.assertRaises(SolrError, self.solr._send_request, 'get', 'select/?q=doc&wt=json')
self.solr.url = old_url
def test__select(self):
# Short params.
resp_body = self.solr._select({'q': 'doc'})
resp_data = json.loads(resp_body)
self.assertEqual(resp_data['response']['numFound'], 3)
# Long params.
resp_body = self.solr._select({'q': 'doc' * 1024})
resp_data = json.loads(resp_body)
self.assertEqual(resp_data['response']['numFound'], 0)
self.assertEqual(len(resp_data['responseHeader']['params']['q']), 3 * 1024)
def test__mlt(self):
resp_body = self.solr._mlt({'q': 'id:doc_1', 'mlt.fl': 'title'})
resp_data = json.loads(resp_body)
self.assertEqual(resp_data['response']['numFound'], 0)
def test__suggest_terms(self):
resp_body = self.solr._select({'terms.fl': 'title'})
resp_data = json.loads(resp_body)
self.assertEqual(resp_data['response']['numFound'], 0)
def test__update(self):
xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee!</field></doc></add>'
resp_body = self.solr._update(xml_body)
self.assertTrue('<int name="status">0</int>' in resp_body)
def test__soft_commit(self):
xml_body = '<add><doc><field name="id">doc_12</field><field name="title">Whee!</field></doc></add>'
resp_body = self.solr._update(xml_body, softCommit=True)
self.assertTrue('<int name="status">0</int>' in resp_body)
def test__extract_error(self):
class RubbishResponse(object):
def __init__(self, content, headers=None):
if isinstance(content, bytes):
content = content.decode('utf-8')
self.content = content
self.headers = headers
if self.headers is None:
self.headers = {}
def json(self):
return json.loads(self.content)
# Just the reason.
resp_1 = RubbishResponse("We don't care.", {'reason': 'Something went wrong.'})
self.assertEqual(self.solr._extract_error(resp_1), "[Reason: Something went wrong.]")
# Empty reason.
resp_2 = RubbishResponse("We don't care.", {'reason': None})
self.assertEqual(self.solr._extract_error(resp_2), "[Reason: None]\nWe don't care.")
# No reason. Time to scrape.
resp_3 = RubbishResponse('<html><body><pre>Something is broke.</pre></body></html>', {'server': 'jetty'})
self.assertEqual(self.solr._extract_error(resp_3), "[Reason: Something is broke.]")
# No reason. JSON response.
resp_4 = RubbishResponse(b'\n {"error": {"msg": "It happens"}}', {'server': 'tomcat'})
self.assertEqual(self.solr._extract_error(resp_4), "[Reason: It happens]")
# No reason. Weird JSON response.
resp_5 = RubbishResponse(b'{"kinda": "weird"}', {'server': 'jetty'})
self.assertEqual(self.solr._extract_error(resp_5), '[Reason: None]\n{"kinda": "weird"}')
def test__scrape_response(self):
# Jetty.
resp_1 = self.solr._scrape_response({'server': 'jetty'}, '<html><body><pre>Something is broke.</pre></body></html>')
self.assertEqual(resp_1, ('Something is broke.', u''))
# Other.
resp_2 = self.solr._scrape_response({'server': 'crapzilla'}, '<html><head><title>Wow. Seriously weird.</title></head><body><pre>Something is broke.</pre></body></html>')
self.assertEqual(resp_2, ('Wow. Seriously weird.', u''))
@unittest.skipIf(sys.version_info < (2, 7), reason=u'Python 2.6 lacks the ElementTree 1.3 interface required for Solr XML error message parsing')
def test__scrape_response_coyote_xml(self):
resp_3 = self.solr._scrape_response({'server': 'coyote'}, '<?xml version="1.0"?>\n<response>\n<lst name="responseHeader"><int name="status">400</int><int name="QTime">0</int></lst><lst name="error"><str name="msg">Invalid Date String:\'2015-03-23 10:43:33\'</str><int name="code">400</int></lst>\n</response>\n')
self.assertEqual(resp_3, ("Invalid Date String:'2015-03-23 10:43:33'", "Invalid Date String:'2015-03-23 10:43:33'"))
# Valid XML with a traceback
resp_4 = self.solr._scrape_response({'server': 'coyote'}, """<?xml version="1.0"?>
<response>
<lst name="responseHeader"><int name="status">500</int><int name="QTime">138</int></lst><lst name="error"><str name="msg">Internal Server Error</str><str name="trace">org.apache.solr.common.SolrException: Internal Server Error at java.lang.Thread.run(Thread.java:745)</str><int name="code">500</int></lst>
</response>""")
self.assertEqual(resp_4, (u"Internal Server Error", u"org.apache.solr.common.SolrException: Internal Server Error at java.lang.Thread.run(Thread.java:745)"))
def test__scrape_response_tomcat(self):
"""Tests for Tomcat error responses"""
resp_0 = self.solr._scrape_response({'server': 'coyote'}, '<html><body><h1>Something broke!</h1><pre>gigantic stack trace</pre></body></html>')
self.assertEqual(resp_0, ('Something broke!', ''))
# Invalid XML
bogus_xml = '<?xml version="1.0"?>\n<response>\n<lst name="responseHeader"><int name="status">400</int><int name="QTime">0</int></lst><lst name="error"><str name="msg">Invalid Date String:\'2015-03-23 10:43:33\'</str><int name="code">400</int></lst>'
reason, full_html = self.solr._scrape_response({'server': 'coyote'}, bogus_xml)
self.assertEqual(reason, None)
self.assertEqual(full_html, bogus_xml.replace("\n", ""))
def test__from_python(self):
self.assertEqual(self.solr._from_python(datetime.date(2013, 1, 18)), '2013-01-18T00:00:00Z')
self.assertEqual(self.solr._from_python(datetime.datetime(2013, 1, 18, 0, 30, 28)), '2013-01-18T00:30:28Z')
self.assertEqual(self.solr._from_python(True), 'true')
self.assertEqual(self.solr._from_python(False), 'false')
self.assertEqual(self.solr._from_python(1), '1')
self.assertEqual(self.solr._from_python(1.2), '1.2')
self.assertEqual(self.solr._from_python(b'hello'), 'hello')
self.assertEqual(self.solr._from_python('hello ☃'), 'hello ☃')
self.assertEqual(self.solr._from_python('\x01test\x02'), 'test')
def test__to_python(self):
self.assertEqual(self.solr._to_python('2013-01-18T00:00:00Z'), datetime.datetime(2013, 1, 18))
self.assertEqual(self.solr._to_python('2013-01-18T00:30:28Z'), datetime.datetime(2013, 1, 18, 0, 30, 28))
self.assertEqual(self.solr._to_python('true'), True)
self.assertEqual(self.solr._to_python('false'), False)
self.assertEqual(self.solr._to_python(1), 1)
self.assertEqual(self.solr._to_python(1.2), 1.2)
self.assertEqual(self.solr._to_python(b'hello'), 'hello')
self.assertEqual(self.solr._to_python('hello ☃'), 'hello ☃')
self.assertEqual(self.solr._to_python(['foo', 'bar']), 'foo')
self.assertEqual(self.solr._to_python(('foo', 'bar')), 'foo')
self.assertEqual(self.solr._to_python('tuple("foo", "bar")'), 'tuple("foo", "bar")')
def test__is_null_value(self):
self.assertTrue(self.solr._is_null_value(None))
self.assertTrue(self.solr._is_null_value(''))
self.assertFalse(self.solr._is_null_value('Hello'))
self.assertFalse(self.solr._is_null_value(1))
def test_search(self):
results = self.solr.search('doc')
self.assertEqual(len(results), 3)
results = self.solr.search('example')
self.assertEqual(len(results), 2)
results = self.solr.search('nothing')
self.assertEqual(len(results), 0)
# Advanced options.
results = self.solr.search('doc', **{
'debug': 'true',
'hl': 'true',
'hl.fragsize': 8,
'facet': 'on',
'facet.field': 'popularity',
'spellcheck': 'true',
'spellcheck.collate': 'true',
'spellcheck.count': 1,
# TODO: Can't get these working in my test setup.
# 'group': 'true',
# 'group.field': 'id',
})
self.assertEqual(len(results), 3)
self.assertTrue('explain' in results.debug)
self.assertEqual(results.highlighting, {u'doc_4': {}, u'doc_2': {}, u'doc_1': {}})
self.assertEqual(results.spellcheck, {})
self.assertEqual(results.facets['facet_fields']['popularity'], ['10', 2, '7', 1, '2', 0, '8', 0])
self.assertTrue(results.qtime is not None)
# TODO: Can't get these working in my test setup.
# self.assertEqual(results.grouped, '')
def test_more_like_this(self):
results = self.solr.more_like_this('id:doc_1', 'text')
self.assertEqual(len(results), 0)
def test_suggest_terms(self):
results = self.solr.suggest_terms('title', '')
self.assertEqual(len(results), 1)
self.assertEqual(results, {'title': [('doc', 3), ('another', 2), ('example', 2), ('1', 1), ('2', 1), ('boring', 1), ('rock', 1), ('thing', 1)]})
def test__build_doc(self):
doc = {
'id': 'doc_1',
'title': 'Example doc ☃ 1',
'price': 12.59,
'popularity': 10,
}
doc_xml = force_unicode(ET.tostring(self.solr._build_doc(doc), encoding='utf-8'))
self.assertTrue('<field name="title">Example doc ☃ 1</field>' in doc_xml)
self.assertTrue('<field name="id">doc_1</field>' in doc_xml)
self.assertEqual(len(doc_xml), 152)
def test_add(self):
self.assertEqual(len(self.solr.search('doc')), 3)
self.assertEqual(len(self.solr.search('example')), 2)
self.solr.add([
{
'id': 'doc_6',
'title': 'Newly added doc',
},
{
'id': 'doc_7',
'title': 'Another example doc',
},
])
self.assertEqual(len(self.solr.search('doc')), 5)
self.assertEqual(len(self.solr.search('example')), 3)
def test_add_with_boost(self):
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.add([{'id': 'doc_6', 'title': 'Important doc'}],
boost={'title': 10.0})
self.solr.add([{'id': 'doc_7', 'title': 'Spam doc doc'}],
boost={'title': 0})
res = self.solr.search('doc')
self.assertEqual(len(res), 5)
self.assertEqual('doc_6', res.docs[0]['id'])
def test_field_update(self):
originalDocs = self.solr.search('doc')
self.assertEqual(len(originalDocs), 3)
updateList = []
for i, doc in enumerate(originalDocs):
updateList.append( {'id': doc['id'], 'popularity': 5} )
self.solr.add(updateList, fieldUpdates={'popularity': 'inc'})
updatedDocs = self.solr.search('doc')
self.assertEqual(len(updatedDocs), 3)
for i, (originalDoc, updatedDoc) in enumerate(zip(originalDocs, updatedDocs)):
self.assertEqual(len(updatedDoc.keys()), len(originalDoc.keys()))
self.assertEqual(updatedDoc['popularity'], originalDoc['popularity'] + 5)
self.assertEqual(True, all(updatedDoc[k] == originalDoc[k] for k in updatedDoc.keys() if not k in ['_version_', 'popularity']))
self.solr.add([
{
'id': 'multivalued_1',
'title': 'Multivalued doc 1',
'word_ss': ['alpha', 'beta'],
},
{
'id': 'multivalued_2',
'title': 'Multivalued doc 2',
'word_ss': ['charlie', 'delta'],
},
])
originalDocs = self.solr.search('multivalued')
self.assertEqual(len(originalDocs), 2)
updateList = []
for i, doc in enumerate(originalDocs):
updateList.append( {'id': doc['id'], 'word_ss': ['epsilon', 'gamma']} )
self.solr.add(updateList, fieldUpdates={'word_ss': 'add'})
updatedDocs = self.solr.search('multivalued')
self.assertEqual(len(updatedDocs), 2)
for i, (originalDoc, updatedDoc) in enumerate(zip(originalDocs, updatedDocs)):
self.assertEqual(len(updatedDoc.keys()), len(originalDoc.keys()))
self.assertEqual(updatedDoc['word_ss'], originalDoc['word_ss'] + ['epsilon', 'gamma'])
self.assertEqual(True, all(updatedDoc[k] == originalDoc[k] for k in updatedDoc.keys() if not k in ['_version_', 'word_ss']))
def test_delete(self):
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.delete(id='doc_1')
self.assertEqual(len(self.solr.search('doc')), 2)
self.solr.delete(q='price:[0 TO 15]')
self.assertEqual(len(self.solr.search('doc')), 1)
self.assertEqual(len(self.solr.search('*:*')), 1)
self.solr.delete(q='*:*')
self.assertEqual(len(self.solr.search('*:*')), 0)
# Need at least one.
self.assertRaises(ValueError, self.solr.delete)
# Can't have both.
self.assertRaises(ValueError, self.solr.delete, id='foo', q='bar')
def test_commit(self):
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.add([
{
'id': 'doc_6',
'title': 'Newly added doc',
}
], commit=False)
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.commit()
self.assertEqual(len(self.solr.search('doc')), 4)
def test_optimize(self):
# Make sure it doesn't blow up. Side effects are hard to measure. :/
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.add([
{
'id': 'doc_6',
'title': 'Newly added doc',
}
], commit=False)
self.assertEqual(len(self.solr.search('doc')), 3)
self.solr.optimize()
self.assertEqual(len(self.solr.search('doc')), 4)
def test_extract(self):
fake_f = StringIO("""
<html>
<head>
<meta charset="utf-8">
<meta name="haystack-test" content="test 1234">
<title>Test Title ☃☃</title>
</head>
<body>foobar</body>
</html>
""")
fake_f.name = "test.html"
extracted = self.solr.extract(fake_f)
# Verify documented response structure:
self.assertIn('contents', extracted)
self.assertIn('metadata', extracted)
self.assertIn('foobar', extracted['contents'])
m = extracted['metadata']
self.assertEqual([fake_f.name], m['stream_name'])
self.assertIn('haystack-test', m, "HTML metadata should have been extracted!")
self.assertEqual(['test 1234'], m['haystack-test'])
# Note the underhanded use of a double snowman to verify both that Tika
# correctly decoded entities and that our UTF-8 characters survived the
# round-trip:
self.assertEqual(['Test Title ☃☃'], m['title'])
def test_full_url(self):
self.solr.url = 'http://localhost:8983/solr/core0'
full_url = self.solr._create_full_url(path='/update')
# Make sure trailing and leading slashes do not collide:
self.assertEqual(full_url, 'http://localhost:8983/solr/core0/update')
|
snowdj/research_public
|
refs/heads/master
|
template_algorithms/basic_pairs_trade_optimize_template.py
|
2
|
"""
This is a basic pairs trading algorithm that uses the optimize API.
WARNING: THIS IS A LEARNING EXAMPLE ONLY. DO NOT TRY TO TRADE SOMETHING THIS SIMPLE.
https://www.quantopian.com/workshops
https://www.quantopian.com/lectures
For any questions, email max@quantopian.com
"""
import numpy as np
import pandas as pd
import quantopian.experimental.optimize as opt
import quantopian.algorithm as algo
MAX_GROSS_LEVERAGE = 1.0 # Set leverage constraint constant value for optimizer
def initialize(context):
"""
Called once at the start of the algorithm.
"""
    # Check the status of the pair every day, 60 minutes before the market close.
    # Scheduling well before the close leaves enough time for the rebalance orders to fill.
schedule_function(check_pair_status, date_rules.every_day(), time_rules.market_close(minutes=60))
context.stock1 = symbol('ABGB')
context.stock2 = symbol('FSLR')
context.stocks = [context.stock1, context.stock2]
# Our threshold for trading on the z-score
context.entry_threshold = 0.2
context.exit_threshold = 0.1
# Create a variable to store our target weights
context.target_weights = pd.Series(index=context.stocks, data=0.0)
# Moving average lengths
context.long_ma_length = 30
context.short_ma_length = 1
# Flags to tell us if we're currently in a trade
context.currently_long_the_spread = False
context.currently_short_the_spread = False
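# Strategy recap (explanatory note, not part of the original template text):
# check_pair_status() below builds the spread price(ABGB) - price(FSLR), takes its
# 30-day mean and standard deviation plus a 1-day mean, and trades on the z-score
#   z = (short_ma - long_ma) / long_std
# shorting the spread when z rises above entry_threshold, going long when z drops
# below -entry_threshold, and flattening once |z| falls under exit_threshold.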
def check_pair_status(context, data):
# For notational convenience
s1 = context.stock1
s2 = context.stock2
# Get pricing history
prices = data.history([s1, s2], "price", context.long_ma_length, '1d')
# Try debugging me here to see what the price
# data structure looks like
# To debug, click on the line number to the left of the
# next command. Line numbers on blank lines or comments
# won't work.
short_prices = prices.iloc[-context.short_ma_length:]
# Get the long mavg
long_ma = np.mean(prices[s1] - prices[s2])
# Get the std of the long window
long_std = np.std(prices[s1] - prices[s2])
# Get the short mavg
short_ma = np.mean(short_prices[s1] - short_prices[s2])
# Compute z-score
if long_std > 0:
zscore = (short_ma - long_ma)/long_std
# Our two entry cases
if zscore > context.entry_threshold and \
not context.currently_short_the_spread:
context.target_weights[s1] = -0.5 # short top
context.target_weights[s2] = 0.5 # long bottom
context.currently_short_the_spread = True
context.currently_long_the_spread = False
elif zscore < -context.entry_threshold and \
not context.currently_long_the_spread:
context.target_weights[s1] = 0.5 # long top
context.target_weights[s2] = -0.5 # short bottom
context.currently_short_the_spread = False
context.currently_long_the_spread = True
# Our exit case
elif abs(zscore) < context.exit_threshold:
context.target_weights[s1] = 0 # close out
context.target_weights[s2] = 0 # close out
context.currently_short_the_spread = False
context.currently_long_the_spread = False
record('zscore', zscore)
# Call the optimizer
allocate(context, data)
def allocate(context, data):
# Set objective to match target weights as closely as possible, given constraints
objective = opt.TargetPortfolioWeights(context.target_weights)
# Define constraints
constraints = []
constraints.append(opt.MaxGrossLeverage(MAX_GROSS_LEVERAGE))
algo.order_optimal_portfolio(
objective=objective,
constraints=constraints,
universe=context.stocks
)
|
rockneurotiko/madness-things
|
refs/heads/master
|
Python/tailrec.py
|
1
|
import sys
class TailRecurseException(Exception):
def __init__(self, args, kwargs):
self.args = args
self.kwargs = kwargs
def tail_call_optimized(g):
"""
This function decorates a function with tail call
optimization. It does this by throwing an exception
    if it is its own grandparent, and catching such
exceptions to fake the tail call optimization.
This function fails if the decorated
function recurses in a non-tail context.
"""
def func(*args, **kwargs):
f = sys._getframe()
if f.f_back and f.f_back.f_back \
and f.f_back.f_back.f_code == f.f_code:
raise TailRecurseException(args, kwargs)
else:
while 1:
try:
return g(*args, **kwargs)
except TailRecurseException as e:
args = e.args
kwargs = e.kwargs
func.__doc__ = g.__doc__
return func
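# Explanatory note on the decorator above: when a decorated function makes its tail
# call, the inner wrapper sees its own frame, its parent (the body of g making the
# recursive call), and its grandparent (the wrapper of the outer call). Because the
# grandparent runs the same code object, the wrapper raises TailRecurseException
# instead of recursing; the outer wrapper's while-loop catches it and re-invokes g
# with the new arguments, so the Python stack depth stays constant.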
@tail_call_optimized
def factorial(n, acc=1):
"calculate a factorial"
if n == 0:
return acc
return factorial(n-1, n*acc)
print(factorial(100000))
# prints a big, big number,
# but doesn't hit the recursion limit.
@tail_call_optimized
def fib(i, current = 0, next = 1):
if i == 0:
return current
else:
return fib(i - 1, next, current + next)
print(fib(10000))
# also prints a big number,
# but doesn't hit the recursion limit.
|
pcmoritz/Strada.jl
|
refs/heads/master
|
deps/src/caffe/python/apollocaffe/models/googlenet.py
|
1
|
import os
from apollocaffe import layers
def weights_file():
filename = os.path.normpath('%s/../../../models/bvlc_googlenet/bvlc_googlenet.caffemodel' % os.path.dirname(os.path.realpath(__file__)))
if not os.path.exists(filename):
download_script = os.path.normpath('%s/../../../scripts/download_model_binary.py' % os.path.dirname(os.path.realpath(__file__)))
model_file = os.path.normpath('%s/../../../models/bvlc_googlenet' % os.path.dirname(os.path.realpath(__file__)))
raise OSError('Please download the GoogLeNet model first with: \n' +
'$ %s %s' % (download_script, model_file))
return filename
def googlenet_layers():
weight_filler = layers.Filler("xavier")
bias_filler = layers.Filler("constant", 0.2)
conv_lr_mults = [1.0, 2.0]
conv_decay_mults = [1.0, 0.0]
googlenet_layers = [
layers.Convolution(name="conv1/7x7_s2", bottoms=["data"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(7, 7), stride=2, pad=3, weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="conv1/relu_7x7", bottoms=["conv1/7x7_s2"], tops=["conv1/7x7_s2"]),
layers.Pooling(name="pool1/3x3_s2", bottoms=["conv1/7x7_s2"], kernel_size=3, stride=2),
layers.LRN(name="pool1/norm1", bottoms=["pool1/3x3_s2"], local_size=5, alpha=0.0001, beta=0.75),
layers.Convolution(name="conv2/3x3_reduce", bottoms=["pool1/norm1"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="conv2/relu_3x3_reduce", bottoms=["conv2/3x3_reduce"], tops=["conv2/3x3_reduce"]),
layers.Convolution(name="conv2/3x3", bottoms=["conv2/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=192),
layers.ReLU(name="conv2/relu_3x3", bottoms=["conv2/3x3"], tops=["conv2/3x3"]),
layers.LRN(name="conv2/norm2", bottoms=["conv2/3x3"], local_size=5, alpha=0.0001, beta=0.75),
layers.Pooling(name="pool2/3x3_s2", bottoms=["conv2/norm2"], kernel_size=3, stride=2),
layers.Convolution(name="inception_3a/1x1", bottoms=["pool2/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_3a/relu_1x1", bottoms=["inception_3a/1x1"], tops=["inception_3a/1x1"]),
layers.Convolution(name="inception_3a/3x3_reduce", bottoms=["pool2/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=96),
layers.ReLU(name="inception_3a/relu_3x3_reduce", bottoms=["inception_3a/3x3_reduce"], tops=["inception_3a/3x3_reduce"]),
layers.Convolution(name="inception_3a/3x3", bottoms=["inception_3a/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_3a/relu_3x3", bottoms=["inception_3a/3x3"], tops=["inception_3a/3x3"]),
layers.Convolution(name="inception_3a/5x5_reduce", bottoms=["pool2/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=16),
layers.ReLU(name="inception_3a/relu_5x5_reduce", bottoms=["inception_3a/5x5_reduce"], tops=["inception_3a/5x5_reduce"]),
layers.Convolution(name="inception_3a/5x5", bottoms=["inception_3a/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_3a/relu_5x5", bottoms=["inception_3a/5x5"], tops=["inception_3a/5x5"]),
layers.Pooling(name="inception_3a/pool", bottoms=["pool2/3x3_s2"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_3a/pool_proj", bottoms=["inception_3a/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_3a/relu_pool_proj", bottoms=["inception_3a/pool_proj"], tops=["inception_3a/pool_proj"]),
layers.Concat(name="inception_3a/output", bottoms=["inception_3a/1x1", "inception_3a/3x3", "inception_3a/5x5", "inception_3a/pool_proj"]),
layers.Convolution(name="inception_3b/1x1", bottoms=["inception_3a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_3b/relu_1x1", bottoms=["inception_3b/1x1"], tops=["inception_3b/1x1"]),
layers.Convolution(name="inception_3b/3x3_reduce", bottoms=["inception_3a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_3b/relu_3x3_reduce", bottoms=["inception_3b/3x3_reduce"], tops=["inception_3b/3x3_reduce"]),
layers.Convolution(name="inception_3b/3x3", bottoms=["inception_3b/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=192),
layers.ReLU(name="inception_3b/relu_3x3", bottoms=["inception_3b/3x3"], tops=["inception_3b/3x3"]),
layers.Convolution(name="inception_3b/5x5_reduce", bottoms=["inception_3a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_3b/relu_5x5_reduce", bottoms=["inception_3b/5x5_reduce"], tops=["inception_3b/5x5_reduce"]),
layers.Convolution(name="inception_3b/5x5", bottoms=["inception_3b/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=96),
layers.ReLU(name="inception_3b/relu_5x5", bottoms=["inception_3b/5x5"], tops=["inception_3b/5x5"]),
layers.Pooling(name="inception_3b/pool", bottoms=["inception_3a/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_3b/pool_proj", bottoms=["inception_3b/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_3b/relu_pool_proj", bottoms=["inception_3b/pool_proj"], tops=["inception_3b/pool_proj"]),
layers.Concat(name="inception_3b/output", bottoms=["inception_3b/1x1", "inception_3b/3x3", "inception_3b/5x5", "inception_3b/pool_proj"]),
layers.Pooling(name="pool3/3x3_s2", bottoms=["inception_3b/output"], kernel_size=3, stride=2),
layers.Convolution(name="inception_4a/1x1", bottoms=["pool3/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=192),
layers.ReLU(name="inception_4a/relu_1x1", bottoms=["inception_4a/1x1"], tops=["inception_4a/1x1"]),
layers.Convolution(name="inception_4a/3x3_reduce", bottoms=["pool3/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=96),
layers.ReLU(name="inception_4a/relu_3x3_reduce", bottoms=["inception_4a/3x3_reduce"], tops=["inception_4a/3x3_reduce"]),
layers.Convolution(name="inception_4a/3x3", bottoms=["inception_4a/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=208),
layers.ReLU(name="inception_4a/relu_3x3", bottoms=["inception_4a/3x3"], tops=["inception_4a/3x3"]),
layers.Convolution(name="inception_4a/5x5_reduce", bottoms=["pool3/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=16),
layers.ReLU(name="inception_4a/relu_5x5_reduce", bottoms=["inception_4a/5x5_reduce"], tops=["inception_4a/5x5_reduce"]),
layers.Convolution(name="inception_4a/5x5", bottoms=["inception_4a/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=48),
layers.ReLU(name="inception_4a/relu_5x5", bottoms=["inception_4a/5x5"], tops=["inception_4a/5x5"]),
layers.Pooling(name="inception_4a/pool", bottoms=["pool3/3x3_s2"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_4a/pool_proj", bottoms=["inception_4a/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4a/relu_pool_proj", bottoms=["inception_4a/pool_proj"], tops=["inception_4a/pool_proj"]),
layers.Concat(name="inception_4a/output", bottoms=["inception_4a/1x1", "inception_4a/3x3", "inception_4a/5x5", "inception_4a/pool_proj"]),
layers.Pooling(name="loss1/ave_pool", bottoms=["inception_4a/output"], kernel_size=5, stride=3, pool='AVE'),
layers.Convolution(name="loss1/conv", bottoms=["loss1/ave_pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="loss1/relu_conv", bottoms=["loss1/conv"], tops=["loss1/conv"]),
layers.InnerProduct(name="loss1/fc", bottoms=["loss1/conv"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, weight_filler=weight_filler, bias_filler=bias_filler, num_output=1024),
layers.ReLU(name="loss1/relu_fc", bottoms=["loss1/fc"], tops=["loss1/fc"]),
layers.Dropout(name="loss1/drop_fc", bottoms=["loss1/fc"], tops=["loss1/fc"], dropout_ratio=0.7),
layers.InnerProduct(name="loss1/classifier", bottoms=["loss1/fc"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, weight_filler=weight_filler, bias_filler=layers.CaffeFiller(type="constant", value=0.0), num_output=1000),
layers.SoftmaxWithLoss(name="loss1/loss", bottoms=["loss1/classifier", "label"], tops=["loss1/loss1"], loss_weight=0.3),
layers.Convolution(name="inception_4b/1x1", bottoms=["inception_4a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=160),
layers.ReLU(name="inception_4b/relu_1x1", bottoms=["inception_4b/1x1"], tops=["inception_4b/1x1"]),
layers.Convolution(name="inception_4b/3x3_reduce", bottoms=["inception_4a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=112),
layers.ReLU(name="inception_4b/relu_3x3_reduce", bottoms=["inception_4b/3x3_reduce"], tops=["inception_4b/3x3_reduce"]),
layers.Convolution(name="inception_4b/3x3", bottoms=["inception_4b/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=224),
layers.ReLU(name="inception_4b/relu_3x3", bottoms=["inception_4b/3x3"], tops=["inception_4b/3x3"]),
layers.Convolution(name="inception_4b/5x5_reduce", bottoms=["inception_4a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=24),
layers.ReLU(name="inception_4b/relu_5x5_reduce", bottoms=["inception_4b/5x5_reduce"], tops=["inception_4b/5x5_reduce"]),
layers.Convolution(name="inception_4b/5x5", bottoms=["inception_4b/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4b/relu_5x5", bottoms=["inception_4b/5x5"], tops=["inception_4b/5x5"]),
layers.Pooling(name="inception_4b/pool", bottoms=["inception_4a/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_4b/pool_proj", bottoms=["inception_4b/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4b/relu_pool_proj", bottoms=["inception_4b/pool_proj"], tops=["inception_4b/pool_proj"]),
layers.Concat(name="inception_4b/output", bottoms=["inception_4b/1x1", "inception_4b/3x3", "inception_4b/5x5", "inception_4b/pool_proj"]),
layers.Convolution(name="inception_4c/1x1", bottoms=["inception_4b/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_4c/relu_1x1", bottoms=["inception_4c/1x1"], tops=["inception_4c/1x1"]),
layers.Convolution(name="inception_4c/3x3_reduce", bottoms=["inception_4b/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_4c/relu_3x3_reduce", bottoms=["inception_4c/3x3_reduce"], tops=["inception_4c/3x3_reduce"]),
layers.Convolution(name="inception_4c/3x3", bottoms=["inception_4c/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=256),
layers.ReLU(name="inception_4c/relu_3x3", bottoms=["inception_4c/3x3"], tops=["inception_4c/3x3"]),
layers.Convolution(name="inception_4c/5x5_reduce", bottoms=["inception_4b/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=24),
layers.ReLU(name="inception_4c/relu_5x5_reduce", bottoms=["inception_4c/5x5_reduce"], tops=["inception_4c/5x5_reduce"]),
layers.Convolution(name="inception_4c/5x5", bottoms=["inception_4c/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4c/relu_5x5", bottoms=["inception_4c/5x5"], tops=["inception_4c/5x5"]),
layers.Pooling(name="inception_4c/pool", bottoms=["inception_4b/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_4c/pool_proj", bottoms=["inception_4c/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4c/relu_pool_proj", bottoms=["inception_4c/pool_proj"], tops=["inception_4c/pool_proj"]),
layers.Concat(name="inception_4c/output", bottoms=["inception_4c/1x1", "inception_4c/3x3", "inception_4c/5x5", "inception_4c/pool_proj"]),
layers.Convolution(name="inception_4d/1x1", bottoms=["inception_4c/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=112),
layers.ReLU(name="inception_4d/relu_1x1", bottoms=["inception_4d/1x1"], tops=["inception_4d/1x1"]),
layers.Convolution(name="inception_4d/3x3_reduce", bottoms=["inception_4c/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=144),
layers.ReLU(name="inception_4d/relu_3x3_reduce", bottoms=["inception_4d/3x3_reduce"], tops=["inception_4d/3x3_reduce"]),
layers.Convolution(name="inception_4d/3x3", bottoms=["inception_4d/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=288),
layers.ReLU(name="inception_4d/relu_3x3", bottoms=["inception_4d/3x3"], tops=["inception_4d/3x3"]),
layers.Convolution(name="inception_4d/5x5_reduce", bottoms=["inception_4c/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_4d/relu_5x5_reduce", bottoms=["inception_4d/5x5_reduce"], tops=["inception_4d/5x5_reduce"]),
layers.Convolution(name="inception_4d/5x5", bottoms=["inception_4d/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4d/relu_5x5", bottoms=["inception_4d/5x5"], tops=["inception_4d/5x5"]),
layers.Pooling(name="inception_4d/pool", bottoms=["inception_4c/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_4d/pool_proj", bottoms=["inception_4d/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=64),
layers.ReLU(name="inception_4d/relu_pool_proj", bottoms=["inception_4d/pool_proj"], tops=["inception_4d/pool_proj"]),
layers.Concat(name="inception_4d/output", bottoms=["inception_4d/1x1", "inception_4d/3x3", "inception_4d/5x5", "inception_4d/pool_proj"]),
layers.Pooling(name="loss2/ave_pool", bottoms=["inception_4d/output"], kernel_size=5, stride=3, pool='AVE'),
layers.Convolution(name="loss2/conv", bottoms=["loss2/ave_pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="loss2/relu_conv", bottoms=["loss2/conv"], tops=["loss2/conv"]),
layers.InnerProduct(name="loss2/fc", bottoms=["loss2/conv"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, weight_filler=weight_filler, bias_filler=bias_filler, num_output=1024),
layers.ReLU(name="loss2/relu_fc", bottoms=["loss2/fc"], tops=["loss2/fc"]),
layers.Dropout(name="loss2/drop_fc", bottoms=["loss2/fc"], tops=["loss2/fc"], dropout_ratio=0.7),
layers.InnerProduct(name="loss2/classifier", bottoms=["loss2/fc"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, weight_filler=weight_filler, bias_filler=layers.CaffeFiller(type="constant", value=0.0), num_output=1000),
layers.SoftmaxWithLoss(name="loss2/loss", bottoms=["loss2/classifier", "label"], tops=["loss2/loss1"], loss_weight=0.3),
layers.Convolution(name="inception_4e/1x1", bottoms=["inception_4d/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=256),
layers.ReLU(name="inception_4e/relu_1x1", bottoms=["inception_4e/1x1"], tops=["inception_4e/1x1"]),
layers.Convolution(name="inception_4e/3x3_reduce", bottoms=["inception_4d/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=160),
layers.ReLU(name="inception_4e/relu_3x3_reduce", bottoms=["inception_4e/3x3_reduce"], tops=["inception_4e/3x3_reduce"]),
layers.Convolution(name="inception_4e/3x3", bottoms=["inception_4e/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=320),
layers.ReLU(name="inception_4e/relu_3x3", bottoms=["inception_4e/3x3"], tops=["inception_4e/3x3"]),
layers.Convolution(name="inception_4e/5x5_reduce", bottoms=["inception_4d/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_4e/relu_5x5_reduce", bottoms=["inception_4e/5x5_reduce"], tops=["inception_4e/5x5_reduce"]),
layers.Convolution(name="inception_4e/5x5", bottoms=["inception_4e/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_4e/relu_5x5", bottoms=["inception_4e/5x5"], tops=["inception_4e/5x5"]),
layers.Pooling(name="inception_4e/pool", bottoms=["inception_4d/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_4e/pool_proj", bottoms=["inception_4e/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_4e/relu_pool_proj", bottoms=["inception_4e/pool_proj"], tops=["inception_4e/pool_proj"]),
layers.Concat(name="inception_4e/output", bottoms=["inception_4e/1x1", "inception_4e/3x3", "inception_4e/5x5", "inception_4e/pool_proj"]),
layers.Pooling(name="pool4/3x3_s2", bottoms=["inception_4e/output"], kernel_size=3, stride=2),
layers.Convolution(name="inception_5a/1x1", bottoms=["pool4/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=256),
layers.ReLU(name="inception_5a/relu_1x1", bottoms=["inception_5a/1x1"], tops=["inception_5a/1x1"]),
layers.Convolution(name="inception_5a/3x3_reduce", bottoms=["pool4/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=160),
layers.ReLU(name="inception_5a/relu_3x3_reduce", bottoms=["inception_5a/3x3_reduce"], tops=["inception_5a/3x3_reduce"]),
layers.Convolution(name="inception_5a/3x3", bottoms=["inception_5a/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=320),
layers.ReLU(name="inception_5a/relu_3x3", bottoms=["inception_5a/3x3"], tops=["inception_5a/3x3"]),
layers.Convolution(name="inception_5a/5x5_reduce", bottoms=["pool4/3x3_s2"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=32),
layers.ReLU(name="inception_5a/relu_5x5_reduce", bottoms=["inception_5a/5x5_reduce"], tops=["inception_5a/5x5_reduce"]),
layers.Convolution(name="inception_5a/5x5", bottoms=["inception_5a/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_5a/relu_5x5", bottoms=["inception_5a/5x5"], tops=["inception_5a/5x5"]),
layers.Pooling(name="inception_5a/pool", bottoms=["pool4/3x3_s2"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_5a/pool_proj", bottoms=["inception_5a/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_5a/relu_pool_proj", bottoms=["inception_5a/pool_proj"], tops=["inception_5a/pool_proj"]),
layers.Concat(name="inception_5a/output", bottoms=["inception_5a/1x1", "inception_5a/3x3", "inception_5a/5x5", "inception_5a/pool_proj"]),
layers.Convolution(name="inception_5b/1x1", bottoms=["inception_5a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=384),
layers.ReLU(name="inception_5b/relu_1x1", bottoms=["inception_5b/1x1"], tops=["inception_5b/1x1"]),
layers.Convolution(name="inception_5b/3x3_reduce", bottoms=["inception_5a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=192),
layers.ReLU(name="inception_5b/relu_3x3_reduce", bottoms=["inception_5b/3x3_reduce"], tops=["inception_5b/3x3_reduce"]),
layers.Convolution(name="inception_5b/3x3", bottoms=["inception_5b/3x3_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(3, 3), pad=1, weight_filler=weight_filler, bias_filler=bias_filler, num_output=384),
layers.ReLU(name="inception_5b/relu_3x3", bottoms=["inception_5b/3x3"], tops=["inception_5b/3x3"]),
layers.Convolution(name="inception_5b/5x5_reduce", bottoms=["inception_5a/output"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=48),
layers.ReLU(name="inception_5b/relu_5x5_reduce", bottoms=["inception_5b/5x5_reduce"], tops=["inception_5b/5x5_reduce"]),
layers.Convolution(name="inception_5b/5x5", bottoms=["inception_5b/5x5_reduce"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(5, 5), pad=2, weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_5b/relu_5x5", bottoms=["inception_5b/5x5"], tops=["inception_5b/5x5"]),
layers.Pooling(name="inception_5b/pool", bottoms=["inception_5a/output"], kernel_size=3, stride=1, pad=1),
layers.Convolution(name="inception_5b/pool_proj", bottoms=["inception_5b/pool"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, kernel_dim=(1, 1), weight_filler=weight_filler, bias_filler=bias_filler, num_output=128),
layers.ReLU(name="inception_5b/relu_pool_proj", bottoms=["inception_5b/pool_proj"], tops=["inception_5b/pool_proj"]),
layers.Concat(name="inception_5b/output", bottoms=["inception_5b/1x1", "inception_5b/3x3", "inception_5b/5x5", "inception_5b/pool_proj"]),
layers.Pooling(name="pool5/7x7_s1", bottoms=["inception_5b/output"], kernel_size=7, stride=1, pool='AVE'),
layers.Dropout(name="pool5/drop_7x7_s1", bottoms=["pool5/7x7_s1"], tops=["pool5/7x7_s1"], dropout_ratio=0.4),
layers.InnerProduct(name="loss3/classifier", bottoms=["pool5/7x7_s1"], param_lr_mults=conv_lr_mults, param_decay_mults=conv_decay_mults, weight_filler=weight_filler, bias_filler=layers.CaffeFiller(type="constant", value=0.0), num_output=1000),
layers.SoftmaxWithLoss(name="loss3/loss3", bottoms=["loss3/classifier", "label"], loss_weight=1.0),
]
return googlenet_layers
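# Illustrative usage sketch (the ApolloNet API names below are an assumption and not
# part of this file): the list returned above is intended to be forwarded layer by
# layer through an apollocaffe net, e.g.
#   net = apollocaffe.ApolloNet()
#   for layer in googlenet_layers():
#       net.f(layer)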
|
bunnyitvn/webptn
|
refs/heads/master
|
build/lib.linux-i686-2.7/django/contrib/staticfiles/utils.py
|
322
|
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
    ignored (if it matches any pattern in ``patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
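# Example (illustrative only; the storage object and patterns are assumptions):
#   for path in get_files(some_storage, ignore_patterns=['.*', '*~']):
#       handle(path)
# walks the backend recursively while skipping hidden files and editor backups.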
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
infphilo/hisat2
|
refs/heads/master
|
evaluation/get_programs.py
|
1
|
#!/usr/bin/env python
import sys, os
use_message = '''
'''
def get_aligners():
mac = (sys.platform == "darwin")
if not os.path.exists("aligners"):
os.mkdir("aligners")
os.chdir("aligners")
if not os.path.exists("bin"):
os.mkdir("bin")
programs = ["HISAT", "Bowtie2", "Bowtie", "TopHat2", "STAR", "GSNAP", "BWA", "StringTie", "Cufflinks"]
for program in programs:
if program == "HISAT":
dir = "hisat-0.1.6-beta"
if os.path.exists(dir):
continue
fname = dir + "-source.zip"
url = "http://www.ccb.jhu.edu/software/hisat/downloads"
bins = "hisat-align-s hisat-build-s hisat-inspect-s"
installs = bins + " hisat hisat-build hisat-inspect"
cmd = "wget %s/%s; unzip %s; cd %s; make %s; cp %s ../bin; cd .." % \
(url, fname, fname, dir, bins, installs)
elif program == "Bowtie2":
dir = "bowtie2-2.2.5"
if os.path.exists(dir):
continue
fname = dir + "-source.zip"
url = "http://sourceforge.net/projects/bowtie-bio/files/bowtie2/2.2.5"
bins = "bowtie2-align-s bowtie2-build-s bowtie2-inspect-s"
installs = bins + " bowtie2 bowtie2-build bowtie2-inspect"
cmd = "wget %s/%s; unzip %s; cd %s; make %s; cp %s ../bin; cd .." % \
(url, fname, fname, dir, bins, installs)
elif program == "Bowtie":
dir = "bowtie-1.1.2"
if os.path.exists(dir):
continue
fname = dir + "-src.zip"
url = "http://sourceforge.net/projects/bowtie-bio/files/bowtie/1.1.2"
bins = "bowtie-align-s bowtie-build-s bowtie-inspect-s"
installs = bins + " bowtie bowtie-build bowtie-inspect"
cmd = "wget %s/%s; unzip %s; cd %s; make %s; cp %s ../bin; cd .." % \
(url, fname, fname, dir, bins, installs)
elif program == "TopHat2":
if mac:
dir = "tophat-2.1.0.OSX_x86_64"
else:
dir = "tophat-2.1.0.Linux_x86_64"
if os.path.exists(dir):
continue
fname = dir + ".tar.gz"
url = "http://ccb.jhu.edu/software/tophat/downloads"
installs = "gtf_juncs juncs_db prep_reads segment_juncs tophat tophat_reports sra_to_solid samtools_0.1.18 map2gtf fix_map_ordering bam_merge long_spanning_reads sam_juncs gtf_to_fasta bam2fastx"
cmd = "wget %s/%s; tar xvzf %s; cd %s; cp %s ../bin; cd .." % \
(url, fname, fname, dir, installs)
elif program == "STAR":
dir = "2.5.2b"
if os.path.exists("STAR-" + dir):
continue
fname = dir + ".tar.gz"
url = "https://github.com/alexdobin/STAR/archive"
if mac:
add_cmd = "awk '{if($1 ~ /^CXX/) {print \"CXX =/opt/local/bin/g++-mp-4.8\";} else {print;}}' Makefile > Makefile.tmp; mv Makefile.tmp Makefile"
make_arg = "STARforMac"
cmd = "wget %s/%s; tar xvzf %s; cd STAR-%s/source; %s; make; make %s; cp STAR ../../bin; cd ../.." % \
(url, fname, fname, dir, add_cmd, make_arg)
else:
cmd = "wget %s/%s; tar xvzf %s; cd STAR-%s/source; make; cp STAR ../../bin; cd ../.." % \
(url, fname, fname, dir)
elif program == "GSNAP":
dir = "gmap-2015-07-23"
dir2 = "gmap-gsnap-2015-07-23"
if os.path.exists(dir):
continue
fname = dir2 + ".tar.gz"
url = "http://research-pub.gene.com/gmap/src"
installs = "gmap gmapl get-genome gmapindex iit_store iit_get iit_dump gsnap gsnapl uniqscan uniqscanl snpindex cmetindex atoiindex sam_sort ../util/*"
cmd = "wget %s/%s; tar xvzf %s; cd %s; ./configure; make; cd src; cp %s ../../bin; cd ../.." % \
(url, fname, fname, dir, installs)
elif program == "BWA":
dir = "bwa-0.7.12"
if os.path.exists(dir):
continue
url = "http://sourceforge.net/projects/bio-bwa/files/%s.tar.bz2" % (dir)
installs = "bwa"
cmd = "wget %s; tar xvzf %s.tar.bz2; cd %s; make; cp %s ../bin/; cd .." % (url, dir, dir, installs)
elif program == "StringTie":
dir = "stringtie-1.0.4"
url = "http://ccb.jhu.edu/software/stringtie/dl"
bins = "stringtie"
cmd = "wget %s/%s.tar.gz; tar xvzf %s.tar.gz; cd %s; make release; cp %s ../bin; cd .." % \
(url, dir, dir, dir, bins)
elif program == "Cufflinks":
cmd = ""
else:
assert False
print >> sys.stderr, cmd
os.system(cmd)
files = ["hisat2", "hisat2-align-s", "hisat2-build", "hisat2-build-s", "hisat2-inspect", "hisat2-inspect-s", "extract_splice_sites.py", "extract_snps.py", "simulate_reads.py"]
os.chdir("bin")
for file in files:
if os.path.exists(file):
continue
os.system("ln -s ../../../%s %s" % (file, file))
os.chdir("..")
os.chdir("..")
if __name__ == "__main__":
get_aligners()
|
K-3D/k3d
|
refs/heads/master
|
tests/mesh/mesh.source.QuadricParaboloid.py
|
2
|
#python
import testing
setup = testing.setup_mesh_source_test("QuadricParaboloid")
testing.require_valid_mesh(setup.document, setup.source.get_property("output_mesh"))
testing.require_similar_mesh(setup.document, setup.source.get_property("output_mesh"), "mesh.source.QuadricParaboloid", 1)
|
ftomassetti/intellij-community
|
refs/heads/master
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_filter.py
|
326
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name(u"_f"),
Name(u"_f"),
results["seq"].clone(),
Name(u"_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
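# Illustrative effect of the three PATTERN branches above (the generic case is left
# untouched inside the special contexts checked by in_special_context):
#   filter(lambda x: x > 0, xs)  ->  [x for x in xs if x > 0]
#   filter(None, xs)             ->  [_f for _f in xs if _f]
#   filter(f, xs)                ->  list(filter(f, xs))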
|
jinyu121/Canteen
|
refs/heads/master
|
CanteenWebsite/tests.py
|
72
|
# -*- coding: utf-8 -*-
from django.test import TestCase
# Create your tests here.
|
nagyistoce/odoo-dev-odoo
|
refs/heads/8.0
|
openerp/addons/base/tests/test_search.py
|
290
|
import unittest2
import openerp.tests.common as common
class test_search(common.TransactionCase):
def test_00_search_order(self):
registry, cr, uid = self.registry, self.cr, self.uid
# Create 6 partners with a given name, and a given creation order to
        # ensure the order of their ID. Some are set as inactive to verify they
# are by default excluded from the searches and to provide a second
# `order` argument.
partners = registry('res.partner')
c = partners.create(cr, uid, {'name': 'test_search_order_C'})
d = partners.create(cr, uid, {'name': 'test_search_order_D', 'active': False})
a = partners.create(cr, uid, {'name': 'test_search_order_A'})
b = partners.create(cr, uid, {'name': 'test_search_order_B'})
ab = partners.create(cr, uid, {'name': 'test_search_order_AB'})
e = partners.create(cr, uid, {'name': 'test_search_order_E', 'active': False})
# The tests.
# The basic searches should exclude records that have active = False.
# The order of the returned ids should be given by the `order`
# parameter of search().
name_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name asc")
self.assertEqual([a, ab, b, c], name_asc, "Search with 'NAME ASC' order failed.")
name_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name desc")
self.assertEqual([c, b, ab, a], name_desc, "Search with 'NAME DESC' order failed.")
id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id asc")
self.assertEqual([c, a, b, ab], id_asc, "Search with 'ID ASC' order failed.")
id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id desc")
self.assertEqual([ab, b, a, c], id_desc, "Search with 'ID DESC' order failed.")
# The inactive records shouldn't be excluded as soon as a condition on
# that field is present in the domain. The `order` parameter of
        # search() should support any legal comma-separated values.
active_asc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id asc")
self.assertEqual([d, e, c, a, b, ab], active_asc_id_asc, "Search with 'ACTIVE ASC, ID ASC' order failed.")
active_desc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id asc")
self.assertEqual([c, a, b, ab, d, e], active_desc_id_asc, "Search with 'ACTIVE DESC, ID ASC' order failed.")
active_asc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id desc")
self.assertEqual([e, d, ab, b, a, c], active_asc_id_desc, "Search with 'ACTIVE ASC, ID DESC' order failed.")
active_desc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id desc")
self.assertEqual([ab, b, a, c, e, d], active_desc_id_desc, "Search with 'ACTIVE DESC, ID DESC' order failed.")
id_asc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active asc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_asc, "Search with 'ID ASC, ACTIVE ASC' order failed.")
id_asc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active desc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_desc, "Search with 'ID ASC, ACTIVE DESC' order failed.")
id_desc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active asc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_asc, "Search with 'ID DESC, ACTIVE ASC' order failed.")
id_desc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active desc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_desc, "Search with 'ID DESC, ACTIVE DESC' order failed.")
def test_10_inherits_m2order(self):
registry, cr, uid = self.registry, self.cr, self.uid
users_obj = registry('res.users')
# Find Employee group
group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
group_employee_id = group_employee_ref and group_employee_ref[1] or False
# Get country/state data
country_us_id = registry('res.country').search(cr, uid, [('code', 'like', 'US')])[0]
state_ids = registry('res.country.state').search(cr, uid, [('country_id', '=', country_us_id)], limit=2)
country_be_id = registry('res.country').search(cr, uid, [('code', 'like', 'BE')])[0]
# Create test users
search_user = users_obj.create(cr, uid, {'name': '__search', 'login': '__search', 'groups_id': [(6, 0, [group_employee_id])]})
a = users_obj.create(cr, uid, {'name': '__test_A', 'login': '__test_A', 'country_id': country_be_id, 'state_id': country_be_id})
b = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__a_test_B', 'country_id': country_us_id, 'state_id': state_ids[1]})
c = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__z_test_B', 'country_id': country_us_id, 'state_id': state_ids[0]})
# Do: search on res.users, order on a field on res.partner to try inherits'd fields, then res.users
user_ids = users_obj.search(cr, search_user, [], order='name asc, login desc')
expected_ids = [search_user, a, c, b]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='state_id asc, country_id desc, name asc, login desc')
expected_ids = [c, b, a, search_user]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='country_id desc, state_id desc, name asc, login desc')
expected_ids = [search_user, b, c, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
        # Do: order on a many2one, not by passing an order parameter to search(), but by overriding _order of res_users
old_order = users_obj._order
users_obj._order = 'country_id desc, name asc, login desc'
user_ids = users_obj.search(cr, search_user, [])
expected_ids = [search_user, c, b, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
users_obj._order = old_order
if __name__ == '__main__':
unittest2.main()
|
roxyboy/bokeh
|
refs/heads/master
|
sphinx/source/docs/tutorials/solutions/olympics.py
|
23
|
import numpy as np
from bokeh.plotting import figure, output_file, show, VBox
from bokeh.sampledata.olympics2014 import data
data = { d['abbr']: d['medals'] for d in data['data'] if d['medals']['total'] > 0}
# pull out just the data we care about
countries = sorted(
data.keys(),
key=lambda x: data[x]['total'], reverse=True
)
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
# EXERCISE: output static HTML file
output_file('olympics.html')
# create a figure()
p1 = figure(title="Olympic Medals by Country (stacked)", tools="",
x_range=countries, y_range=[0, max(gold+silver+bronze)],
background_fill='#59636C', plot_width=800
)
# use the `rect` renderer to display stacked bars of the medal results. Note
# that we set y_range explicitly on the first renderer
p1.rect(x=countries, y=bronze/2, width=0.8, height=bronze, color="#CD7F32", alpha=0.6)
p1.rect(x=countries, y=bronze+silver/2, width=0.8, height=silver, color="silver", alpha=0.6)
# EXERCISE: add a `rect` renderer to stack the gold medal results
p1.rect(x=countries, y=bronze+silver+gold/2, width=0.8, height=gold, color="gold", alpha=0.6)
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the grid lines
# - change the major label standoff, and major_tick_out values
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
p1.xgrid.grid_line_color = None
p1.axis.major_label_text_font_size = "8pt"
p1.axis.major_label_standoff = 0
p1.xaxis.major_label_orientation = np.pi/3
p1.xaxis.major_label_standoff = 6
p1.xaxis.major_tick_out = 0
# create a new figure
p2 = figure(title="Olympic Medals by Country (grouped)", tools="",
x_range=countries, y_range=[0, max([gold.max(), silver.max(), bronze.max()])],
background_fill='#59636C', plot_width=1000, plot_height=300)
# Categorical percentage coordinates can be used for positioning/grouping
countries_bronze = [c+":0.3" for c in countries]
countries_silver = [c+":0.5" for c in countries]
countries_gold = [c+":0.7" for c in countries]
# EXERCISE: re-create the medal plot, but this time:
# - do not stack the bars on the y coordinate
# - use countries_gold, etc. to position the bars on the x coordinate
p2.rect(x=countries_bronze, y=bronze/2, width=0.2, height=bronze, color="#CD7F32", alpha=0.6)
p2.rect(x=countries_silver, y=silver/2, width=0.2, height=silver, color="silver", alpha=0.6)
p2.rect(x=countries_gold, y=gold/2, width=0.2, height=gold, color="gold", alpha=0.6)
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
p2.xgrid.grid_line_color = None
p2.axis.major_label_text_font_size = "8pt"
p2.axis.major_label_standoff = 0
p2.xaxis.major_label_orientation = np.pi/3
p2.xaxis.major_label_standoff = 6
p2.xaxis.major_tick_out = 0
# show the plots arrayed in a VBox
show(VBox(p1, p2))
|
KrzysztofStachanczyk/Sensors-WWW-website
|
refs/heads/master
|
www/env/lib/python2.7/site-packages/django/db/backends/mysql/schema.py
|
37
|
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
import MySQLdb.converters
return MySQLdb.escape(value, MySQLdb.converters.conversions)
def skip_default(self, field):
"""
MySQL doesn't accept default values for TEXT and BLOB types, and
implicitly treats these columns as nullable.
"""
db_type = field.db_type(self.connection)
return (
db_type is not None and
db_type.lower() in {
'tinyblob', 'blob', 'mediumblob', 'longblob',
'tinytext', 'text', 'mediumtext', 'longtext',
}
)
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _field_should_be_indexed(self, model, field):
create_index = super(DatabaseSchemaEditor, self)._field_should_be_indexed(model, field)
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
# No need to create an index for ForeignKey fields except if
# db_constraint=False because the index from that constraint won't be
# created.
if (storage == "InnoDB" and
create_index and
field.get_internal_type() == 'ForeignKey' and
field.db_constraint):
return False
return create_index
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._rename_field_sql(table, old_field, new_field, new_type)
|
40223105/w17test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/zipfile.py
|
620
|
"""
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
    Raised when a zipfile being written requires ZIP64 extensions
    and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
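# Illustrative note (added, not part of the original module): is_zipfile()
# accepts either a filesystem path or an already-open binary file object,
# e.g. is_zipfile("archive.zip") or is_zipfile(io.BytesIO(data)). It only
# checks that an end-of-central-directory record can be located, so a damaged
# archive may still fail later when its members are actually read.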
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
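        # Note (added): this is the MS-DOS date/time encoding used by ZIP:
        # dosdate packs (year - 1980) into bits 9-15, month into bits 5-8 and
        # day into bits 0-4; dostime packs hours into bits 11-15, minutes into
        # bits 5-10 and seconds/2 into bits 0-4 (2-second resolution).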
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
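# Note (added): header id 1 handled above is the ZIP64 "extended information"
# extra field; it carries the real 64-bit file size, compressed size and
# header offset whenever the corresponding 32-bit central-directory fields
# are saturated at 0xffffffff.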
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
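# Note (added): __call__ above implements the traditional PKWARE "ZipCrypto"
# stream-cipher step: a keystream byte is derived from key2 via
# k = key2 | 2; ((k * (k ^ 1)) >> 8) & 0xff, XORed with the ciphertext byte,
# and the three keys are then updated with the recovered plaintext byte.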
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise RuntimeError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True, ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
try:
if key == 'r':
self._RealGetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) >= ZIP_MAX_COMMENT:
if self.debug:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
try:
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream are an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
except:
if not self._filePassed:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
cmpr = _get_compressor(zinfo.compress_type)
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError('File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra data
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
finally:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, add all *.py files found directly in it. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w', allowZip64=True) as zf:
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
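# Illustrative usage sketch (added, not part of the original module), using
# only the API defined above and assuming zlib is available for ZIP_DEFLATED:
#
#     with ZipFile("example.zip", "w", compression=ZIP_DEFLATED) as zf:
#         zf.writestr("hello.txt", "hello world")
#     with ZipFile("example.zip") as zf:
#         print(zf.namelist())         # ['hello.txt']
#         print(zf.read("hello.txt"))  # b'hello world'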
|
mindbender-studio/setup
|
refs/heads/master
|
bin/windows/python36/Lib/site-packages/pip/_vendor/distlib/database.py
|
334
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
for metadata_filename in possible_filenames:
metadata_path = posixpath.join(entry, metadata_filename)
pydist = finder.find(metadata_path)
if pydist:
break
else:
continue
with contextlib.closing(pydist.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non-alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
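# Illustrative example (added, not part of the original source):
#     DistributionPath.distinfo_dirname('python-dateutil', '2.8.2')
# returns 'python_dateutil-2.8.2.dist-info'.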
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
Each matching distribution is yielded in turn; if nothing provides *name*
(and *version*, when given), nothing is yielded.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
if not version is None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
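# Illustrative usage sketch (added, not part of the original module); the
# distribution name 'pip' is only an example:
#
#     dp = DistributionPath(include_egg=True)   # scan sys.path, eggs included
#     dist = dp.get_distribution('pip')
#     if dist is not None:
#         print(dist.name_and_version)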
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
md = self.metadata
logger.debug('Getting requirements from metadata %r', md.todict())
reqts = getattr(md, req_attr)
return set(md.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
        Return a textual representation of this instance.
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
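# Editor's note: hedged sketch of matches_requirement() with a throw-away
# distribution built via make_dist() (defined later in this module); the
# parenthesised requirement format mirrors what this module uses elsewhere.
def _example_matches_requirement():  # illustrative only, never called
    dist = make_dist('example', '1.2.0')
    assert dist.matches_requirement('example (>= 1.0)')
    assert not dist.matches_requirement('example (< 1.0)')
    return dist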
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
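# Editor's note: illustrative sketch of the digest format produced by
# get_hash() above; 'installed_dist' is assumed to be an
# InstalledDistribution (whose hasher attribute is 'sha256'), so the
# returned string carries a 'sha256=' prefix. Not part of the original code.
def _example_record_hash(installed_dist):  # illustrative only, never called
    with open(__file__, 'rb') as fp:
        return installed_dist.get_hash(fp.read())  # e.g. 'sha256=AbCd...'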
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for Wheel 0.23 support
if r is None:
r = finder.find(WHEEL_METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
try:
r = finder.find('REQUESTED')
except AttributeError:
            r = None  # degrade gracefully instead of dropping into the debugger
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
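# Editor's note: a hedged sketch tying the RECORD helpers above together;
# 'path' is assumed to point at an existing .dist-info directory.
# Not part of the original module.
def _example_inspect_dist_info(path):  # illustrative only, never called
    dist = InstalledDistribution(path)
    records = list(dist.list_installed_files())   # [(path, hash, size), ...]
    problems = dist.check_installed_files()       # [] when RECORD matches disk
    return records, problems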
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
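# Editor's note: the legacy counterpart can be exercised the same way; this
# sketch assumes 'path' ends with '.egg-info' or '.egg' as required by
# _get_metadata(). Not part of the original module.
def _example_inspect_egg_info(path):  # illustrative only, never called
    dist = EggInfoDistribution(path)
    files = dist.list_installed_files()   # [(path, md5, size), ...]
    return dist.name_and_version, files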
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
for k, v in list(alist.items())[:]:
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
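# Editor's note: a small, hypothetical sketch of building a graph by hand and
# sorting it; the distributions come from make_dist(), defined later in this
# module. Not part of the original code.
def _example_dependency_graph():  # illustrative only, never called
    a = make_dist('a', '1.0')
    b = make_dist('b', '1.0')
    graph = DependencyGraph()
    graph.add_distribution(a)
    graph.add_distribution(b)
    graph.add_edge(a, b, '>= 1.0')            # a depends on b
    ordered, cyclic = graph.topological_sort()
    return ordered, cyclic                    # ([b, a], []) for this acyclic graph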
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
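# Editor's note: hedged sketch of make_graph() over whatever is currently
# installed; DistributionPath is defined earlier in this module.
# Illustrative only, never called.
def _example_make_graph():
    dists = list(DistributionPath(include_egg=True).get_distributions())
    graph = make_graph(dists)
    return graph.missing   # maps a distribution to its unsatisfied requirements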
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md)
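# Editor's note: trivial, hypothetical use of make_dist(); the summary kwarg
# is consumed before the remaining kwargs reach Metadata. Not part of the
# original module.
def _example_make_dist():  # illustrative only, never called
    dist = make_dist('example-project', '0.1', summary='An example distribution')
    return dist.name_and_version   # 'example-project (0.1)'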
|
aboutsajjad/Bridge
|
refs/heads/master
|
app_packages/youtube_dl/extractor/mofosex.py
|
14
|
from __future__ import unicode_literals
from ..utils import (
int_or_none,
str_to_int,
unified_strdate,
)
from .keezmovies import KeezMoviesIE
class MofosexIE(KeezMoviesIE):
_VALID_URL = r'https?://(?:www\.)?mofosex\.com/videos/(?P<id>\d+)/(?P<display_id>[^/?#&.]+)\.html'
_TESTS = [{
'url': 'http://www.mofosex.com/videos/318131/amateur-teen-playing-and-masturbating-318131.html',
'md5': '558fcdafbb63a87c019218d6e49daf8a',
'info_dict': {
'id': '318131',
'display_id': 'amateur-teen-playing-and-masturbating-318131',
'ext': 'mp4',
'title': 'amateur teen playing and masturbating',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20121114',
'view_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 18,
}
}, {
# This video is no longer available
'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
'only_matching': True,
}]
def _real_extract(self, url):
webpage, info = self._extract_info(url)
view_count = str_to_int(self._search_regex(
r'VIEWS:</span>\s*([\d,.]+)', webpage, 'view count', fatal=False))
like_count = int_or_none(self._search_regex(
r'id=["\']amountLikes["\'][^>]*>(\d+)', webpage,
'like count', fatal=False))
dislike_count = int_or_none(self._search_regex(
r'id=["\']amountDislikes["\'][^>]*>(\d+)', webpage,
            'dislike count', fatal=False))
upload_date = unified_strdate(self._html_search_regex(
r'Added:</span>([^<]+)', webpage, 'upload date', fatal=False))
info.update({
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'upload_date': upload_date,
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
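# Editor's note: hedged usage sketch; in practice this extractor is driven by
# YoutubeDL rather than instantiated directly, and the URL passed in is purely
# illustrative. Not part of the original file.
def _example_extract(url):  # illustrative only, never called
    import youtube_dl
    if MofosexIE.suitable(url):
        with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
            return ydl.extract_info(url, download=False)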
|
bbozhev/flask-test
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
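# Editor's note: small, hypothetical usage sketch for the pure-Python Trie
# above; the data mapping and prefix are illustrative. Not part of the
# original module.
def _example_trie():  # illustrative only, never called
    t = Trie({u"apple": 1, u"apply": 2, u"banana": 3})
    assert t.has_keys_with_prefix(u"app")
    return t.keys(u"app")   # set([u'apple', u'apply'])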
|
nypdmax/NUMA
|
refs/heads/master
|
tools/qemu-xen/scripts/tracetool/format/events_h.py
|
95
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate .h for event description.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
def begin(events):
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#ifndef TRACE__GENERATED_EVENTS_H',
'#define TRACE__GENERATED_EVENTS_H',
'',
'#include <stdbool.h>',
''
)
# event identifiers
out('typedef enum {')
for e in events:
out(' TRACE_%s,' % e.name.upper())
out(' TRACE_EVENT_COUNT',
'} TraceEventID;',
)
# static state
for e in events:
if 'disable' in e.properties:
enabled = 0
else:
enabled = 1
out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))
out('#include "trace/event-internal.h"',
'',
'#endif /* TRACE__GENERATED_EVENTS_H */',
)
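# Editor's note: hedged sketch of driving begin() with a stand-in event; the
# real event objects come from tracetool and only .name and .properties are
# consulted here. Illustrative only, not part of the generated-header logic.
def _example_begin():  # illustrative only, never called
    class _FakeEvent(object):
        def __init__(self, name, properties=()):
            self.name = name
            self.properties = properties
    # emits 'TRACE_QEMU_VFREE,' in the enum and '#define TRACE_QEMU_VFREE_ENABLED 1'
    begin([_FakeEvent("qemu_vfree")])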
|
Southpaw-TACTIC/TACTIC
|
refs/heads/4.7
|
src/tactic/command/project_template_cmd.py
|
1
|
###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ProjectTemplateInstallerCmd', 'ProjectTemplateCreatorCmd', 'ProjectTemplateCheckCmd']
import tacticenv
from pyasm.common import Xml, Environment, TacticException, Common
from pyasm.search import Search, SearchType
from pyasm.biz import Project
from pyasm.command import Command
import os, shutil
import re
class ProjectDeleteCmd(Command):
def execute(self):
# drop database pg
# DATA
# delete from file where project_code = 'pg'
# delete from snapshot where project_code = 'pg'
# delete from task where project_code = 'pg'
# delete from work_hour where project_code = 'pg'
# delete from note where project_code = 'pg'
# delete from wdg_settings where project_code = 'pg'
# configuration
# delete from schema where code = 'pg'
# delete from pipeline where project_code = 'pg'
# delete from search_object where namespace = 'pg'
pass
class ProjectTemplateCreatorCmd(Command):
def execute(self):
self.base_dir = self.kwargs.get("base_dir")
if not self.base_dir:
self.base_dir = Environment.get_template_dir()
self.project_code = self.kwargs.get("project_code")
if not self.project_code:
self.project_code = Project.get_project_code()
assert self.project_code
# project code can be called anything, and we want to have a _template suffix for the template code
#self.plugin_code = "%s_template" % self.project_code
#self.template_project_code = re.sub( '_template$', '', self.plugin_code)
self.template_project_code = self.project_code
self.project = Project.get_by_code(self.project_code)
if not self.project:
raise TacticException('This project [%s] does not exist'%self.project_code)
self.project_type = self.project.get_value("type")
if not self.project_type:
self.project_type = self.project_code
Project.set_project(self.project_code)
self.export_template()
def export_template(self):
xml = Xml()
self.xml = xml
xml.create_doc("manifest")
manifest_node = xml.get_root_node()
# Old implementation. Code is now on the data node
xml.set_attribute(manifest_node, "code", self.template_project_code)
# dump the notification entries
data_node = xml.create_element("data")
xml.append_child(manifest_node, data_node)
code_node = xml.create_element("code")
xml.append_child(data_node, code_node)
xml.set_node_value(code_node, self.template_project_code)
version = self.kwargs.get("version") or ""
version_node = xml.create_element("version")
xml.append_child(data_node, version_node)
xml.set_node_value(version_node, version)
# dump the project entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/project['code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/project")
xml.set_attribute(data_node, "unique", "true")
# dump the project_type entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/project['code','%s'].sthpw/project_type)" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/project_type")
xml.set_attribute(data_node, "unique", "true")
# dump the schema entry
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/schema['code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/schema")
xml.set_attribute(data_node, "unique", "true")
# find the project template search types
namespace = self.project_type
if not namespace or namespace == "default":
namespace = self.project_code
project_search_types = Search.eval("@GET(sthpw/search_object['namespace','%s'].search_type)" % namespace)
#project_types = Search.eval("@GET(sthpw/search_object['namespace','%s'].search_type)" % self.project_code)
# just dump the definition for data
for search_type in project_search_types:
data_node = xml.create_element("search_type")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "code", search_type)
search_types = [
"config/custom_script",
"config/widget_config",
"config/naming",
"config/client_trigger",
"config/process",
"config/trigger",
"config/url",
#"config/ingest_rule",
#"config/ingest_session",
]
for search_type in search_types:
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "search_type", search_type)
# find the currval
st_obj = SearchType.get(search_type)
# have to call nextval() to initiate this sequence in the session in psql since Postgres 8.1
seq_id = SearchType.sequence_nextval(search_type)
seq_id = SearchType.sequence_currval(search_type)
seq_id -= 1
if seq_id > 0:
SearchType.sequence_setval(search_type, seq_id)
xml.set_attribute(data_node, "seq_max", seq_id)
#xml.set_attribute(data_node, "path", "data.spt")
# dump the login_groups entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/login_group['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/login_group")
xml.set_attribute(data_node, "unique", "true")
# dump the pipelines entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/pipeline['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/pipeline")
xml.set_attribute(data_node, "unique", "true")
# dump the notification entries
data_node = xml.create_element("sobject")
xml.append_child(manifest_node, data_node)
xml.set_attribute(data_node, "expression", "@SOBJECT(sthpw/notification['project_code','%s'])" % self.project_code)
xml.set_attribute(data_node, "search_type", "sthpw/notification")
from .plugin import PluginCreator
creator = PluginCreator( base_dir=self.base_dir, manifest=xml.to_string(), force=True, version=version )
creator.execute()
self.zip_path = creator.get_zip_path()
def get_zip_path(self):
return self.zip_path
class ProjectTemplateInstallerCmd(Command):
    '''Install a template project through a zip file or the unzipped folder'''
def execute(self):
self.new_project = self.kwargs.get("new_project")
if self.new_project in [False, 'false']:
self.new_project = False
else:
self.new_project = True
self.mode = self.kwargs.get("mode")
if not self.mode:
self.mode = 'copy'
# if a path is specified, then handle this
self.path = self.kwargs.get("path")
self.project_code = self.kwargs.get("project_code")
self.is_template = self.kwargs.get("is_template")
# check to see if the project already exists
# FIXME: how to determine which project code? pass it in even with path kwarg for now
project = Project.get_by_code(self.project_code)
if self.new_project and project:
raise TacticException("Project [%s] already exists in this installation. Exiting..." % self.project_code)
if self.path:
self.handle_path(self.path)
assert self.project_code
# determines which template to use
self.template_code = self.kwargs.get("template_code")
if not self.template_code:
self.template_code = self.project_code
# template code can end with _template or not depending if it's coming from a zip file
#if self.template_code.endswith("_template"):
# self.plugin_code = self.template_code
#else:
# self.plugin_code = "%s_template" % self.template_code
#self.template_project_code = re.sub( '_template$', '', self.template_code)
self.template_project_code = self.template_code
self.force_database = self.kwargs.get("force_database")
self.import_template()
def get_template_dir(self, template_dir):
'''check if it exists and return the one that does'''
if not os.path.exists(template_dir):
# for backward compatibility
template_dir2 = '%s_template' %template_dir
if not os.path.exists(template_dir2):
return template_dir
else:
return template_dir2
return template_dir
def import_template(self):
if self.path:
base_dir = os.path.dirname(self.path)
else:
base_dir = Environment.get_template_dir()
version = self.kwargs.get("version")
if version:
template_dir = "%s/%s-%s" % (base_dir, self.template_code, version)
else:
template_dir = "%s/%s" % (base_dir, self.template_code)
template_dir = self.get_template_dir(template_dir)
# if the directory does not exist then look for a zip file
use_zip = False
if not os.path.exists(template_dir):
template_zip = "%s.zip" % (template_dir)
if os.path.exists(template_zip):
use_zip = True
else:
hint = "Please check if you have created the Template already using the Update button in the Template Project view."
if version:
raise TacticException("No template found for [%s] version [%s]. %s" % (self.template_code, version, hint))
else:
raise TacticException("No template found for [%s]. %s" % (self.template_code, hint))
# check to see if the database exists in the default
# database implementation
from pyasm.search import DbContainer, DatabaseImpl
impl = DatabaseImpl.get()
exists = impl.database_exists(self.project_code)
# if the database already exists, then raise an exception
if exists and self.new_project:
msg = "WARNING: Database [%s] already exists" % self.project_code
print(msg)
raise TacticException(msg)
# this is the overriding factor:
if self.is_template == True:
title = Common.get_display_title(self.project_code)
elif self.is_template == False:
title = Common.get_display_title(self.project_code)
elif self.is_template == None:
# these 2 is for old usage using the command line script create_template.py
if self.template_project_code != self.project_code:
self.is_template = False
title = Common.get_display_title(self.project_code)
else:
self.is_template = True
title = Common.get_display_title(self.template_project_code)
# create a new project if this was desired
if self.new_project == True:
from .create_project_cmd import CreateProjectCmd
project_image_path = self.kwargs.get("project_image_path")
# the project_type will get updated properly by the PluginInstaller
# but that break the ties to the project_type entry created though,
# which is ok
creator = CreateProjectCmd(
project_code=self.project_code,
project_title=title,
project_type=self.template_project_code,
is_template=self.is_template,
use_default_side_bar=False,
project_image_path=project_image_path
)
creator.execute()
# set the project
Project.set_project(self.project_code)
# import from a plugin
if use_zip:
kwargs = {
'zip_path': template_zip,
'code': self.project_code
}
else:
kwargs = {
'plugin_dir': template_dir
}
kwargs['filter_line_handler'] = self.filter_line_handler
kwargs['filter_sobject_handler'] = self.filter_sobject_handler
from .plugin import PluginCreator, PluginInstaller
installer = PluginInstaller( **kwargs )
installer.execute()
def handle_path(self, src_path):
src_path = src_path.replace("\\", "/")
# upload folder
basename = os.path.basename(src_path)
if self.mode =='copy':
target_path = src_path
target_dir = os.path.dirname(target_path)
else:
target_dir = Environment.get_upload_dir()
target_path = "%s/%s" % (target_dir, basename)
base_dir = Environment.get_template_dir()
template_dir = "%s/%s" % (base_dir, self.project_code)
if os.path.exists(template_dir):
shutil.rmtree(template_dir)
#raise TacticException("Template is already installed at [%s]" %template_dir)
# unzip the file
from pyasm.common import ZipUtil
# this is fixed for windows if zipping doesn't use compression
paths = ZipUtil.extract(target_path)
        # verify that the paths extracted are the expected ones
rootname, ext = os.path.splitext(basename)
# check if it unzips at the templates folder directly
unzip_at_template_dir = False
# move the plugin zip file to the appropriate folder
if self.mode == 'copy':
# if they manually drop the zip file already here, skip
if target_dir != base_dir:
shutil.copy(target_path, base_dir)
else:
unzip_at_template_dir = True
else:
shutil.move(target_path, base_dir)
# move unzipped files into the plugin area
# remove any version info, only allow 1 particular version installed for now
import re
        rootname = re.sub(r'(.*)(-)(\d.*)', r'\1', rootname)
unzip_path = "%s/%s" % (target_dir, rootname)
dest_dir = '%s/%s'%(base_dir, rootname)
if not unzip_at_template_dir and os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
shutil.move(unzip_path, dest_dir)
def filter_sobject_handler(self, sobject):
search_type = sobject.get_base_search_type()
if search_type == 'sthpw/project':
project = Project.get_by_code(self.project_code)
if project:
sobject.set_value("id", project.get_id() )
# change the code of the project
sobject.set_value("code", self.project_code)
title = Common.get_display_title(self.project_code)
sobject.set_value("title", title)
if self.is_template:
sobject.set_value("is_template", True)
else:
sobject.set_value("is_template", False)
elif search_type == 'sthpw/schema':
sobject.set_value("code", self.project_code)
elif search_type == 'sthpw/notification':
sobject.set_value("project_code", self.project_code)
sobject.set_value("code", "")
elif search_type in ['sthpw/pipeline']:
sobject.set_value("project_code", self.project_code)
if self.template_project_code != self.project_code:
# get the old code
old_code = sobject.get_code()
if old_code.startswith("%s/" % self.template_project_code):
new_code = old_code.replace("%s/" % self.template_project_code, "%s/" % self.project_code)
else:
new_code = "%s/%s" % (self.project_code, old_code)
sobject.set_value("code", new_code)
elif search_type in ['sthpw/login_group']:
sobject.set_value("project_code", self.project_code)
if self.template_project_code != self.project_code:
# get the old login_group
for column in ['login_group', 'code']:
old_code = sobject.get_value(column)
if old_code.startswith("%s/" % self.template_project_code):
new_code = old_code.replace("%s/" % self.template_project_code, "%s/" % self.project_code)
else:
new_code = "%s/%s" % (self.project_code, old_code)
sobject.set_value(column, new_code)
# go through the access rules and replace project
access_rules = sobject.get_xml_value("access_rules")
nodes = access_rules.get_nodes("rules/rule")
for node in nodes:
project_code = Xml.get_attribute(node, "project")
if project_code and project_code != "*" and project_code == self.template_project_code:
Xml.set_attribute(node, "project", self.project_code)
sobject.set_value("access_rules", access_rules.to_string())
return sobject
def filter_line_handler(self, path, line):
'''NOT used now'''
return line
# this is only called if the project code is different from the
# template code
file_name = os.path.basename(path)
if file_name in ['sthpw_project.spt']:
# change codes to project code
if line.startswith('''insert.set_value('code','''):
line = '''insert.set_value('code', """%s""")\n''' % self.project_code
elif line.startswith('''insert.set_value('title','''):
title = Common.get_display_title(self.project_code)
line = '''insert.set_value('title', """%s""")\n''' % title
elif line.startswith('''insert.set_value('is_template','''):
if self.is_template:
line = '''insert.set_value('is_template', """true""")\n'''
else:
line = '''insert.set_value('is_template', """false""")\n'''
elif file_name in ['sthpw_schema.spt']:
if line.startswith('''insert.set_value('code','''):
line = '''insert.set_value('code', """%s""")\n''' % self.project_code
elif file_name in ['sthpw_pipeline.spt']:
if line.startswith('''insert.set_value('project_code','''):
line = '''insert.set_value('project_code', """%s""")\n''' % self.project_code
return line
class ProjectTemplateUpdaterCmd(Command):
def execute(self):
# force every search type and sobject to be unique
manifest_xml = ""
class ProjectTemplateCheckCmd(Command):
    '''This will check the integrity of a project to see if it is suitable
for export as a distributable project template'''
def execute(self):
self.project_code = self.kwargs.get("project_code")
self.prefix = self.kwargs.get("prefix")
self.project = Project.get_by_code(self.project_code)
self.project_type = self.project.get_value("type")
self.check_project()
self.check_search_type()
def check_project(self):
# check that the project code starts with the prefix
if not self.project.get_code().startswith("%s_" % self.prefix):
raise TacticException("Project code [%s] does not start with prefix [%s]" % (self.project_code, self.prefix) )
# check that the project type is the same as the project code
        if self.project_code != self.project_type:
raise TacticException("Project code [%s] does not match the project_type [%s]" % (self.project_code, self.project_type) )
def check_search_type(self):
# all search objects in the namespace of <project_code> should
# start with the prefix
        search = Search("sthpw/search_type")
search.add_filter("namespace", self.project_type)
search_types = search.get_sobjects()
for search_type in search_types:
            if not search_type.get_value("search_type").startswith("%s_" % self.prefix):
raise TacticException( "sType [%s] does not start with prefix [%s]" % (search_type.get_value("search_type"), self.prefix) )
if __name__ == '__main__':
from pyasm.security import Batch
Batch(project_code='admin')
#cmd = ProjectTemplateCreatorCmd(project_code='pg')
#Command.execute_cmd(cmd)
cmd = ProjectTemplateInstallerCmd(project_code='scrum')
Command.execute_cmd(cmd)
#cmd = ProjectTemplateCheckCmd(project_code='di', prefix='di')
#Command.execute_cmd(cmd)
|
poljeff/odoo
|
refs/heads/8.0
|
addons/website/tests/test_converter.py
|
280
|
# -*- coding: utf-8 -*-
import textwrap
import unittest2
from lxml import etree, html
from lxml.builder import E
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.ir_qweb import html_to_text
from openerp.addons.website.models.website import slugify, unslug
class TestUnslug(unittest2.TestCase):
def test_unslug(self):
tests = {
'': (None, None),
'foo': (None, None),
'foo-': (None, None),
'-': (None, None),
'foo-1': ('foo', 1),
'foo-bar-1': ('foo-bar', 1),
'foo--1': ('foo', -1),
'1': (None, 1),
'1-1': ('1', 1),
'--1': (None, None),
'foo---1': (None, None),
'foo1': (None, None),
}
for slug, expected in tests.iteritems():
self.assertEqual(unslug(slug), expected)
class TestHTMLToText(unittest2.TestCase):
def test_rawstring(self):
self.assertEqual(
"foobar",
html_to_text(E.div("foobar")))
def test_br(self):
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.br(), "bar")))
self.assertEqual(
"foo\n\nbar\nbaz",
html_to_text(E.div(
"foo", E.br(), E.br(),
"bar", E.br(),
"baz")))
def test_p(self):
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
"foo",
E.p("bar"),
"baz")))
self.assertEqual(
"foo",
html_to_text(E.div(E.p("foo"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div("foo", E.p("bar"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div(E.p("foo"), "bar")))
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
E.p("foo"),
E.p("bar"),
E.p("baz"),
)))
def test_div(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
"baz"
)))
self.assertEqual(
"foo",
html_to_text(E.div(E.div("foo"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.div("bar"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div(E.div("foo"), "bar")))
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
E.div("baz")
)))
def test_other_block(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.section("bar"),
"baz"
)))
def test_inline(self):
self.assertEqual(
"foobarbaz",
html_to_text(E.div("foo", E.span("bar"), "baz")))
def test_whitespace(self):
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
"foo\nbar",
E.br(),
"baz")
))
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
E.div(E.span("foo"), " bar"),
"baz")))
class TestConvertBack(common.TransactionCase):
def setUp(self):
super(TestConvertBack, self).setUp()
def field_rountrip_result(self, field, value, expected):
model = 'website.converter.test'
Model = self.registry(model)
id = Model.create(
self.cr, self.uid, {
field: value
})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(
rendered, parser=html.HTMLParser(encoding='utf-8'))
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, Model._fields[field], element)
if isinstance(expected, str):
expected = expected.decode('utf-8')
self.assertEqual(value_back, expected)
def field_roundtrip(self, field, value):
self.field_rountrip_result(field, value, value)
def test_integer(self):
self.field_roundtrip('integer', 42)
def test_float(self):
self.field_roundtrip('float', 42.567890)
self.field_roundtrip('float', 324542.567890)
def test_numeric(self):
self.field_roundtrip('numeric', 42.77)
def test_char(self):
self.field_roundtrip('char', "foo bar")
self.field_roundtrip('char', "ⒸⓄⓇⒼⒺ")
def test_selection(self):
self.field_roundtrip('selection', 3)
def test_selection_str(self):
self.field_roundtrip('selection_str', 'B')
def test_text(self):
self.field_roundtrip('text', textwrap.dedent("""\
You must obey the dance commander
Givin' out the order for fun
You must obey the dance commander
You know that he's the only one
Who gives the orders here,
Alright
Who gives the orders here,
Alright
It would be awesome
If we could dance-a
It would be awesome, yeah
Let's take the chance-a
It would be awesome, yeah
Let's start the show
Because you never know
You never know
You never know until you go"""))
def test_m2o(self):
""" the M2O field conversion (from html) is markedly different from
others as it directly writes into the m2o and returns nothing at all.
"""
model = 'website.converter.test'
field = 'many2one'
Sub = self.registry('website.converter.test.sub')
sub_id = Sub.create(self.cr, self.uid, {'name': "Foo"})
Model = self.registry(model)
id = Model.create(self.cr, self.uid, {field: sub_id})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(rendered, parser=html.HTMLParser(encoding='utf-8'))
# emulate edition
element.text = "New content"
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, Model._fields[field], element)
self.assertIsNone(
value_back, "the m2o converter should return None to avoid spurious"
" or useless writes on the parent record")
self.assertEqual(
Sub.browse(self.cr, self.uid, sub_id).name,
"New content",
"element edition should have been written directly to the m2o record"
)
class TestTitleToSlug(unittest2.TestCase):
"""
Those tests should pass with or without python-slugify
See website/models/website.py slugify method
"""
def test_spaces(self):
self.assertEqual(
"spaces",
slugify(u" spaces ")
)
def test_unicode(self):
self.assertEqual(
"heterogeneite",
slugify(u"hétérogénéité")
)
def test_underscore(self):
self.assertEqual(
"one-two",
slugify(u"one_two")
)
def test_caps(self):
self.assertEqual(
"camelcase",
slugify(u"CamelCase")
)
def test_special_chars(self):
self.assertEqual(
"o-d-o-o",
slugify(u"o!#d{|\o/@~o&%^?")
)
def test_str_to_unicode(self):
self.assertEqual(
"espana",
slugify("España")
)
def test_numbers(self):
self.assertEqual(
"article-1",
slugify(u"Article 1")
)
def test_all(self):
self.assertEqual(
"do-you-know-martine-a-la-plage",
slugify(u"Do YOU know 'Martine à la plage' ?")
)
|
hopeall/odoo
|
refs/heads/8.0
|
addons/stock_account/wizard/stock_valuation_history.py
|
91
|
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class wizard_valuation_history(osv.osv_memory):
_name = 'wizard.valuation.history'
_description = 'Wizard that opens the stock valuation history table'
_columns = {
'choose_date': fields.boolean('Choose a Particular Date'),
'date': fields.datetime('Date', required=True),
}
_defaults = {
'choose_date': False,
'date': fields.datetime.now,
}
def open_table(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
ctx = context.copy()
ctx['history_date'] = data['date']
ctx['search_default_group_by_product'] = True
ctx['search_default_group_by_location'] = True
return {
'domain': "[('date', '<=', '" + data['date'] + "')]",
'name': _('Stock Value At Date'),
'view_type': 'form',
'view_mode': 'tree,graph',
'res_model': 'stock.history',
'type': 'ir.actions.act_window',
'context': ctx,
}
class stock_history(osv.osv):
_name = 'stock.history'
_auto = False
_order = 'date asc'
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
res = super(stock_history, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if context is None:
context = {}
date = context.get('history_date')
if 'inventory_value' in fields:
group_lines = {}
for line in res:
domain = line.get('__domain', [])
group_lines.setdefault(str(domain), self.search(cr, uid, domain, context=context))
line_ids = set()
for ids in group_lines.values():
for product_id in ids:
line_ids.add(product_id)
line_ids = list(line_ids)
lines_rec = {}
if line_ids:
cr.execute('SELECT id, product_id, price_unit_on_quant, company_id, quantity FROM stock_history WHERE id in %s', (tuple(line_ids),))
lines_rec = cr.dictfetchall()
lines_dict = dict((line['id'], line) for line in lines_rec)
product_ids = list(set(line_rec['product_id'] for line_rec in lines_rec))
products_rec = self.pool['product.product'].read(cr, uid, product_ids, ['cost_method', 'product_tmpl_id'], context=context)
products_dict = dict((product['id'], product) for product in products_rec)
cost_method_product_tmpl_ids = list(set(product['product_tmpl_id'][0] for product in products_rec if product['cost_method'] != 'real'))
histories = []
if cost_method_product_tmpl_ids:
cr.execute('SELECT DISTINCT ON (product_template_id, company_id) product_template_id, company_id, cost FROM product_price_history WHERE product_template_id in %s AND datetime <= %s ORDER BY product_template_id, company_id, datetime DESC', (tuple(cost_method_product_tmpl_ids), date))
histories = cr.dictfetchall()
histories_dict = {}
for history in histories:
histories_dict[(history['product_template_id'], history['company_id'])] = history['cost']
for line in res:
inv_value = 0.0
lines = group_lines.get(str(line.get('__domain', [])))
for line_id in lines:
line_rec = lines_dict[line_id]
product = products_dict[line_rec['product_id']]
if product['cost_method'] == 'real':
price = line_rec['price_unit_on_quant']
else:
price = histories_dict.get((product['product_tmpl_id'][0], line_rec['company_id']), 0.0)
inv_value += price * line_rec['quantity']
line['inventory_value'] = inv_value
return res
def _get_inventory_value(self, cr, uid, ids, name, attr, context=None):
if context is None:
context = {}
date = context.get('history_date')
product_tmpl_obj = self.pool.get("product.template")
res = {}
for line in self.browse(cr, uid, ids, context=context):
if line.product_id.cost_method == 'real':
res[line.id] = line.quantity * line.price_unit_on_quant
else:
res[line.id] = line.quantity * product_tmpl_obj.get_history_price(cr, uid, line.product_id.product_tmpl_id.id, line.company_id.id, date=date, context=context)
return res
_columns = {
'move_id': fields.many2one('stock.move', 'Stock Move', required=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_categ_id': fields.many2one('product.category', 'Product Category', required=True),
'quantity': fields.float('Product Quantity'),
'date': fields.datetime('Operation Date'),
'price_unit_on_quant': fields.float('Value'),
'inventory_value': fields.function(_get_inventory_value, string="Inventory Value", type='float', readonly=True),
'source': fields.char('Source')
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'stock_history')
cr.execute("""
CREATE OR REPLACE VIEW stock_history AS (
SELECT MIN(id) as id,
move_id,
location_id,
company_id,
product_id,
product_categ_id,
SUM(quantity) as quantity,
date,
price_unit_on_quant,
source
FROM
((SELECT
stock_move.id::text || '-' || quant.id::text AS id,
quant.id AS quant_id,
stock_move.id AS move_id,
dest_location.id AS location_id,
dest_location.company_id AS company_id,
stock_move.product_id AS product_id,
product_template.categ_id AS product_categ_id,
quant.qty AS quantity,
stock_move.date AS date,
quant.cost as price_unit_on_quant,
stock_move.origin AS source
FROM
stock_quant as quant, stock_quant_move_rel, stock_move
LEFT JOIN
stock_location dest_location ON stock_move.location_dest_id = dest_location.id
LEFT JOIN
stock_location source_location ON stock_move.location_id = source_location.id
LEFT JOIN
product_product ON product_product.id = stock_move.product_id
LEFT JOIN
product_template ON product_template.id = product_product.product_tmpl_id
WHERE quant.qty>0 AND stock_move.state = 'done' AND dest_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id
AND stock_quant_move_rel.move_id = stock_move.id AND (
(source_location.company_id is null and dest_location.company_id is not null) or
(source_location.company_id is not null and dest_location.company_id is null) or
source_location.company_id != dest_location.company_id or
source_location.usage not in ('internal', 'transit'))
) UNION
(SELECT
'-' || stock_move.id::text || '-' || quant.id::text AS id,
quant.id AS quant_id,
stock_move.id AS move_id,
source_location.id AS location_id,
source_location.company_id AS company_id,
stock_move.product_id AS product_id,
product_template.categ_id AS product_categ_id,
- quant.qty AS quantity,
stock_move.date AS date,
quant.cost as price_unit_on_quant,
stock_move.origin AS source
FROM
stock_quant as quant, stock_quant_move_rel, stock_move
LEFT JOIN
stock_location source_location ON stock_move.location_id = source_location.id
LEFT JOIN
stock_location dest_location ON stock_move.location_dest_id = dest_location.id
LEFT JOIN
product_product ON product_product.id = stock_move.product_id
LEFT JOIN
product_template ON product_template.id = product_product.product_tmpl_id
WHERE quant.qty>0 AND stock_move.state = 'done' AND source_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id
AND stock_quant_move_rel.move_id = stock_move.id AND (
(dest_location.company_id is null and source_location.company_id is not null) or
(dest_location.company_id is not null and source_location.company_id is null) or
dest_location.company_id != source_location.company_id or
dest_location.usage not in ('internal', 'transit'))
))
AS foo
GROUP BY move_id, location_id, company_id, product_id, product_categ_id, date, price_unit_on_quant, source
)""")
|
qedsoftware/commcare-hq
|
refs/heads/master
|
corehq/tabs/__init__.py
|
2
|
__all__ = ['MENU_TABS']
|
timoschwarzer/blendworks
|
refs/heads/master
|
BlendWorks Server/python/Lib/test/test_code.py
|
94
|
"""This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.__code__)
name: f
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>', "'f.<locals>.g'")
>>> dump(f(4).__code__)
name: g
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.__code__)
name: h
argcount: 2
kwonlyargcount: 0
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print(obj.attr1)
... print(obj.attr2)
... print(obj.attr3)
>>> dump(attrs.__code__)
name: attrs
argcount: 1
kwonlyargcount: 0
names: ('print', 'attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 0x53
>>> dump(optimize_away.__code__)
name: optimize_away
argcount: 0
kwonlyargcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 67
consts: ("'doc string'", 'None')
>>> def keywordonly_args(a,b,*,k1):
... return a,b,k1
...
>>> dump(keywordonly_args.__code__)
name: keywordonly_args
argcount: 2
kwonlyargcount: 1
names: ()
varnames: ('a', 'b', 'k1')
cellvars: ()
freevars: ()
nlocals: 3
flags: 67
consts: ('None',)
"""
import unittest
import weakref
from test.support import run_doctest, run_unittest, cpython_only
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "kwonlyargcount", "names", "varnames",
"cellvars", "freevars", "nlocals", "flags"]:
print("%s: %s" % (attr, getattr(co, "co_" + attr)))
print("consts:", tuple(consts(co.co_consts)))
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec("def f(): pass", globals(), namespace)
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeWeakRefTest)
if __name__ == "__main__":
test_main()
|
espadrine/opera
|
refs/heads/master
|
chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/select_element_handling_tests.py
|
28
|
#!/usr/bin/python
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from selenium.webdriver.common.by import By
class SelectElementHandlingTests(unittest.TestCase):
def testShouldBeAbleToChangeTheSelectedOptionInASelect(self):
self._loadPage("formPage")
selectBox = self.driver.find_element(by=By.XPATH, value="//select[@name='selectomatic']")
options = selectBox.find_elements(by=By.TAG_NAME, value="option")
one = options[0]
two = options[1]
self.assertTrue(one.is_selected())
self.assertFalse(two.is_selected())
two.click()
self.assertFalse(one.is_selected())
self.assertTrue(two.is_selected())
def testShouldBeAbleToSelectMoreThanOneOptionFromASelectWhichAllowsMultipleChoices(self):
self._loadPage("formPage")
multiSelect = self.driver.find_element(by=By.ID, value="multi")
options = multiSelect.find_elements(by=By.TAG_NAME, value="option")
for option in options:
if not option.is_selected():
option.click()
for i in range(len(options)):
option = options[i]
self.assertTrue(option.is_selected(),
"Option at index is not selected but should be: " + str(i))
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
pansapiens/mytardis
|
refs/heads/develop
|
tardis/apps/oaipmh/provider/base.py
|
5
|
import oaipmh.error
import oaipmh.interfaces
class BaseProvider(oaipmh.interfaces.IOAI, object):
"""
A base provider which roughly implements the PyOAI interface for OAI-PMH
servers.
Extend this if you're writing your own provider for a new type or a
different metadata format.
"""
def __init__(self, site):
self._site = site
def getRecord(self, metadataPrefix, identifier):
"""Get a record for a metadataPrefix and identifier.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param identifier: - repository-unique identifier of record
:type identifier: string
:raises oaipmh.error.CannotDisseminateFormatError: if
``metadataPrefix`` is unknown or not supported by identifier.
:raises oaipmh.error.IdDoesNotExistError: if identifier is
unknown or illegal.
:returns: a ``header``, ``metadata``, ``about`` tuple describing
the record.
"""
raise oaipmh.error.IdDoesNotExistError
def identify(self):
raise NotImplementedError
def listIdentifiers(self, metadataPrefix, set=None, from_=None, until=None):
"""Get a list of header information on records.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param set: set identifier; only return headers in set
:type set: string
:param from_: only retrieve headers from from_ date forward
(in naive UTC)
:type from_: datetime
:param until: only retrieve headers with dates up to and including
until date (in naive UTC)
:type until: datetime
:raise error.CannotDisseminateFormatError: if metadataPrefix
is not supported by the repository.
:raises error.NoSetHierarchyError: if the repository does not
support sets.
:returns: an iterable of headers.
"""
raise oaipmh.error.CannotDisseminateFormatError
def listMetadataFormats(self, identifier=None):
"""List metadata formats supported by repository or record.
:param identifier: identify record for which we want to know all
supported metadata formats. If absent, list all metadata
formats supported by repository.
:type identifier: string
:raises error.IdDoesNotExistError: if record with
identifier does not exist.
:raises error.NoMetadataFormatsError: if no formats are
available for the indicated record.
:returns: an iterable of ``metadataPrefix``, ``schema``,
``metadataNamespace`` tuples (each entry in the tuple is a string).
"""
return []
def listRecords(self, metadataPrefix, set=None, from_=None, until=None):
"""
Get a list of header, metadata and about information on records.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param set: set identifier; only return records in set
:type set: string
:param from_: only retrieve records from ``from_`` date forward
(in naive UTC)
:type from_: datetime
:param until: only retrieve records with dates up to and including
until date (in naive UTC)
:type until: datetime
:raises oaipmh.error.CannotDisseminateFormatError: if ``metadataPrefix``
is not supported by the repository.
:raises oaipmh.error.NoSetHierarchyError: if the repository does not
support sets.
:returns: an iterable of ``header``, ``metadata``, ``about`` tuples.
"""
raise oaipmh.error.CannotDisseminateFormatError
def listSets(self):
"""
Get a list of sets in the repository.
:raises error.NoSetHierarchyError: if the repository does not support
sets.
:returns: an iterable of setSpec, setName tuples (strings).
"""
raise oaipmh.error.NoSetHierarchyError
def writeMetadata(self, element, metadata):
"""
Create XML elements under the given element, using the provided
metadata.
Should avoid doing any model-lookups, as they should be done when
creating the metadata.
:param element: element to put all content under (as SubElements)
:type element: lxml.etree.Element
:param metadata: metadata to turn into XML
:type metadata: oaipmh.common.Metadata
"""
raise NotImplementedError
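# A minimal, hypothetical sketch of a concrete provider built on BaseProvider;
# the class name and advertised format are illustrative only, but the return
# shapes follow the docstrings above.
class StaticExampleProvider(BaseProvider):
    """Advertises a single 'oai_dc' metadata format and no set hierarchy."""
    def listMetadataFormats(self, identifier=None):
        # One (metadataPrefix, schema, metadataNamespace) tuple.
        return [('oai_dc',
                 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
                 'http://www.openarchives.org/OAI/2.0/oai_dc/')]
    def listSets(self):
        # This repository exposes no sets.
        raise oaipmh.error.NoSetHierarchyError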
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/application/__init__.py
|
60
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""
Configuration objects for Twisted Applications
"""
|
lindzey/pelican-plugins
|
refs/heads/master
|
textile_reader/textile_reader.py
|
59
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from pelican import signals
from pelican.readers import BaseReader
from pelican.utils import pelican_open
try:
from textile import textile
except ImportError:
textile = False
class TextileReader(BaseReader):
"""Reader for Textile files. Written using the core MarkdownReader as
a template. Textile input files must be of the form:
Title: An example
Date: 2013-08-10
----
p. Lorem ipsum dolor sit amet...
Specifically, the header values as with Markdown files, then four
dashes, then the body.
"""
enabled = bool(textile)
file_extensions = ['textile']
def __init__(self, *args, **kwargs):
super(TextileReader, self).__init__(*args, **kwargs)
def _parse_metadata(self, meta):
"""Process the metadata dict, lowercasing the keys and textilizing the
value of the 'summary' key (if present). Keys that share the same
lowercased form will be overridden in some arbitrary order.
"""
output = {}
for name, value in meta.items():
name = name.lower()
if name == "summary":
value = textile(value)
output[name] = self.process_metadata(name, value)
return output
def read(self, source_path):
"""Parse content and metadata of textile files."""
with pelican_open(source_path) as text:
parts = text.split('----', 1)
if len(parts) == 2:
headerlines = parts[0].splitlines()
headerpairs = map(lambda l: l.split(':', 1), headerlines)
headerdict = {pair[0]: pair[1].strip()
for pair in headerpairs
if len(pair) == 2}
metadata = self._parse_metadata(headerdict)
content = textile(parts[1])
else:
metadata = {}
content = textile(text)
return content, metadata
def add_reader(readers):
readers.reader_classes['textile'] = TextileReader
def register():
signals.readers_init.connect(add_reader)
|
jimtyhurst/team-budget
|
refs/heads/master
|
budget_proj/budget_app/tests.py
|
1
|
from django.test import TestCase, Client
# NOTE: disabled these for now but expect they'll be used in the near future
# from django.urls import reverse
# from budget_app.models import OCRB
# from mixer.backend.django import mixer
# from rest_framework import status
# from rest_framework.test import APITestCase
# Other Ideas:
# - http://stackoverflow.com/questions/24904362/how-to-write-unit-tests-for-django-rest-framework-apis
# - https://github.com/hackoregon/hacku-devops-2017/wiki/Assignment-5
# - http://www.django-rest-framework.org/api-guide/testing/
class TestCodeEndpoint(TestCase):
def setup(self):
self.client = Client()
def test_ok(self):
response = self.client.get('/budget/code/')
self.assertEqual(response.status_code, 200)
def test_code_get_request_works_with_query_param(self):
response = self.client.get("/budget/code/", {'code': 'AT'})
self.assertEqual(response.status_code, 200)
json_content = response.json()
code_data = json_content['results']
codes = [item["code"] for item in code_data]
for code in codes:
self.assertEqual(code, 'AT')
def test_code_response_is_paginated(self):
response = self.client.get('/budget/code/')
json = response.json()
self.assertTrue('count' in json)
self.assertTrue('next' in json)
self.assertTrue('previous' in json)
self.assertTrue('results' in json)
class TestHistoryEndpoint(TestCase):
def setup(self):
self.client = Client()
def test_ok(self):
response = self.client.get('/budget/history/')
self.assertEqual(response.status_code, 200)
def test_history_get_request_works_with_query_param(self):
query_params = {'fiscal_year': '2015-16', 'bureau_code': 'PS'}
response = self.client.get("/budget/history/", query_params)
self.assertEqual(response.status_code, 200)
results = response.json()['results']
fiscal_years = [item["fiscal_year"] for item in results]
for fiscal_year in fiscal_years:
self.assertEqual(fiscal_year, '2015-16')
def test_history_response_is_paginated(self):
response = self.client.get('/budget/history/')
json = response.json()
self.assertTrue('count' in json)
self.assertTrue('next' in json)
self.assertTrue('previous' in json)
self.assertTrue('results' in json)
class TestKpmEndpoint(TestCase):
def setup(self):
self.client = Client()
def test_ok(self):
response = self.client.get('/budget/kpm/')
self.assertEqual(response.status_code, 200)
def test_kpm_get_request_works_with_query_param(self):
response = self.client.get("/budget/kpm/?fy=2015-16")
self.assertEqual(response.status_code, 200)
json_content = response.json()
results = json_content['results']
fiscal_years = [item["fy"] for item in results]
for fiscal_year in fiscal_years:
self.assertEqual(fiscal_year, '2015-16')
def test_kpm_response_is_paginated(self):
response = self.client.get('/budget/kpm/')
json = response.json()
self.assertTrue('count' in json)
self.assertTrue('next' in json)
self.assertTrue('previous' in json)
self.assertTrue('results' in json)
def test_kpm_accepts_page_query_param(self):
# regression test
response = self.client.get('/budget/kpm/', {'page': 1})
self.assertEqual(response.status_code, 200)
class TestOcrbEndpoint(TestCase):
def setup(self):
self.client = Client()
def test_ok(self):
response = self.client.get('/budget/ocrb/')
self.assertEqual(response.status_code, 200)
def test_ocrb_get_request_works_with_query_param(self):
response = self.client.get("/budget/ocrb/?fy=2015-16")
self.assertEqual(response.status_code, 200)
json_content = response.json()
results = json_content['results']
fiscal_years = [item["fy"] for item in results]
for fiscal_year in fiscal_years:
self.assertEqual(fiscal_year, '2015-16')
def test_ocrb_response_is_paginated(self):
response = self.client.get('/budget/ocrb/')
json = response.json()
# look for pagination keys
self.assertTrue('count' in json)
self.assertTrue('next' in json)
self.assertTrue('previous' in json)
self.assertTrue('results' in json)
def test_ocrb_accepts_page_query_param(self):
# regression test
response = self.client.get('/budget/ocrb/', {'page': 1})
self.assertEqual(response.status_code, 200)
class TestOcrbSummaryView(TestCase):
def setup(self):
self.client = Client()
def test_ok(self):
response = self.client.get('/budget/ocrb/summary/')
self.assertEqual(response.status_code, 200)
def test_kpm_response_is_paginated(self):
response = self.client.get('/budget/ocrb/')
json = response.json()
# look for pagination keys
self.assertTrue('count' in json)
self.assertTrue('next' in json)
self.assertTrue('previous' in json)
self.assertTrue('results' in json)
|
laszlocsomor/tensorflow
|
refs/heads/master
|
tensorflow/contrib/training/python/training/resample_test.py
|
107
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy
from tensorflow.contrib.training.python.training import resample
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ResampleTest(test.TestCase):
"""Tests that resampling runs and outputs are close to expected values."""
def testRepeatRange(self):
cases = [
([], []),
([0], []),
([1], [0]),
([1, 0], [0]),
([0, 1], [1]),
([3], [0, 0, 0]),
([0, 1, 2, 3], [1, 2, 2, 3, 3, 3]),
]
with self.test_session() as sess:
for inputs, expected in cases:
array_inputs = numpy.array(inputs, dtype=numpy.int32)
actual = sess.run(resample._repeat_range(array_inputs))
self.assertAllEqual(actual, expected)
def testRoundtrip(self, rate=0.25, count=5, n=500):
"""Tests `resample(x, weights)` and resample(resample(x, rate), 1/rate)`."""
foo = self.get_values(count)
bar = self.get_values(count)
weights = self.get_weights(count)
resampled_in, rates = resample.weighted_resample(
[foo, bar], constant_op.constant(weights), rate, seed=123)
resampled_back_out = resample.resample_at_rate(
resampled_in, 1.0 / rates, seed=456)
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
with self.test_session() as s:
s.run(init) # initialize
# outputs
counts_resampled = collections.Counter()
counts_reresampled = collections.Counter()
for _ in range(n):
resampled_vs, reresampled_vs = s.run([resampled_in, resampled_back_out])
self.assertAllEqual(resampled_vs[0], resampled_vs[1])
self.assertAllEqual(reresampled_vs[0], reresampled_vs[1])
for v in resampled_vs[0]:
counts_resampled[v] += 1
for v in reresampled_vs[0]:
counts_reresampled[v] += 1
# assert that resampling worked as expected
self.assert_expected(weights, rate, counts_resampled, n)
# and that re-resampling gives the approx identity.
self.assert_expected(
[1.0 for _ in weights],
1.0,
counts_reresampled,
n,
abs_delta=0.1 * n * count)
def testCorrectRates(self, rate=0.25, count=10, n=500, rtol=0.1):
"""Tests that the rates returned by weighted_resample are correct."""
# The approach here is to verify that:
# - sum(1/rate) approximates the size of the original collection
# - sum(1/rate * value) approximates the sum of the original inputs,
# - sum(1/rate * value)/sum(1/rate) approximates the mean.
vals = self.get_values(count)
weights = self.get_weights(count)
resampled, rates = resample.weighted_resample([vals],
constant_op.constant(weights),
rate)
invrates = 1.0 / rates
init = control_flow_ops.group(variables.local_variables_initializer(),
variables.global_variables_initializer())
expected_sum_op = math_ops.reduce_sum(vals)
with self.test_session() as s:
s.run(init)
expected_sum = n * s.run(expected_sum_op)
weight_sum = 0.0
weighted_value_sum = 0.0
for _ in range(n):
val, inv_rate = s.run([resampled[0], invrates])
weight_sum += sum(inv_rate)
weighted_value_sum += sum(val * inv_rate)
# sum(inv_rate) ~= N*count:
expected_count = count * n
self.assertAlmostEqual(
expected_count, weight_sum, delta=(rtol * expected_count))
# sum(vals) * n ~= weighted_sum(resampled, 1.0/weights)
self.assertAlmostEqual(
expected_sum, weighted_value_sum, delta=(rtol * expected_sum))
# Mean ~= weighted mean:
expected_mean = expected_sum / float(n * count)
self.assertAlmostEqual(
expected_mean,
weighted_value_sum / weight_sum,
delta=(rtol * expected_mean))
def testZeroRateUnknownShapes(self, count=10):
"""Tests that resampling runs with completely runtime shapes."""
# Use placeholders without a shape set:
vals = array_ops.placeholder(dtype=dtypes.int32)
rates = array_ops.placeholder(dtype=dtypes.float32)
resampled = resample.resample_at_rate([vals], rates)
with self.test_session() as s:
rs, = s.run(resampled, {
vals: list(range(count)),
rates: numpy.zeros(
shape=[count], dtype=numpy.float32)
})
self.assertEqual(rs.shape, (0,))
def testDtypes(self, count=10):
"""Test that we can define the ops with float64 weights."""
vals = self.get_values(count)
weights = math_ops.cast(self.get_weights(count), dtypes.float64)
# should not error:
resample.resample_at_rate([vals], weights)
resample.weighted_resample(
[vals], weights, overall_rate=math_ops.cast(1.0, dtypes.float64))
def get_weights(self, n, mean=10.0, stddev=5):
"""Returns random positive weight values."""
assert mean > 0, 'Weights have to be positive.'
results = []
while len(results) < n:
v = numpy.random.normal(mean, stddev)
if v > 0:
results.append(v)
return results
def get_values(self, n):
return constant_op.constant(list(range(n)))
def assert_expected(self,
weights,
overall_rate,
counts,
n,
tol=2.0,
abs_delta=0):
# Overall, we expect sum(counts) to be `overall_rate` * n *
# len(weights)... with a stddev on that expectation equivalent to
# performing (n * len(weights)) trials each with probability of
# overall_rate.
expected_overall_count = len(weights) * n * overall_rate
actual_overall_count = sum(counts.values())
stddev = math.sqrt(len(weights) * n * overall_rate * (1 - overall_rate))
self.assertAlmostEqual(
expected_overall_count,
actual_overall_count,
delta=(stddev * tol + abs_delta))
# And we can form a similar expectation for each item -- it should
# appear in the results a number of time proportional to its
# weight, which is similar to performing `expected_overall_count`
# trials each with a probability of weight/weight_sum.
weight_sum = sum(weights)
fractions = [w / weight_sum for w in weights]
expected_counts = [expected_overall_count * f for f in fractions]
stddevs = [
math.sqrt(expected_overall_count * f * (1 - f)) for f in fractions
]
for i in range(len(expected_counts)):
expected_count = expected_counts[i]
actual_count = counts[i]
self.assertAlmostEqual(
expected_count, actual_count, delta=(stddevs[i] * tol + abs_delta))
if __name__ == '__main__':
test.main()
|
MobinRanjbar/hue
|
refs/heads/master
|
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/set_fake_passwords.py
|
35
|
"""
set_fake_passwords.py
Reset all user passwords to a common value. Useful for testing in a
development environment. As such, this command is only available when
settings.DEBUG is True.
"""
from optparse import make_option
from django.conf import settings
from django.core.management.base import NoArgsCommand, CommandError
from django_extensions.management.utils import signalcommand
DEFAULT_FAKE_PASSWORD = 'password'
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--prompt', dest='prompt_passwd', default=False, action='store_true',
help='Prompts for the new password to apply to all users'),
make_option('--password', dest='default_passwd', default=DEFAULT_FAKE_PASSWORD,
help='Use this as default password.'),
)
help = 'DEBUG only: sets all user passwords to a common value ("%s" by default)' % (DEFAULT_FAKE_PASSWORD, )
requires_model_validation = False
@signalcommand
def handle_noargs(self, **options):
if not settings.DEBUG:
raise CommandError('Only available in debug mode')
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from django_extensions.future_1_5 import get_user_model
if options.get('prompt_passwd', False):
from getpass import getpass
passwd = getpass('Password: ')
if not passwd:
raise CommandError('You must enter a valid password')
else:
passwd = options.get('default_passwd', DEFAULT_FAKE_PASSWORD)
User = get_user_model()
user = User()
user.set_password(passwd)
count = User.objects.all().update(password=user.password)
print('Reset %d passwords' % count)
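# Illustrative usage only (the command name follows Django's convention of
# matching this module's filename):
#
#   python manage.py set_fake_passwords --password=letmein
#   python manage.py set_fake_passwords --prompt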
|
atmark-techno/atmark-dist
|
refs/heads/master
|
user/python/Lib/encodings/koi8_r.py
|
4
|
""" Python Character Mapping Codec generated from 'KOI8-R.TXT'.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = {
0x0080: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x0081: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x0082: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x0083: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x0084: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x0085: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x0086: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x0087: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x0088: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x0089: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x008a: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x008b: 0x2580, # UPPER HALF BLOCK
0x008c: 0x2584, # LOWER HALF BLOCK
0x008d: 0x2588, # FULL BLOCK
0x008e: 0x258c, # LEFT HALF BLOCK
0x008f: 0x2590, # RIGHT HALF BLOCK
0x0090: 0x2591, # LIGHT SHADE
0x0091: 0x2592, # MEDIUM SHADE
0x0092: 0x2593, # DARK SHADE
0x0093: 0x2320, # TOP HALF INTEGRAL
0x0094: 0x25a0, # BLACK SQUARE
0x0095: 0x2219, # BULLET OPERATOR
0x0096: 0x221a, # SQUARE ROOT
0x0097: 0x2248, # ALMOST EQUAL TO
0x0098: 0x2264, # LESS-THAN OR EQUAL TO
0x0099: 0x2265, # GREATER-THAN OR EQUAL TO
0x009a: 0x00a0, # NO-BREAK SPACE
0x009b: 0x2321, # BOTTOM HALF INTEGRAL
0x009c: 0x00b0, # DEGREE SIGN
0x009d: 0x00b2, # SUPERSCRIPT TWO
0x009e: 0x00b7, # MIDDLE DOT
0x009f: 0x00f7, # DIVISION SIGN
0x00a0: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00a1: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00a2: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00a3: 0x0451, # CYRILLIC SMALL LETTER IO
0x00a4: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00a5: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00a6: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00a7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00a8: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00a9: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00aa: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00ab: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00ac: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00ad: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00ae: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00af: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00b0: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00b1: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00b2: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b3: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00b4: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b5: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00b6: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00b7: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00b8: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00b9: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00ba: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00bb: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00bc: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00bd: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00be: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00bf: 0x00a9, # COPYRIGHT SIGN
0x00c0: 0x044e, # CYRILLIC SMALL LETTER YU
0x00c1: 0x0430, # CYRILLIC SMALL LETTER A
0x00c2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00c3: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00c4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00c5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00c6: 0x0444, # CYRILLIC SMALL LETTER EF
0x00c7: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00c8: 0x0445, # CYRILLIC SMALL LETTER HA
0x00c9: 0x0438, # CYRILLIC SMALL LETTER I
0x00ca: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00cb: 0x043a, # CYRILLIC SMALL LETTER KA
0x00cc: 0x043b, # CYRILLIC SMALL LETTER EL
0x00cd: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ce: 0x043d, # CYRILLIC SMALL LETTER EN
0x00cf: 0x043e, # CYRILLIC SMALL LETTER O
0x00d0: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d1: 0x044f, # CYRILLIC SMALL LETTER YA
0x00d2: 0x0440, # CYRILLIC SMALL LETTER ER
0x00d3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00d4: 0x0442, # CYRILLIC SMALL LETTER TE
0x00d5: 0x0443, # CYRILLIC SMALL LETTER U
0x00d6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00d7: 0x0432, # CYRILLIC SMALL LETTER VE
0x00d8: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00d9: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00da: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00db: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00dc: 0x044d, # CYRILLIC SMALL LETTER E
0x00dd: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00de: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00df: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00e0: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00e1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00e2: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00e3: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00e4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00e5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00e6: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00e7: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00e8: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00e9: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00ea: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00eb: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00ec: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00ed: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00ee: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ef: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00f0: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00f1: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00f2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00f3: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00f4: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00f5: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00f6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00f7: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00f8: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00f9: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00fa: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00fb: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00fc: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00fd: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fe: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00ff: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
}
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
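# Illustrative only: round-tripping a single character through these tables.
# Codec().decode('\xc1') returns (u'\u0430', 1) -- CYRILLIC SMALL LETTER A --
# and Codec().encode(u'\u0430') maps it back to ('\xc1', 1).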
|
abhisg/scikit-learn
|
refs/heads/master
|
doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py
|
278
|
"""Script to download the movie review dataset"""
import os
import tarfile
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
URL = ("http://www.cs.cornell.edu/people/pabo/"
"movie-review-data/review_polarity.tar.gz")
ARCHIVE_NAME = URL.rsplit('/', 1)[1]
DATA_FOLDER = "txt_sentoken"
if not os.path.exists(DATA_FOLDER):
if not os.path.exists(ARCHIVE_NAME):
print("Downloading dataset from %s (3 MB)" % URL)
opener = urlopen(URL)
open(ARCHIVE_NAME, 'wb').write(opener.read())
print("Decompressing %s" % ARCHIVE_NAME)
tarfile.open(ARCHIVE_NAME, "r:gz").extractall(path='.')
os.remove(ARCHIVE_NAME)
|
vineethguna/heroku-buildpack-libsandbox
|
refs/heads/master
|
vendor/distribute-0.6.34/setuptools/command/__init__.py
|
210
|
__all__ = [
'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
'sdist', 'setopt', 'test', 'upload', 'install_egg_info', 'install_scripts',
'register', 'bdist_wininst', 'upload_docs',
]
from setuptools.command import install_scripts
import sys
if sys.version>='2.5':
# In Python 2.5 and above, distutils includes its own upload command
__all__.remove('upload')
from distutils.command.bdist import bdist
if 'egg' not in bdist.format_commands:
bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
bdist.format_commands.append('egg')
del bdist, sys
|
michaelaye/vispy
|
refs/heads/master
|
examples/tutorial/visuals/T02_measurements.py
|
17
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Tutorial: Creating Visuals
==========================
02. Making physical measurements
--------------------------------
In the last tutorial we created a simple Visual subclass that draws a
rectangle. In this tutorial, we will make two additions:
1. Draw a rectangular border instead of a solid rectangle
2. Make the border a fixed pixel width, even when displayed inside a
user-zoomable ViewBox.
The border is made by drawing a line_strip with 10 vertices::
1--------------3
| |
| 2------4 | [ note that points 9 and 10 are
| | | | the same as points 1 and 2 ]
| 8------6 |
| |
7--------------5
In order to ensure that the border has a fixed width in pixels, we need to
adjust the spacing between the inner and outer rectangles whenever the user
changes the zoom of the ViewBox.
How? Recall that each
time the visual is drawn, it is given a TransformSystem instance that carries
information about the size of logical and physical pixels relative to the
visual [link to TransformSystem documentation]. Essentially, we have 4
coordinate systems:
Visual -> Document -> Framebuffer -> Render
The user specifies the position and size of the rectangle in Visual
coordinates, and in [tutorial 1] we used the vertex shader to convert directly
from Visual coordinates to render coordinates. In this tutorial we will
convert first to document coordinates, then make the adjustment for the border
width, then convert the remainder of the way to render coordinates.
Let's say, for example that the user specifies the box width to be 20, and the
border width to be 5. To draw the border correctly, we cannot simply
add/subtract 5 from the inner rectangle coordinates; if the user zooms
in by a factor of 2 then the border would become 10 px wide.
Another way to say this is that a vector with length=1 in Visual coordinates
does not _necessarily_ have a length of 1 pixel on the canvas. Instead, we must
make use of the Document coordinate system, in which a vector of length=1
does correspond to 1 pixel.
There are a few ways we could make this measurement of pixel length. Here's
how we'll do it in this tutorial:
1. Begin with vertices for a rectangle with border width 0 (that is, vertex
1 is the same as vertex 2, 3=4, and so on).
2. In the vertex shader, first map the vertices to the document coordinate
system using the visual->document transform.
3. Add/subtract the line width from the mapped vertices.
4. Map the rest of the way to render coordinates with a second transform:
document->framebuffer->render.
Note that this problem _cannot_ be solved using a simple scale factor! It is
necessary to use these transformations in order to draw correctly when there
is rotation or anisotropic scaling involved.
"""
from vispy import app, gloo, visuals, scene
import numpy as np
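# A small illustrative sketch (not used by the tutorial code below): measuring
# how many document-space units (logical pixels) one visual-space unit covers,
# using the same visual->document transform that the vertex shader relies on.
# The helper name is hypothetical; get_transform()/map() follow the API used
# in _prepare_transforms() further down.
def _visual_unit_in_document_px(transforms, direction=(1, 0)):
    vis_to_doc = transforms.get_transform('visual', 'document')
    origin = np.asarray(vis_to_doc.map((0, 0)))
    tip = np.asarray(vis_to_doc.map(direction))
    # Length of the mapped vector in document coordinates.
    return np.linalg.norm((tip - origin)[:2])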
vertex_shader = """
void main() {
// First map the vertex to document coordinates
vec4 doc_pos = $visual_to_doc(vec4($position, 0, 1));
// Also need to map the adjustment direction vector, but this is tricky!
// We need to adjust separately for each component of the vector:
vec4 adjusted;
if ( $adjust_dir.x == 0 ) {
// If this is an outer vertex, no adjustment for line weight is needed.
// (In fact, trying to make the adjustment would result in no
// triangles being drawn, hence the if/else block)
adjusted = doc_pos;
}
else {
// Inner vertexes must be adjusted for line width, but this is
// surprisingly tricky given that the rectangle may have been scaled
// and rotated!
vec4 doc_x = $visual_to_doc(vec4($adjust_dir.x, 0, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
vec4 doc_y = $visual_to_doc(vec4(0, $adjust_dir.y, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
doc_x = normalize(doc_x);
doc_y = normalize(doc_y);
// Now doc_x + doc_y points in the direction we need in order to
// correct the line weight of _both_ segments, but the magnitude of
// that correction is wrong. To correct it we first need to
// measure the width that would result from using doc_x + doc_y:
vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x
float cur_width = length(doc_y - proj_y_x); // measure current weight
// And now we can adjust vertex position for line width:
adjusted = doc_pos + ($line_width / cur_width) * (doc_x + doc_y);
}
// Finally map the remainder of the way to render coordinates
gl_Position = $doc_to_render(adjusted);
}
"""
fragment_shader = """
void main() {
gl_FragColor = $color;
}
"""
class MyRectVisual(visuals.Visual):
"""Visual that draws a rectangular outline.
Parameters
----------
x : float
x coordinate of rectangle origin
y : float
y coordinate of rectangle origin
w : float
width of rectangle
h : float
height of rectangle
weight : float
width of border (in px)
"""
def __init__(self, x, y, w, h, weight=4.0):
visuals.Visual.__init__(self, vertex_shader, fragment_shader)
# 10 vertices for 8 triangles (using triangle_strip) forming a
# rectangular outline
self.vert_buffer = gloo.VertexBuffer(np.array([
[x, y],
[x, y],
[x+w, y],
[x+w, y],
[x+w, y+h],
[x+w, y+h],
[x, y+h],
[x, y+h],
[x, y],
[x, y],
], dtype=np.float32))
# Direction each vertex should move to correct for line width
# (the length of this vector will be corrected in the shader)
self.adj_buffer = gloo.VertexBuffer(np.array([
[0, 0],
[1, 1],
[0, 0],
[-1, 1],
[0, 0],
[-1, -1],
[0, 0],
[1, -1],
[0, 0],
[1, 1],
], dtype=np.float32))
self.shared_program.vert['position'] = self.vert_buffer
self.shared_program.vert['adjust_dir'] = self.adj_buffer
self.shared_program.vert['line_width'] = weight
self.shared_program.frag['color'] = (1, 0, 0, 1)
self.set_gl_state(cull_face=False)
self._draw_mode = 'triangle_strip'
def _prepare_transforms(self, view):
# Set the two transforms required by the vertex shader:
tr = view.transforms
view_vert = view.view_program.vert
view_vert['visual_to_doc'] = tr.get_transform('visual', 'document')
view_vert['doc_to_render'] = tr.get_transform('document', 'render')
# As in the previous tutorial, we auto-generate a Visual+Node class for use
# in the scenegraph.
MyRect = scene.visuals.create_visual_node(MyRectVisual)
# Finally we will test the visual by displaying in a scene.
canvas = scene.SceneCanvas(keys='interactive', show=True)
# This time we add a ViewBox to let the user zoom/pan
view = canvas.central_widget.add_view()
view.camera = 'panzoom'
view.camera.rect = (0, 0, 800, 800)
# ..and add the rects to the view instead of canvas.scene
rects = [MyRect(100, 100, 200, 300, parent=view.scene),
MyRect(500, 100, 200, 300, parent=view.scene)]
# Again, rotate one rectangle to ensure the transforms are working as we
# expect.
tr = visuals.transforms.MatrixTransform()
tr.rotate(25, (0, 0, 1))
rects[1].transform = tr
# Add some text instructions
text = scene.visuals.Text("Drag right mouse button to zoom.", color='w',
anchor_x='left', parent=view, pos=(20, 30))
# ..and optionally start the event loop
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1:
app.run()
|
leanhhao86/cafe_management
|
refs/heads/master
|
oo/test.py
|
1
|
import sys
try:
    # 'untrusted' is not defined or imported anywhere in this standalone
    # script, so this call raises an exception (a NameError) on purpose.
    untrusted.execute()
except:
    # Capture and print the (type, value, traceback) tuple of whatever was raised.
    e = sys.exc_info()
    print(e)
|
lokeshpancharia/BuildingMachineLearningSystemsWithPython
|
refs/heads/master
|
ch10/thresholded_figure.py
|
21
|
import mahotas as mh
import numpy as np
from matplotlib import pyplot as plt
# Load image & convert to B&W
image = mh.imread('../SimpleImageDataset/scene00.jpg')
image = mh.colors.rgb2grey(image, dtype=np.uint8)
plt.imshow(image)
plt.gray()
plt.title('original image')
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {}.'.format(thresh))
threshed = (image > thresh)
plt.figure()
plt.imshow(threshed)
plt.title('thresholded image')
mh.imsave('thresholded.png', threshed.astype(np.uint8)*255)
im16 = mh.gaussian_filter(image, 16)
# Repeat the thresholding operations with the blurred image
thresh = mh.thresholding.otsu(im16.astype(np.uint8))
threshed = (im16 > thresh)
plt.figure()
plt.imshow(threshed)
plt.title('thresholded image (after blurring)')
print('Otsu threshold after blurring is {}.'.format(thresh))
mh.imsave('thresholded16.png', threshed.astype(np.uint8)*255)
plt.show()
|
bj-yinyan/beets
|
refs/heads/master
|
beetsplug/fetchart.py
|
4
|
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from contextlib import closing
import logging
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets.plugins import BeetsPlugin
from beets.util.artresizer import ArtResizer
from beets import importer
from beets import ui
from beets import util
from beets import config
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg',)
DOWNLOAD_EXTENSION = '.jpg'
log = logging.getLogger('beets')
requests_session = requests.Session()
requests_session.headers = {'User-Agent': 'beets'}
def _fetch_image(url):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
log.debug(u'fetchart: downloading art: {0}'.format(url))
try:
with closing(requests_session.get(url, stream=True)) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
log.debug(u'fetchart: not an image')
return
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION, delete=False) \
as fh:
for chunk in resp.iter_content():
fh.write(chunk)
log.debug(u'fetchart: downloaded art to: {0}'.format(
util.displayable_path(fh.name)
))
return fh.name
except (IOError, requests.RequestException):
log.debug(u'fetchart: error fetching art')
# ART SOURCES ################################################################
# Cover Art Archive.
CAA_URL = 'http://coverartarchive.org/release/{mbid}/front-500.jpg'
CAA_GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front-500.jpg'
def caa_art(release_id):
"""Return the Cover Art Archive URL given a MusicBrainz release ID.
"""
return CAA_URL.format(mbid=release_id)
def caa_group_art(release_group_id):
"""Return the Cover Art Archive release group URL given a MusicBrainz
release group ID.
"""
return CAA_GROUP_URL.format(mbid=release_group_id)
# Art from Amazon.
AMAZON_URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
AMAZON_INDICES = (1, 2)
def art_for_asin(asin):
"""Generate URLs for an Amazon ID (ASIN) string."""
for index in AMAZON_INDICES:
yield AMAZON_URL % (asin, index)
# AlbumArt.org scraper.
AAO_URL = 'http://www.albumart.org/index_detail.php'
AAO_PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def aao_art(asin):
"""Return art URL from AlbumArt.org given an ASIN."""
# Get the page from albumart.org.
try:
resp = requests_session.get(AAO_URL, params={'asin': asin})
log.debug(u'fetchart: scraped art URL: {0}'.format(resp.url))
except requests.RequestException:
log.debug(u'fetchart: error scraping art page')
return
# Search the page for the image URL.
m = re.search(AAO_PAT, resp.text)
if m:
image_url = m.group(1)
return image_url
else:
log.debug(u'fetchart: no image found on page')
# Google Images scraper.
GOOGLE_URL = 'https://ajax.googleapis.com/ajax/services/search/images'
def google_art(album):
"""Return art URL from google.org given an album title and
interpreter.
"""
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = requests_session.get(GOOGLE_URL, params={
'v': '1.0',
'q': search_string,
'start': '0',
})
# Get results using JSON.
try:
results = response.json()
data = results['responseData']
dataInfo = data['results']
for myUrl in dataInfo:
return myUrl['unescapedUrl']
except:
log.debug(u'fetchart: error scraping art page')
return
# Art from the filesystem.
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return the indexes of the cover names found in the image filename.
Images whose filenames contain lower-numbered and more of the keywords
therefore sort first and get higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
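# For example (hypothetical illustration, not part of the original module):
#   filename_priority('front.jpg', ['cover', 'front']) -> [1]
#   filename_priority('cover_front.jpg', ['cover', 'front']) -> [0, 1]
# Sorting images by these keys puts 'cover_front.jpg' ahead of 'front.jpg'.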
def art_in_path(path, cover_names, cautious):
"""Look for album art files in a specified directory."""
if not os.path.isdir(path):
return
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith('.' + ext):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images, key=lambda x: filename_priority(x, cover_names))
cover_pat = r"(\b|_)({0})(\b|_)".format('|'.join(cover_names))
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
log.debug(u'fetchart: using well-named art file {0}'.format(
util.displayable_path(fn)
))
return os.path.join(path, fn)
# Fall back to any image in the folder.
if images and not cautious:
log.debug(u'fetchart: using fallback art file {0}'.format(
util.displayable_path(images[0])
))
return os.path.join(path, images[0])
# Try each source in turn.
def _source_urls(album):
"""Generate possible source URLs for an album's art. The URLs are
not guaranteed to work so they each need to be attempted in turn.
This allows the main `art_for_album` function to abort iteration
through this sequence early to avoid the cost of scraping when not
necessary.
"""
# Cover Art Archive.
if album.mb_albumid:
yield caa_art(album.mb_albumid)
if album.mb_releasegroupid:
yield caa_group_art(album.mb_releasegroupid)
# Amazon and AlbumArt.org.
if album.asin:
for url in art_for_asin(album.asin):
yield url
url = aao_art(album.asin)
if url:
yield url
if config['fetchart']['google_search']:
url = google_art(album)
if url:
yield url
def art_for_album(album, paths, maxwidth=None, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
# Local art.
cover_names = config['fetchart']['cover_names'].as_str_seq()
cover_names = map(util.bytestring_path, cover_names)
cautious = config['fetchart']['cautious'].get(bool)
if paths:
for path in paths:
out = art_in_path(path, cover_names, cautious)
if out:
break
# Web art sources.
remote_priority = config['fetchart']['remote_priority'].get(bool)
if not local_only and (remote_priority or not out):
for url in _source_urls(album):
if maxwidth:
url = ArtResizer.shared.proxy_url(maxwidth, url)
candidate = _fetch_image(url)
if candidate:
out = candidate
break
if maxwidth and out:
out = ArtResizer.shared.resize(maxwidth, out)
return out
# PLUGIN LOGIC ###############################################################
def batch_fetch_art(lib, albums, force, maxwidth=None):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force:
message = 'has album art'
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
path = art_for_album(album, local_paths, maxwidth)
if path:
album.set_art(path, False)
album.store()
message = ui.colorize('green', 'found album art')
else:
message = ui.colorize('red', 'no art found')
log.info(u'{0} - {1}: {2}'.format(album.albumartist, album.album,
message))
class FetchArtPlugin(BeetsPlugin):
def __init__(self):
super(FetchArtPlugin, self).__init__()
self.config.add({
'auto': True,
'maxwidth': 0,
'remote_priority': False,
'cautious': False,
'google_search': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
})
# Holds paths to downloaded images between fetching them and
# placing them in the filesystem.
self.art_paths = {}
self.maxwidth = self.config['maxwidth'].get(int)
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
path = art_for_album(task.album, task.paths, self.maxwidth, local)
if path:
self.art_paths[task] = path
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_paths:
path = self.art_paths.pop(task)
album = task.album
src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
album.set_art(path, not src_removed)
album.store()
if src_removed:
task.prune(path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option('-f', '--force', dest='force',
action='store_true', default=False,
help='re-download art when already present')
def func(lib, opts, args):
batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force,
self.maxwidth)
cmd.func = func
return [cmd]
|
tuxun/smw-funwiki
|
refs/heads/master
|
vendor/justinrainbow/json-schema/docs/conf.py
|
74
|
# -*- coding: utf-8 -*-
#
# JsonSchema documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 10 15:34:44 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JsonSchema'
copyright = u'2011, Justin Rainbow, Bruno Prieto Reis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsonSchemadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'JsonSchema.tex', u'JsonSchema Documentation',
u'Justin Rainbow, Bruno Prieto Reis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jsonschema', u'JsonSchema Documentation',
[u'Justin Rainbow, Bruno Prieto Reis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'JsonSchema', u'JsonSchema Documentation', u'Justin Rainbow, Bruno Prieto Reis',
'JsonSchema', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mjiang-27/django_learn
|
refs/heads/master
|
form_basic/calc/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
RiccardoPecora/MP
|
refs/heads/master
|
Lib/md5.py
|
62
|
# $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
import warnings
warnings.warn("the md5 module is deprecated; use hashlib instead",
DeprecationWarning, 2)
from hashlib import md5
new = md5
blocksize = 1 # legacy value (wrong in any useful sense)
digest_size = 16
|
rguillebert/CythonCTypesBackend
|
refs/heads/ctypes_backend
|
tests/run/import_error_T734.py
|
2
|
# mode: run
# ticket: 734
def test_import_error():
"""
>>> test_import_error()
Traceback (most recent call last):
ImportError: cannot import name xxx
"""
from sys import xxx
|
rwillmer/django
|
refs/heads/master
|
django/contrib/sites/migrations/0002_alter_domain_unique.py
|
170
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='site',
name='domain',
field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
),
]
|
kzvyahin/cfme_tests
|
refs/heads/master
|
cfme/tests/services/test_rest_services.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
import fauxfactory
import pytest
from cfme.rest import dialog as _dialog
from cfme.rest import services as _services
from cfme.rest import service_catalogs as _service_catalogs
from cfme.rest import service_templates as _service_templates
from cfme import test_requirements
from utils.providers import setup_a_provider as _setup_a_provider
from utils.wait import wait_for
from utils import error, version
pytestmark = [test_requirements.service,
pytest.mark.tier(2)]
@pytest.fixture(scope="module")
def a_provider():
return _setup_a_provider("infra")
@pytest.fixture(scope="function")
def dialog():
return _dialog()
@pytest.fixture(scope="function")
def service_catalogs(request, rest_api):
return _service_catalogs(request, rest_api)
@pytest.fixture(scope="function")
def services(request, rest_api, a_provider, dialog, service_catalogs):
return _services(request, rest_api, a_provider, dialog, service_catalogs)
@pytest.fixture(scope='function')
def service_templates(request, rest_api, dialog):
return _service_templates(request, rest_api, dialog)
class TestServiceRESTAPI(object):
def test_edit_service(self, rest_api, services):
"""Tests editing a service.
        Prerequisites:
* An appliance with ``/api`` available.
Steps:
* POST /api/services (method ``edit``) with the ``name``
* Check if the service with ``new_name`` exists
Metadata:
test_flag: rest
"""
ser = services[0]
new_name = fauxfactory.gen_alphanumeric()
ser.action.edit(name=new_name)
wait_for(
lambda: rest_api.collections.services.find_by(name=new_name),
num_sec=180,
delay=10,
)
def test_edit_multiple_services(self, rest_api, services):
"""Tests editing multiple service catalogs at time.
Prerequisities:
* An appliance with ``/api`` available.
Steps:
* POST /api/services (method ``edit``) with the list of dictionaries used to edit
            * Check if the services with ``new_name`` each exist
Metadata:
test_flag: rest
"""
new_names = []
services_data_edited = []
for ser in services:
new_name = fauxfactory.gen_alphanumeric()
new_names.append(new_name)
services_data_edited.append({
"href": ser.href,
"name": new_name,
})
rest_api.collections.services.action.edit(*services_data_edited)
for new_name in new_names:
wait_for(
                lambda: rest_api.collections.services.find_by(name=new_name),
num_sec=180,
delay=10,
)
def test_delete_service(self, rest_api, services):
service = rest_api.collections.services[0]
service.action.delete()
with error.expected("ActiveRecord::RecordNotFound"):
service.action.delete()
def test_delete_services(self, rest_api, services):
rest_api.collections.services.action.delete(*services)
with error.expected("ActiveRecord::RecordNotFound"):
rest_api.collections.services.action.delete(*services)
def test_retire_service_now(self, rest_api, services):
"""Test retiring a service
        Prerequisites:
* An appliance with ``/api`` available.
Steps:
            * Retrieve list of entities using GET /api/services, pick the first one
* POST /api/service/<id> (method ``retire``)
Metadata:
test_flag: rest
"""
assert "retire" in rest_api.collections.services.action.all
retire_service = services[0]
retire_service.action.retire()
wait_for(
lambda: not rest_api.collections.services.find_by(name=retire_service.name),
num_sec=600,
delay=10,
)
def test_retire_service_future(self, rest_api, services):
"""Test retiring a service
        Prerequisites:
* An appliance with ``/api`` available.
Steps:
            * Retrieve list of entities using GET /api/services, pick the first one
* POST /api/service/<id> (method ``retire``) with the ``retire_date``
Metadata:
test_flag: rest
"""
assert "retire" in rest_api.collections.services.action.all
retire_service = services[0]
date = (datetime.datetime.now() + datetime.timedelta(days=5)).strftime('%m/%d/%y')
future = {
"date": date,
"warn": "4",
}
date_before = retire_service.updated_at
retire_service.action.retire(future)
def _finished():
retire_service.reload()
if retire_service.updated_at > date_before:
return True
return False
wait_for(_finished, num_sec=600, delay=5, message="REST automation_request finishes")
@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_set_service_owner(self, rest_api, services):
if "set_ownership" not in rest_api.collections.services.action.all:
pytest.skip("Set owner action for service is not implemented in this version")
service = services[0]
user = rest_api.collections.users.get(userid='admin')
data = {
"owner": {"href": user.href}
}
service.action.set_ownership(data)
service.reload()
assert hasattr(service, "evm_owner")
assert service.evm_owner.userid == user.userid
@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_set_services_owner(self, rest_api, services):
if "set_ownership" not in rest_api.collections.services.action.all:
pytest.skip("Set owner action for service is not implemented in this version")
data = []
user = rest_api.collections.users.get(userid='admin')
for service in services:
tmp_data = {
"href": service.href,
"owner": {"href": user.href}
}
data.append(tmp_data)
rest_api.collections.services.action.set_ownership(*data)
for service in services:
service.reload()
assert hasattr(service, "evm_owner")
assert service.evm_owner.userid == user.userid
class TestServiceDialogsRESTAPI(object):
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.parametrize("method", ["post", "delete"])
def test_delete_service_dialog(self, rest_api, dialog, method):
service_dialog = rest_api.collections.service_dialogs.find_by(label=dialog.label)[0]
service_dialog.action.delete(force_method=method)
with error.expected("ActiveRecord::RecordNotFound"):
service_dialog.action.delete()
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
def test_delete_service_dialogs(self, rest_api, dialog):
service_dialog = rest_api.collections.service_dialogs.find_by(label=dialog.label)[0]
rest_api.collections.service_dialogs.action.delete(service_dialog)
with error.expected("ActiveRecord::RecordNotFound"):
rest_api.collections.service_dialogs.action.delete(service_dialog)
class TestServiceTemplateRESTAPI(object):
def test_edit_service_template(self, rest_api, service_templates):
"""Tests editing a service template.
        Prerequisites:
* An appliance with ``/api`` available.
Steps:
* POST /api/service_templates (method ``edit``) with the ``name``
* Check if the service_template with ``new_name`` exists
Metadata:
test_flag: rest
"""
scl = rest_api.collections.service_templates[0]
new_name = fauxfactory.gen_alphanumeric()
scl.action.edit(name=new_name)
wait_for(
            lambda: rest_api.collections.service_templates.find_by(name=new_name),
num_sec=180,
delay=10,
)
def test_delete_service_templates(self, rest_api, service_templates):
rest_api.collections.service_templates.action.delete(*service_templates)
with error.expected("ActiveRecord::RecordNotFound"):
rest_api.collections.service_templates.action.delete(*service_templates)
def test_delete_service_template(self, rest_api, service_templates):
s_tpl = rest_api.collections.service_templates[0]
s_tpl.action.delete()
with error.expected("ActiveRecord::RecordNotFound"):
s_tpl.action.delete()
@pytest.mark.uncollectif(lambda: version.current_version() < '5.5')
def test_assign_unassign_service_template_to_service_catalog(self, rest_api, service_catalogs,
service_templates):
"""Tests assigning and unassigning the service templates to service catalog.
Prerequisities:
* An appliance with ``/api`` available.
Steps:
            * POST /api/service_catalogs/<id>/service_templates (method ``assign``)
                with the list of service template dictionaries
            * Check if the service_templates were assigned to the service catalog
            * POST /api/service_catalogs/<id>/service_templates (method ``unassign``)
                with the list of service template dictionaries
            * Check if the service_templates were unassigned from the service catalog
Metadata:
test_flag: rest
"""
scl = service_catalogs[0]
stpl = service_templates[0]
scl.service_templates.action.assign(stpl)
scl.reload()
assert stpl.id in [st.id for st in scl.service_templates.all]
scl.service_templates.action.unassign(stpl)
scl.reload()
assert stpl.id not in [st.id for st in scl.service_templates.all]
def test_edit_multiple_service_templates(self, rest_api, service_templates):
"""Tests editing multiple service catalogs at time.
Prerequisities:
* An appliance with ``/api`` available.
Steps:
* POST /api/service_templates (method ``edit``)
with the list of dictionaries used to edit
            * Check if the service_templates with ``new_name`` each exist
Metadata:
test_flag: rest
"""
new_names = []
service_tpls_data_edited = []
for tpl in service_templates:
new_name = fauxfactory.gen_alphanumeric()
new_names.append(new_name)
service_tpls_data_edited.append({
"href": tpl.href,
"name": new_name,
})
rest_api.collections.service_templates.action.edit(*service_tpls_data_edited)
for new_name in new_names:
wait_for(
lambda: rest_api.collections.service_templates.find_by(name=new_name),
num_sec=180,
delay=10,
)
|
thaumos/ansible
|
refs/heads/devel
|
test/runner/lib/target.py
|
14
|
"""Test target identification, iteration and inclusion/exclusion."""
from __future__ import absolute_import, print_function
import collections
import os
import re
import errno
import itertools
import abc
import sys
from lib.util import (
ApplicationError,
display,
read_lines_without_comments,
)
MODULE_EXTENSIONS = '.py', '.ps1'
def find_target_completion(target_func, prefix):
"""
:type target_func: () -> collections.Iterable[CompletionTarget]
:type prefix: unicode
:rtype: list[str]
"""
try:
targets = target_func()
if sys.version_info[0] == 2:
prefix = prefix.encode()
short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
matches = walk_completion_targets(targets, prefix, short)
return matches
except Exception as ex: # pylint: disable=locally-disabled, broad-except
return [u'%s' % ex]
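# Illustrative usage sketch (not part of the original module): completion
# candidates for bash could be produced by pairing this helper with one of
# the walk_* generators defined below; the prefix shown here is made up.
#
#     matches = find_target_completion(walk_module_targets, u'cop')
#     print('\n'.join(matches))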
def walk_completion_targets(targets, prefix, short=False):
"""
:type targets: collections.Iterable[CompletionTarget]
:type prefix: str
:type short: bool
:rtype: tuple[str]
"""
aliases = set(alias for target in targets for alias in target.aliases)
if prefix.endswith('/') and prefix in aliases:
aliases.remove(prefix)
matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
if short:
offset = len(os.path.dirname(prefix))
if offset:
offset += 1
relative_matches = [match[offset:] for match in matches if len(match) > offset]
if len(relative_matches) > 1:
matches = relative_matches
return tuple(sorted(matches))
def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[T <= CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[T <= CompletionTarget]
"""
targets = tuple(targets)
include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda t: t.name)
if requires:
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
include_targets = [target for target in include_targets if target in require_targets]
if excludes:
list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
return tuple(sorted(internal_targets, key=lambda t: t.name))
def walk_external_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[CompletionTarget], tuple[CompletionTarget]
"""
targets = tuple(targets)
if requires:
include_targets = list(filter_targets(targets, includes, errors=True, directories=False))
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
includes = [target.name for target in include_targets if target in require_targets]
if includes:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
else:
include_targets = []
else:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
if excludes:
exclude_targets = sorted(filter_targets(targets, excludes, errors=True), key=lambda t: t.name)
else:
exclude_targets = []
previous = None
include = []
for target in include_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
include.append(target)
previous = target
previous = None
exclude = []
for target in exclude_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
exclude.append(target)
previous = target
return tuple(include), tuple(exclude)
def filter_targets(targets, patterns, include=True, directories=True, errors=True):
"""
:type targets: collections.Iterable[CompletionTarget]
:type patterns: list[str]
:type include: bool
:type directories: bool
:type errors: bool
:rtype: collections.Iterable[CompletionTarget]
"""
unmatched = set(patterns or ())
compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
for target in targets:
matched_directories = set()
match = False
if patterns:
for alias in target.aliases:
for pattern in patterns:
if compiled_patterns[pattern].match(alias):
match = True
try:
unmatched.remove(pattern)
except KeyError:
pass
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
elif include:
match = True
if not target.base_path:
matched_directories.add('.')
for alias in target.aliases:
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
if match != include:
continue
if directories and matched_directories:
yield DirectoryTarget(sorted(matched_directories, key=len)[0], target.modules)
else:
yield target
if errors:
if unmatched:
raise TargetPatternsNotMatched(unmatched)
def walk_module_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
path = 'lib/ansible/modules'
for target in walk_test_targets(path, path + '/', extensions=MODULE_EXTENSIONS):
if not target.module:
continue
yield target
def walk_units_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(path='test/units', module_path='test/units/modules/', extensions=('.py',), prefix='test_')
def walk_compile_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/', extensions=('.py',), extra_dirs=('bin',))
def walk_sanity_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/')
def walk_posix_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
yield target
def walk_network_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
yield target
def walk_windows_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
yield target
def walk_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
path = 'test/integration/targets'
modules = frozenset(t.module for t in walk_module_targets())
paths = sorted(os.path.join(path, p) for p in os.listdir(path))
prefixes = load_integration_prefixes()
for path in paths:
if os.path.isdir(path):
yield IntegrationTarget(path, modules, prefixes)
def load_integration_prefixes():
"""
:rtype: dict[str, str]
"""
path = 'test/integration'
names = sorted(f for f in os.listdir(path) if os.path.splitext(f)[0] == 'target-prefixes')
prefixes = {}
for name in names:
prefix = os.path.splitext(name)[1][1:]
with open(os.path.join(path, name), 'r') as prefix_fd:
prefixes.update(dict((k, prefix) for k in prefix_fd.read().splitlines()))
return prefixes
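# Illustrative note (not part of the original module): the group name comes
# from the file extension, so a file `test/integration/target-prefixes.network`
# containing the lines `eos` and `ios` would produce
# {'eos': 'network', 'ios': 'network'}; IntegrationTarget later turns that
# into `network/eos` style group aliases for targets named `eos_*`.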
def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None):
"""
:type path: str | None
:type module_path: str | None
:type extensions: tuple[str] | None
:type prefix: str | None
:type extra_dirs: tuple[str] | None
:rtype: collections.Iterable[TestTarget]
"""
for root, _, file_names in os.walk(path or '.', topdown=False):
if root.endswith('/__pycache__'):
continue
if '/.tox/' in root:
continue
if path is None:
root = root[2:]
if root.startswith('.') and root != '.github':
continue
for file_name in file_names:
name, ext = os.path.splitext(os.path.basename(file_name))
if name.startswith('.'):
continue
if extensions and ext not in extensions:
continue
if prefix and not name.startswith(prefix):
continue
file_path = os.path.join(root, file_name)
if os.path.islink(file_path):
# special case to allow a symlink of ansible_release.py -> ../release.py
if file_path != 'lib/ansible/module_utils/ansible_release.py':
continue
yield TestTarget(file_path, module_path, prefix, path)
if extra_dirs:
for extra_dir in extra_dirs:
file_names = os.listdir(extra_dir)
for file_name in file_names:
file_path = os.path.join(extra_dir, file_name)
if os.path.isfile(file_path) and not os.path.islink(file_path):
yield TestTarget(file_path, module_path, prefix, path)
def analyze_integration_target_dependencies(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str,set[str]]
"""
real_target_root = os.path.realpath('test/integration/targets') + '/'
role_targets = [t for t in integration_targets if t.type == 'role']
hidden_role_target_names = set(t.name for t in role_targets if 'hidden/' in t.aliases)
dependencies = collections.defaultdict(set)
# handle setup dependencies
for target in integration_targets:
for setup_target_name in target.setup_always + target.setup_once:
dependencies[setup_target_name].add(target.name)
# handle target dependencies
for target in integration_targets:
for need_target in target.needs_target:
dependencies[need_target].add(target.name)
# handle symlink dependencies between targets
# this use case is supported, but discouraged
for target in integration_targets:
for root, _dummy, file_names in os.walk(target.path):
for name in file_names:
path = os.path.join(root, name)
if not os.path.islink(path):
continue
real_link_path = os.path.realpath(path)
if not real_link_path.startswith(real_target_root):
continue
link_target = real_link_path[len(real_target_root):].split('/')[0]
if link_target == target.name:
continue
dependencies[link_target].add(target.name)
# intentionally primitive analysis of role meta to avoid a dependency on pyyaml
# script based targets are scanned as they may execute a playbook with role dependencies
for target in integration_targets:
meta_dir = os.path.join(target.path, 'meta')
if not os.path.isdir(meta_dir):
continue
meta_paths = sorted([os.path.join(meta_dir, name) for name in os.listdir(meta_dir)])
for meta_path in meta_paths:
if os.path.exists(meta_path):
with open(meta_path, 'rb') as meta_fd:
# try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
try:
meta_lines = meta_fd.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
continue
for meta_line in meta_lines:
if re.search(r'^ *#.*$', meta_line):
continue
if not meta_line.strip():
continue
for hidden_target_name in hidden_role_target_names:
if hidden_target_name in meta_line:
dependencies[hidden_target_name].add(target.name)
while True:
changes = 0
for dummy, dependent_target_names in dependencies.items():
for dependent_target_name in list(dependent_target_names):
new_target_names = dependencies.get(dependent_target_name)
if new_target_names:
for new_target_name in new_target_names:
if new_target_name not in dependent_target_names:
dependent_target_names.add(new_target_name)
changes += 1
if not changes:
break
for target_name in sorted(dependencies):
consumers = dependencies[target_name]
if not consumers:
continue
display.info('%s:' % target_name, verbosity=4)
for consumer in sorted(consumers):
display.info(' %s' % consumer, verbosity=4)
return dependencies
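# Illustrative note (not part of the original module): the returned mapping is
# keyed by the depended-on target and maps to the set of targets that require
# it, including transitive consumers. For example, if the `postgresql` target
# declares `setup/once/setup_postgresql`, the result would contain an entry
# roughly like {'setup_postgresql': {'postgresql', ...}}.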
class CompletionTarget(object):
"""Command-line argument completion target base class."""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.name = None
self.path = None
self.base_path = None
self.modules = tuple()
self.aliases = tuple()
def __eq__(self, other):
if isinstance(other, CompletionTarget):
return self.__repr__() == other.__repr__()
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.name.__lt__(other.name)
def __gt__(self, other):
return self.name.__gt__(other.name)
def __hash__(self):
return hash(self.__repr__())
def __repr__(self):
if self.modules:
return '%s (%s)' % (self.name, ', '.join(self.modules))
return self.name
class DirectoryTarget(CompletionTarget):
"""Directory target."""
def __init__(self, path, modules):
"""
:type path: str
:type modules: tuple[str]
"""
super(DirectoryTarget, self).__init__()
self.name = path
self.path = path
self.modules = modules
class TestTarget(CompletionTarget):
"""Generic test target."""
def __init__(self, path, module_path, module_prefix, base_path):
"""
:type path: str
:type module_path: str | None
:type module_prefix: str | None
:type base_path: str
"""
super(TestTarget, self).__init__()
self.name = path
self.path = path
self.base_path = base_path + '/' if base_path else None
name, ext = os.path.splitext(os.path.basename(self.path))
if module_path and path.startswith(module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
self.module = name[len(module_prefix or ''):].lstrip('_')
self.modules = (self.module,)
else:
self.module = None
self.modules = tuple()
aliases = [self.path, self.module]
parts = self.path.split('/')
for i in range(1, len(parts)):
alias = '%s/' % '/'.join(parts[:i])
aliases.append(alias)
aliases = [a for a in aliases if a]
self.aliases = tuple(sorted(aliases))
class IntegrationTarget(CompletionTarget):
"""Integration test target."""
non_posix = frozenset((
'network',
'windows',
))
categories = frozenset(non_posix | frozenset((
'posix',
'module',
'needs',
'skip',
)))
def __init__(self, path, modules, prefixes):
"""
:type path: str
:type modules: frozenset[str]
:type prefixes: dict[str, str]
"""
super(IntegrationTarget, self).__init__()
self.name = os.path.basename(path)
self.path = path
# script_path and type
contents = sorted(os.listdir(path))
runme_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'runme')
test_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'test')
self.script_path = None
if runme_files:
self.type = 'script'
self.script_path = os.path.join(path, runme_files[0])
elif test_files:
self.type = 'special'
elif os.path.isdir(os.path.join(path, 'tasks')) or os.path.isdir(os.path.join(path, 'defaults')):
self.type = 'role'
else:
self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
# static_aliases
try:
aliases_path = os.path.join(path, 'aliases')
static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
static_aliases = tuple()
# modules
if self.name in modules:
module_name = self.name
elif self.name.startswith('win_') and self.name[4:] in modules:
module_name = self.name[4:]
else:
module_name = None
self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
# groups
groups = [self.type]
groups += [a for a in static_aliases if a not in modules]
groups += ['module/%s' % m for m in self.modules]
if not self.modules:
groups.append('non_module')
if 'destructive' not in groups:
groups.append('non_destructive')
if '_' in self.name:
prefix = self.name[:self.name.find('_')]
else:
prefix = None
if prefix in prefixes:
group = prefixes[prefix]
if group != prefix:
group = '%s/%s' % (group, prefix)
groups.append(group)
if self.name.startswith('win_'):
groups.append('windows')
if self.name.startswith('connection_'):
groups.append('connection')
if self.name.startswith('setup_') or self.name.startswith('prepare_'):
groups.append('hidden')
if self.type not in ('script', 'role'):
groups.append('hidden')
# Collect file paths before group expansion to avoid including the directories.
# Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
g.startswith('needs/file/') and not g.startswith('needs/file/test/integration/targets/'))))
for group in itertools.islice(groups, 0, len(groups)):
if '/' in group:
parts = group.split('/')
for i in range(1, len(parts)):
groups.append('/'.join(parts[:i]))
if not any(g in self.non_posix for g in groups):
groups.append('posix')
# aliases
aliases = [self.name] + \
['%s/' % g for g in groups] + \
['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
if 'hidden/' in aliases:
aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
self.aliases = tuple(sorted(set(aliases)))
# configuration
self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
class TargetPatternsNotMatched(ApplicationError):
"""One or more targets were not matched when a match was required."""
def __init__(self, patterns):
"""
:type patterns: set[str]
"""
self.patterns = sorted(patterns)
if len(patterns) > 1:
message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
else:
message = 'Target pattern not matched: %s' % self.patterns[0]
super(TargetPatternsNotMatched, self).__init__(message)
|
biocommons/hgvs
|
refs/heads/main
|
tests/test_hgvs_posedit.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pytest
import hgvs.edit
import hgvs.location
import hgvs.posedit
@pytest.mark.quick
@pytest.mark.models
class Test_PosEdit(unittest.TestCase):
def test_PosEdit(self):
pos = hgvs.location.Interval(
hgvs.location.BaseOffsetPosition(base=12, offset=+34),
hgvs.location.BaseOffsetPosition(base=56, offset=-78))
edit = hgvs.edit.NARefAlt("AA", None)
pe = hgvs.posedit.PosEdit(pos=pos, edit=edit)
self.assertEqual(pe.format(conf={'max_ref_length': None}), "12+34_56-78delAA")
if __name__ == "__main__":
unittest.main()
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
|
youprofit/NewsBlur
|
refs/heads/master
|
utils/tlnbt.py
|
9
|
#!/usr/bin/env python
import tlnb
import sys
if __name__ == "__main__":
role = "task"
if len(sys.argv) > 1:
role = sys.argv[1]
tlnb.main(role=role, role2="ec2task")
|
Byron/bgui
|
refs/heads/master
|
src/python/bprocessg/qt/res/__init__.py
|
1
|
#-*-coding:utf-8-*-
"""
@package bprocess.gui.ui
@brief Contains all compiled .ui packages
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
from .package_ui import *
from .viewer_ui import *
|
hugolouzada/ImpeachmentPrediction
|
refs/heads/master
|
Prediction/SplitTrainValid/__init__.py
|
12133432
| |
Pingmin/linux
|
refs/heads/master
|
scripts/gdb/linux/__init__.py
|
2010
|
# nothing to do for the initialization of this package
|
BaskerShu/typeidea
|
refs/heads/master
|
typeidea/comment/adminforms.py
|
1
|
# -*- coding: utf-8 -*-
from django import forms
class CommentAdminForm(forms.ModelForm):
content = forms.CharField(widget=forms.Textarea, max_length=2000)
|
ptcrypto/p2pool-adaptive
|
refs/heads/master
|
p2pool/networks/lennycoin.py
|
2
|
from p2pool.bitcoin import networks
PARENT = networks.nets['lennycoin']
SHARE_PERIOD = 10 # seconds
CHAIN_LENGTH = 12*60*60//10 # shares
REAL_CHAIN_LENGTH = 12*60*60//10 # shares
TARGET_LOOKBEHIND = 20 # shares
SPREAD = 50 # blocks
IDENTIFIER = 'c1e2b3c4e0b5fefe'.decode('hex')
PREFIX = 'e2e3c4c5b7b9e1ec'.decode('hex')
P2P_PORT = 8556
MIN_TARGET = 0
MAX_TARGET = 2**256//2**20 - 1
PERSIST = False
WORKER_PORT = 9556
BOOTSTRAP_ADDRS = 'p2pool.e-pool.net p2pool-eu.gotgeeks.com p2pool-us.gotgeeks.com rav3n.dtdns.net p2pool.gotgeeks.com p2pool.dtdns.net solidpool.org'.split(' ')
ANNOUNCE_CHANNEL = '#p2pool-alt'
VERSION_CHECK = lambda v: True
|
Distrotech/intellij-community
|
refs/heads/master
|
python/edu/interactive-learning-python/resources/com/jetbrains/python/edu/user_tester.py
|
41
|
import sys
import imp
import os
import subprocess
USER_TESTS = "userTests"
TEST_FAILED = "FAILED"
TEST_PASSED = "PASSED"
INPUT = "input"
OUTPUT = "output"
def get_index(logical_name, full_name):
logical_name_len = len(logical_name)
if full_name[:logical_name_len] == logical_name:
return int(full_name[logical_name_len])
return -1
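# Illustrative note (not part of the original script): get_index("input", "input3")
# returns 3, while get_index("input", "output3") returns -1; only the first
# character after the logical name is read, so test indices above 9 are not
# fully supported by this helper.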
def process_user_tests(file_path):
user_tests = []
imp.load_source('user_file', file_path)
user_tests_dir_path = os.path.abspath(os.path.join(file_path, os.pardir, USER_TESTS))
user_test_files = os.listdir(user_tests_dir_path)
for user_file in user_test_files:
index = get_index(INPUT, user_file)
if index == -1:
continue
output = OUTPUT + str(index)
if output in user_test_files:
input_path = os.path.abspath(os.path.join(user_tests_dir_path, user_file))
output_path = os.path.abspath(os.path.join(user_tests_dir_path, output))
user_tests.append((input_path, output_path, index))
return sorted(user_tests, key=(lambda x: x[2]))
def run_user_test(python, executable_path):
user_tests = process_user_tests(executable_path)
for test in user_tests:
input, output, index = test
test_output = subprocess.check_output([python, executable_path, input])
expected_output = open(output).read()
test_status = TEST_PASSED if test_output == expected_output else TEST_FAILED
print "TEST" + str(index) + " " + test_status
print "OUTPUT:"
print test_output + "\n"
if test_status == TEST_FAILED:
print "EXPECTED OUTPUT:"
print expected_output + "\n"
if __name__ == "__main__":
python = sys.argv[1]
executable_path = sys.argv[2]
    run_user_test(python, executable_path)
|
AICP/external_chromium_org
|
refs/heads/lp5.0
|
tools/telemetry/telemetry/core/gpu_device_unittest.py
|
38
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import gpu_device
class TestGPUDevice(unittest.TestCase):
def testConstruction(self):
device = gpu_device.GPUDevice(1000, 2000, 'test_vendor', 'test_device')
self.assertEquals(device.vendor_id, 1000)
self.assertEquals(device.device_id, 2000)
self.assertEquals(device.vendor_string, 'test_vendor')
self.assertEquals(device.device_string, 'test_device')
def testFromDict(self):
dictionary = { 'vendor_id': 3000,
'device_id': 4000,
'vendor_string': 'test_vendor_2',
'device_string': 'test_device_2' }
device = gpu_device.GPUDevice.FromDict(dictionary)
self.assertEquals(device.vendor_id, 3000)
self.assertEquals(device.device_id, 4000)
self.assertEquals(device.vendor_string, 'test_vendor_2')
self.assertEquals(device.device_string, 'test_device_2')
def testMissingAttrsFromDict(self):
data = {
'vendor_id': 1,
'device_id': 2,
'vendor_string': 'a',
'device_string': 'b'
}
for k in data:
data_copy = data.copy()
del data_copy[k]
try:
gpu_device.GPUDevice.FromDict(data_copy)
self.fail('Should raise exception if attribute "%s" is missing' % k)
except AssertionError:
raise
except:
pass
|
Tesla-Redux-Devices/android_kernel_samsung_msm8930-common
|
refs/heads/lp5.1
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# trace drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
hoosteeno/fjord
|
refs/heads/master
|
fjord/base/plugin_utils.py
|
6
|
import logging
from django.utils.module_loading import import_string
def load_providers(provider_list, logger=None):
"""Loads providers in the provided list
:arg provider_list: list of Python dotted module paths; the provider
class should be the last name in the path
:arg logger: (optional) logger to log to for errors
:returns: list of providers.
>>> load_providers(['some.path.Class', 'some.other.path.OtherClass'])
[<Class instance>, <OtherClass instance>]
.. Note::
All exceptions when importing and loading providers are
swallowed and logged as errors. Providers that don't import or
don't load are not returned in the return list.
"""
if logger is None:
logger = logging.getLogger('i.plugin_utils')
provider_objects = []
for provider in provider_list:
try:
# Import the class and instantiate it.
provider_objects.append(import_string(provider)())
except Exception:
logger.exception('Provider {0} failed to import'.format(provider))
providers = []
for provider in provider_objects:
try:
provider.load()
providers.append(provider)
except Exception:
logger.exception('Provider {0} failed to load'.format(provider))
return providers
|
trungdtbk/faucet
|
refs/heads/master
|
tests/unit/faucet/test_valveapp_smoke.py
|
2
|
#!/usr/bin/env python
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valveapp_smoke.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import os
import unittest
from prometheus_client import CollectorRegistry
from ryu.controller import dpset
from ryu.controller.ofp_event import EventOFPMsgBase
from faucet import faucet
from faucet import faucet_experimental_api
class RyuAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr
"""Test bare instantiation of controller classes."""
@staticmethod
def _fake_dp():
datapath = namedtuple('datapath', ['id', 'close'])(0, lambda: None)
return datapath
def test_faucet(self):
"""Test FAUCET can be initialized."""
os.environ['FAUCET_CONFIG'] = '/dev/null'
os.environ['FAUCET_LOG'] = '/dev/null'
os.environ['FAUCET_EXCEPTION_LOG'] = '/dev/null'
ryu_app = faucet.Faucet(
dpset={},
faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI(),
reg=CollectorRegistry())
ryu_app.reload_config(None)
self.assertFalse(ryu_app._config_files_changed()) # pylint: disable=protected-access
ryu_app.metric_update(None)
ryu_app.get_config()
ryu_app.get_tables(0)
event_dp = dpset.EventDPReconnected(dp=self._fake_dp())
for enter in (True, False):
event_dp.enter = enter
ryu_app.connect_or_disconnect_handler(event_dp)
for event_handler in (
ryu_app.error_handler,
ryu_app.features_handler,
ryu_app.packet_in_handler,
ryu_app.desc_stats_reply_handler,
ryu_app.port_status_handler,
ryu_app.flowremoved_handler,
ryu_app.reconnect_handler,
ryu_app._datapath_connect, # pylint: disable=protected-access
ryu_app._datapath_disconnect): # pylint: disable=protected-access
msg = namedtuple('msg', ['datapath'])(self._fake_dp())
event = EventOFPMsgBase(msg=msg)
event.dp = msg.datapath
event_handler(event)
ryu_app._check_thread_exception() # pylint: disable=protected-access
ryu_app._thread_jitter(1) # pylint: disable=protected-access
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
|
superr/android_kernel_502_falcon
|
refs/heads/super
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# trace drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
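# A worked example of the time<->pixel mapping above (illustrative only; the
# values assume zoom == 1): us_to_px(2000) -> 2000 / 10**3 * 1 = 2 pixels, and
# px_to_us(2) -> 2 / 1 * 10**3 = 2000 us, so the two conversions are inverses.
# zoom_in() doubles self.zoom, halving the microseconds represented per pixel,
# while __zoom() re-anchors the scrollbars so the left edge keeps its timestamp.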
|
MayOneUS/pledgeservice
|
refs/heads/master
|
testlib/setuptools/tests/server.py
|
452
|
"""Basic http server for tests to simulate PyPI or custom indexes
"""
import sys
import time
import threading
from setuptools.compat import BaseHTTPRequestHandler
from setuptools.compat import (urllib2, URLError, HTTPServer,
SimpleHTTPRequestHandler)
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=SimpleHTTPRequestHandler):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self._run = True
def serve(self):
while self._run:
self.handle_request()
def start(self):
self.thread = threading.Thread(target=self.serve)
self.thread.start()
def stop(self):
"Stop the server"
# Let the server finish the last request and wait for a new one.
time.sleep(0.1)
# self.shutdown is not supported on python < 2.6, so just
# set _run to false, and make a request, causing it to
# terminate.
self._run = False
url = 'http://127.0.0.1:%(server_port)s/' % vars(self)
try:
if sys.version_info >= (2, 6):
urllib2.urlopen(url, timeout=5)
else:
urllib2.urlopen(url)
except URLError:
# ignore any errors; all that's important is the request
pass
self.thread.join()
self.socket.close()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
class RequestRecorder(BaseHTTPRequestHandler):
def do_GET(self):
requests = vars(self.server).setdefault('requests', [])
requests.append(self)
self.send_response(200, 'OK')
class MockServer(HTTPServer, threading.Thread):
"""
A simple HTTP Server that records the requests made to it.
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=RequestRecorder):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
threading.Thread.__init__(self)
self.setDaemon(True)
self.requests = []
def run(self):
self.serve_forever()
def url(self):
return 'http://localhost:%(server_port)s/' % vars(self)
url = property(url)
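# Illustrative sketch of using MockServer (a hypothetical helper, never called
# by the test suite; the timeout value is an assumption):
def _example_mock_server_usage():
    server = MockServer()
    server.start()                              # serve_forever() runs in a daemon thread
    try:
        urllib2.urlopen(server.url, timeout=5)  # RequestRecorder records the GET handler
    except Exception:
        pass                                    # the response body is irrelevant here
    server.shutdown()
    return server.requests                      # list of recorded handler instances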
|
OpenMined/PySyft
|
refs/heads/master
|
packages/syft/tests/syft/lib/python/bool/serde_test.py
|
1
|
# syft absolute
import syft as sy
from syft.lib.python.bool import Bool
from syft.proto.lib.python.bool_pb2 import Bool as Bool_PB
def test_serde() -> None:
syft_bool = Bool(True)
serialized = syft_bool._object2proto()
assert isinstance(serialized, Bool_PB)
deserialized = Bool._proto2object(proto=serialized)
assert isinstance(deserialized, Bool)
assert deserialized.id == syft_bool.id
assert deserialized == syft_bool
def test_send(client: sy.VirtualMachineClient) -> None:
syft_bool = Bool(5)
ptr = syft_bool.send(client)
# Check pointer type
assert ptr.__class__.__name__ == "BoolPointer"
# Check that we can get back the object
res = ptr.get()
assert res == syft_bool
|
aware-why/multithreaded_crawler
|
refs/heads/master
|
threaded_spider/logger.py
|
1
|
"""
Logger based on logging and basic.log module.
"""
from __future__ import with_statement
import logging, logging.handlers
import os, sys
from threaded_spider.basic import log
from threaded_spider.basic.util import unicode_to_str
from threaded_spider.basic.threadable import threadingmodule
# Logging levels
NOTSET = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
level_ids = [DEBUG, INFO, WARN, ERROR, CRITICAL]
level_ids.reverse()
level_pairs = [(NOTSET, "NOISY"),
(DEBUG, "DEBUG"),
(INFO, "INFO"),
(WARN, "WARN"),
(ERROR, "ERROR"),
(CRITICAL, "CRITICAL"),
]
level_pairs_reverse = [(v, k) for k, v in level_pairs]
level_pairs.extend(level_pairs_reverse)
level_map = dict([(k, v) for k, v in level_pairs])
class FobjBasedOnLogging(object):
def __init__(self, file_path, enable_console_output=True):
self.logger = logging.getLogger()
self.logger.setLevel(NOTSET)
self.fh = None
self.console = None
if file_path is not None:
self._add_file_handler(file_path)
else:
# If file_path is None, enable console output by default!
enable_console_output = True
if enable_console_output:
self._add_console_handler()
def _add_file_handler(self, file_path):
# fh = logging.handlers.RotatingFileHandler(file_path, mode='a',
# maxBytes=10*1000*1000,
# backupCount=3,
# encoding=None)
fh = logging.FileHandler(file_path, mode='w')
fh.setLevel(NOTSET)
self.logger.addHandler(fh)
self.fh = fh
def _add_console_handler(self):
# Bind stderr to logging as handler
console = logging.StreamHandler(sys.stderr)
console.setLevel(NOTSET)
self.logger.addHandler(console)
self.console = console
def write(self, msg):
msg = msg.rstrip('\r \n')
self.logger.log(NOTSET, msg)
def flush(self):
"""Each log statement imediatelly flush buffer to disk file.
It maybe result in low performance.
"""
self.fh.flush()
self.console.flush()
def close(self):
pass
def fileno(self):
return -1
def read(self):
raise IOError("Can't read from the log")
readline = read
readlines = read
seek = read
tell = read
class CrawlerLogObserver(log.FileLogObserver):
def __init__(self, file_path, log_level=INFO, log_encoding='utf-8',
crawler=None, enable_console_output=True):
self.level = log_level
self.encoding = log_encoding
if crawler:
self.crawler = crawler
self.emit = self._emit_with_crawler
else:
self.emit = self._emit
f = FobjBasedOnLogging(file_path,
enable_console_output=enable_console_output)
log.FileLogObserver.__init__(self, f)
def _emit(self, eventDict):
ev = _adapt_eventdict(eventDict, self.level, self.encoding)
if ev is not None:
log.FileLogObserver.emit(self, ev)
return ev
def _emit_with_crawler(self, eventDict):
ev = self._emit(eventDict)
if ev:
level = ev['log_level']
# TODO: self.crawler do something.
def _adapt_eventdict(eventDict, log_level=INFO, encoding='utf-8'):
"""Adapt the basic log eventDict to make it suitable for logging
with a crawler log observer.
@param log_level: the minimum level being logged.
@param encoding: the text encoding.
@ret None: Indicates that the event should be ignored by a crawler log
observer.
@ret dict: An event dict consisting of logging meta information.
"""
ev = eventDict.copy()
if ev['isError']:
ev.setdefault('log_level', ERROR)
# Optional, strip away the noise from outside crawler.
# Ignore non-error message from outside crawler
# if ev.get('system') != 'crawler' and not ev['isError']:
# return
cur_level = ev.get('log_level', INFO)
if cur_level < log_level:
return
spider = ev.get('spider')
if spider:
ev['system'] = spider.name
cur_level_name = _get_log_level_name(cur_level)
tid = threadingmodule.currentThread().getName()
# Generally, message/format/isError are mutually exclusive.
# `message` has priority over isError, `isError` over `format`.
# See basic.log.textFromEventDict for more details.
message = ev.get('message')
if message:
message = [unicode_to_str(x, encoding) for x in message]
message[0] = '[%s] [%s] %s' % (cur_level_name, tid, message[0])
ev['message'] = message
# The exception source description string
why = ev.get('why')
if why:
why = unicode_to_str(why, encoding)
why = '[%s] [%s] %s' % (cur_level_name, tid, why)
ev['why'] = why
fmt = ev.get('format')
if fmt:
fmt = unicode_to_str(fmt, encoding)
fmt = '[%s] [%s] %s' % (cur_level_name, tid, fmt)
ev['format'] = fmt
return ev
def _get_log_level_id(level_name_or_id):
if isinstance(level_name_or_id, int):
if level_name_or_id < NOTSET:
return level_ids[level_name_or_id - 1]
else:
return level_name_or_id
elif isinstance(level_name_or_id, basestring):
return level_map.get(level_name_or_id, NOTSET)
else:
raise ValueError('Unknown log level: %r' % level_name_or_id)
def _get_log_level_name(level_name_or_id):
if isinstance(level_name_or_id, basestring):
if level_name_or_id in level_map:
return level_name_or_id
return 'UNKNOWN-LEVEL'
elif isinstance(level_name_or_id, int):
return level_map.get(level_name_or_id, 'UNKNOWN-LEVEL')
else:
raise ValueError('Unknown log name: %r' % level_name_or_id)
def start(log_file=None, log_level='INFO', enable_console_output=True,
log_encoding='utf-8', redirect_stdout_to_logfile=False,
crawler=None):
"""
@param enable_console_output: the log observer's messages are written to stderr as well.
If enabled, the messages passed to the debug/info/warn/error/critical methods
will *also* be written to the console as duplicates.
@param redirect_stdout_to_logfile: redirect stdout and stderr to the log observers.
If enabled, messages passed to print/sys.stdout/sys.stderr will be
delivered to the log observers instead of stdout or stderr.
"""
log_level = _get_log_level_id(log_level)
log_observer = CrawlerLogObserver(log_file, log_level=log_level,
log_encoding=log_encoding,
enable_console_output=enable_console_output,
crawler=crawler)
log.startLoggingWithObserver(log_observer.emit,
setStdout=redirect_stdout_to_logfile)
return log_observer
def _log(message=None, **kw):
kw.setdefault('system', 'crawler')
if message is None:
# Make sure `format` is in kw.
log.msg(**kw)
else:
log.msg(message, **kw)
def debug(message=None, **kw):
kw['log_level'] = DEBUG
_log(message, **kw)
def info(message=None, **kw):
kw['log_level'] = INFO
_log(message, **kw)
def warn(message=None, **kw):
kw['log_level'] = WARN
_log(message, **kw)
def _log_err(_exc_value=None, _exc_desc=None, **kw):
kw.setdefault('system', 'crawler')
_exc_desc = _exc_desc or kw.pop('why', None)
log.err(_exc_value, _exc_desc, **kw)
def error(_exc_value=None, _exc_desc=None, **kw):
kw['log_level'] = ERROR
_log_err(_exc_value, _exc_desc, **kw)
def critical(_exc_value=None, _exc_desc=None, **kw):
kw['log_level'] = CRITICAL
_log_err(_exc_value, _exc_desc, **kw)
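# Illustrative usage sketch (a hypothetical helper, never called by the module;
# the log file name is a placeholder):
def _example_logging_setup():
    observer = start(log_file='crawler.log', log_level='DEBUG',
                     enable_console_output=True)
    info('crawler started')                       # plain INFO message
    debug('details: %r' % {'threads': 4})         # dropped if log_level > DEBUG
    error(ValueError('boom'), 'while parsing a page')
    return observer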
|
jalexvig/tensorflow
|
refs/heads/master
|
tensorflow/python/training/basic_loops.py
|
32
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic loop for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.basic_train_loop")
def basic_train_loop(supervisor, train_step_fn, args=None,
kwargs=None, master=""):
"""Basic loop to train a model.
Calls `train_step_fn` in a loop to train a model. The function is called as:
```python
train_step_fn(session, *args, **kwargs)
```
It is passed a `tf.Session` in addition to `args` and `kwargs`. The function
typically runs one training step in the session.
Args:
supervisor: `tf.train.Supervisor` to run the training services.
train_step_fn: Callable to execute one training step. Called
repeatedly as `train_step_fn(session, *args, **kwargs)`.
args: Optional positional arguments passed to `train_step_fn`.
kwargs: Optional keyword arguments passed to `train_step_fn`.
master: Master to use to create the training session. Defaults to
`""` which causes the session to be created in the local process.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
should_retry = True
while should_retry:
try:
should_retry = False
with supervisor.managed_session(master) as sess:
while not supervisor.should_stop():
train_step_fn(sess, *args, **kwargs)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
should_retry = True
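# Illustrative sketch of driving basic_train_loop (a hypothetical helper, never
# called here; it assumes TF 1.x graph mode and substitutes a step counter for a
# real training op):
def _example_basic_train_loop():
    import tensorflow as tf
    step = tf.Variable(0, name="example_step")
    increment = tf.assign_add(step, 1)
    sv = tf.train.Supervisor(logdir=None)          # logdir=None: no checkpoints
    def train_step(sess, max_steps):
        if sess.run(increment) >= max_steps:
            sv.request_stop()                      # ends the while loop above
    basic_train_loop(sv, train_step, args=[10])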
|
MayOneUS/pledgeservice
|
refs/heads/master
|
lib/requests/packages/urllib3/util/__init__.py
|
306
|
# urllib3/util/__init__.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .url import (
get_host,
parse_url,
split_first,
Url,
)
|
openweave/openweave-core
|
refs/heads/master
|
src/test-apps/happy/topologies/__init__.py
|
12133432
| |
City-of-Bloomington/green-rental
|
refs/heads/master
|
south/tests/fakeapp/__init__.py
|
12133432
| |
pythonvietnam/scikit-learn
|
refs/heads/master
|
sklearn/tree/tests/__init__.py
|
12133432
| |
potash/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/__init__.py
|
12133432
| |
ayushagrawal288/zamboni
|
refs/heads/master
|
mkt/abuse/tests/__init__.py
|
12133432
| |
air805ronin/homestead
|
refs/heads/master
|
homestead/crafting.py
|
12133432
| |
yimun/Algorithm
|
refs/heads/master
|
LeetCode/105.Different Ways to Add Parentheses/diffways.py
|
1
|
#!/usr/bin/env python
class Solution(object):
def diffWaysToCompute(self, input):
if input.isdigit():
return [int(input)]
res = []
for i in xrange(len(input)):
if input[i] in "-+*":
res1 = self.diffWaysToCompute(input[:i])
res2 = self.diffWaysToCompute(input[i+1:])
for j in res1:
for k in res2:
res.append(self.helper(j, k, input[i]))
return res
def helper(self, m, n, op):
if op == "+":
return m+n
elif op == "-":
return m-n
else:
return m*n
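# A hedged variant of the same divide-and-conquer recursion with memoization on
# the substring, to avoid recomputing shared sub-expressions (illustrative only,
# not part of the original solution):
class MemoSolution(object):
    def diffWaysToCompute(self, input, memo=None):
        if memo is None:
            memo = {}
        if input not in memo:
            if input.isdigit():
                memo[input] = [int(input)]
            else:
                res = []
                for i in xrange(len(input)):
                    if input[i] in "-+*":
                        for j in self.diffWaysToCompute(input[:i], memo):
                            for k in self.diffWaysToCompute(input[i+1:], memo):
                                res.append(self.helper(j, k, input[i]))
                memo[input] = res
        return memo[input]
    def helper(self, m, n, op):
        if op == "+":
            return m + n
        elif op == "-":
            return m - n
        return m * n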
if __name__ == '__main__':
s = Solution()
print s.diffWaysToCompute("2-1-1")
print s.diffWaysToCompute("2*3-4*5")
|
gymnasium/edx-platform
|
refs/heads/open-release/hawthorn.master
|
common/djangoapps/third_party_auth/migrations/0012_auto_20170626_1135.py
|
24
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0011_auto_20170616_0112'),
]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='send_to_registration_first',
field=models.BooleanField(default=False, help_text='If this option is selected, users will be directed to the registration page immediately after authenticating with the third party instead of the login page.'),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='send_to_registration_first',
field=models.BooleanField(default=False, help_text='If this option is selected, users will be directed to the registration page immediately after authenticating with the third party instead of the login page.'),
),
migrations.AddField(
model_name='samlproviderconfig',
name='send_to_registration_first',
field=models.BooleanField(default=False, help_text='If this option is selected, users will be directed to the registration page immediately after authenticating with the third party instead of the login page.'),
),
]
|
ramielrowe/magnum
|
refs/heads/master
|
magnum/objects/replicationcontroller.py
|
8
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from magnum.db import api as dbapi
from magnum.objects import base
@base.MagnumObjectRegistry.register
class ReplicationController(base.MagnumPersistentObject, base.MagnumObject,
base.MagnumObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = dbapi.get_instance()
fields = {
'id': fields.IntegerField(),
'uuid': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'images': fields.ListOfStringsField(nullable=True),
'bay_uuid': fields.StringField(nullable=True),
'labels': fields.DictOfStringsField(nullable=True),
'replicas': fields.IntegerField(nullable=True),
'manifest_url': fields.StringField(nullable=True),
'manifest': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(rc, db_rc):
"""Converts a database entity to a formal object."""
for field in rc.fields:
# ignore manifest_url as it was only used to create the rc
if field == 'manifest_url':
continue
# ignore manifest as it was only used to create the rc
if field == 'manifest':
continue
rc[field] = db_rc[field]
rc.obj_reset_changes()
return rc
@staticmethod
def _from_db_object_list(db_objects, cls, context):
"""Converts a list of database entities to a list of formal objects."""
return [ReplicationController._from_db_object(cls(context), obj)
for obj in db_objects]
@base.remotable_classmethod
def get_by_id(cls, context, rc_id):
"""Find a ReplicationController based on its integer id and return a
ReplicationController object.
:param rc_id: the id of a ReplicationController.
:returns: a :class:`ReplicationController` object.
"""
db_rc = cls.dbapi.get_rc_by_id(context, rc_id)
rc = ReplicationController._from_db_object(cls(context), db_rc)
return rc
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
"""Find a ReplicationController based on uuid and return
a :class:`ReplicationController` object.
:param uuid: the uuid of a ReplicationController.
:param context: Security context
:returns: a :class:`ReplicationController` object.
"""
db_rc = cls.dbapi.get_rc_by_uuid(context, uuid)
rc = ReplicationController._from_db_object(cls(context), db_rc)
return rc
@base.remotable_classmethod
def get_by_name(cls, context, name):
"""Find a ReplicationController based on name and return
a :class:`ReplicationController` object.
:param name: the name of a ReplicationController.
:param context: Security context
:returns: a :class:`ReplicationController` object.
"""
db_rc = cls.dbapi.get_rc_by_name(name)
rc = ReplicationController._from_db_object(cls(context), db_rc)
return rc
@base.remotable_classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of ReplicationController objects.
:param context: Security context.
:param limit: maximum number of resources to return in a single result.
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`ReplicationController` objects.
"""
db_rcs = cls.dbapi.get_rc_list(context, limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return ReplicationController._from_db_object_list(db_rcs, cls, context)
@base.remotable
def create(self, context=None):
"""Create a ReplicationController record in the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: ReplicationController(context)
"""
values = self.obj_get_changes()
db_rc = self.dbapi.create_rc(values)
self._from_db_object(self, db_rc)
@base.remotable
def destroy(self, context=None):
"""Delete the ReplicationController from the DB.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: ReplicationController(context)
"""
self.dbapi.destroy_rc(self.uuid)
self.obj_reset_changes()
@base.remotable
def save(self, context=None):
"""Save updates to this ReplicationController.
Updates will be made column by column based on the result
of self.what_changed().
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: ReplicationController(context)
"""
updates = self.obj_get_changes()
self.dbapi.update_rc(self.uuid, updates)
self.obj_reset_changes()
@base.remotable
def refresh(self, context=None):
"""Loads updates for this ReplicationController.
Loads a rc with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded rc column by column, if there are any updates.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: ReplicationController(context)
"""
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
for field in self.fields:
if field == 'manifest_url':
continue
if field == 'manifest':
continue
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
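# Illustrative lifecycle sketch (a hypothetical helper, never called by this
# module; the field values are placeholders and it assumes create() populates uuid):
def _example_rc_lifecycle(context):
    rc = ReplicationController(context, name='frontend', replicas=3)
    rc.create()        # INSERT via dbapi.create_rc() with the changed fields
    rc.replicas = 5
    rc.save()          # UPDATE sends only obj_get_changes()
    rc.refresh()       # re-read by uuid and apply columns changed elsewhere
    rc.destroy()       # DELETE by uuid, then reset change tracking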
|
ruchee/vimrc
|
refs/heads/master
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/u/useless/useless_super_delegation_py38.py
|
2
|
# pylint: disable=missing-docstring,too-few-public-methods
from typing import Any
class Egg:
def __init__(self, first: Any, /, second: Any) -> None:
pass
class Spam(Egg):
def __init__(self, first: float, /, second: float) -> None:
super().__init__(first, second)
class Ham(Egg):
def __init__(self, first: Any, /, second: Any) -> None: # [useless-super-delegation]
super().__init__(first, second)
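# Note (illustrative, not part of the checked code): Spam is not reported because
# its annotations narrow Any to float, so the override differs from the parent's
# signature; Ham repeats Egg.__init__'s signature exactly and only forwards the
# arguments, which is what [useless-super-delegation] flags.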
|
huntxu/fuel-library
|
refs/heads/master
|
deployment/puppet/nailgun/files/cobbler/fence_ssh.centos7.py
|
2
|
#!/usr/bin/python
import atexit
import exceptions
import pexpect
import sys
import time
sys.path.append("/usr/share/fence")
from fencing import all_opt
from fencing import atexit_handler
from fencing import check_input
from fencing import fence_action
from fencing import fence_login
from fencing import process_input
from fencing import show_docs
# BEGIN_VERSION_GENERATION
RELEASE_VERSION = "0.1.0"
BUILD_DATE = "(built Wed Oct 31 11:20:18 UTC 2012)"
MIRANTIS_COPYRIGHT = "Copyright (C) Mirantis, Inc. 2012 All rights reserved."
# END_VERSION_GENERATION
def get_power_status(conn, options):
try:
conn.sendline("/bin/echo 1")
conn.log_expect(options, options["--command-prompt"],
int(options["--shell-timeout"]))
except Exception:
return "off"
return "on"
def set_power_status(conn, options):
if options["--action"] == "off":
try:
conn.sendline("/sbin/reboot")
conn.log_expect(options, options["--command-prompt"],
int(options["--power-timeout"]))
time.sleep(2)
except Exception:
pass
def main():
device_opt = ["help", "version", "agent", "verbose", "debug",
"action", "ipaddr", "login", "passwd", "passwd_script",
"secure", "identity_file", "port", "separator",
"inet4_only", "inet6_only", "ipport",
"power_timeout", "shell_timeout",
"login_timeout", "power_wait"]
atexit.register(atexit_handler)
all_opt["login_timeout"]["default"] = 60
pinput = process_input(device_opt)
# use ssh to manipulate node
pinput["--ssh"] = 1
pinput["--command-prompt"] = ".*"
options = check_input(device_opt, pinput)
if options["--action"] != "off":
sys.exit(0)
options["-c"] = "\[EXPECT\]#\ "
# this string will be appended to the end of ssh command
options["ssh_options"] = "-t -o 'StrictHostKeyChecking=no' " \
"'/bin/bash -c \"PS1=%s /bin/bash " \
"--noprofile --norc\"'" % options["-c"]
options["-X"] = "-t -o 'StrictHostKeyChecking=no' " \
"'/bin/bash -c \"PS1=%s /bin/bash " \
"--noprofile --norc\"'" % options["-c"]
docs = {}
docs["shortdesc"] = "Fence agent that can just reboot node via ssh"
docs["longdesc"] = "fence_ssh is an I/O Fencing agent " \
"which can be used to reboot nodes via ssh."
show_docs(options, docs)
# Operate the fencing device
# this method will actually launch ssh command
conn = fence_login(options)
result = fence_action(conn, options, set_power_status,
get_power_status, None)
try:
conn.close()
except exceptions.OSError:
pass
except pexpect.ExceptionPexpect:
pass
sys.exit(result)
if __name__ == "__main__":
main()
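# Note (illustrative): despite the "off" action name, set_power_status() issues
# /sbin/reboot over the ssh session, and main() exits immediately with status 0
# for any other --action, so this agent effectively only knows how to reboot.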
|
bhargav2408/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/_pyio.py
|
45
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import warnings
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super().__init__(errno, strerror)
if not isinstance(characters_written, int):
raise TypeError("characters_written must be a integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
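# Illustrative sketch of the wrapper classes the mode handling above resolves to
# (a hypothetical helper, never called; the path is a placeholder):
def _example_open_layers(path='/tmp/_pyio_example.bin'):
    with open(path, 'wb', buffering=0) as f:
        f.write(b'raw')              # buffering=0 + binary: the raw FileIO itself
    with open(path, 'rb') as f:
        f.read()                     # read binary: BufferedReader over FileIO
    with open(path, 'r+b') as f:
        f.read()                     # read/write binary: BufferedRandom
    with open(path, 'w', encoding='utf-8') as f:
        f.write('text')              # text mode: TextIOWrapper over a BufferedWriter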
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
self.flush()
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
self.flush()
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
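# Illustrative round trip through the in-memory buffer above (a hypothetical
# helper, never called; the values in the comments assume exactly these calls):
def _example_bytesio_roundtrip():
    b = BytesIO(b'hello')
    b.seek(0, 2)                     # whence=2: position relative to the end
    b.write(b' world')
    b.seek(0)
    data = b.read()                  # b'hello world'
    b.truncate(5)                    # discard everything after 'hello'
    return data, b.getvalue()        # (b'hello world', b'hello')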
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer
try:
self._flush_unlocked()
except BlockingIOError as e:
# We can't accept anything else.
# XXX Why not just let the exception pass through?
raise BlockingIOError(e.errno, e.strerror, 0)
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
written = 0
try:
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except IOError as e:
if e.errno != EINTR:
raise
continue
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
written += n
except BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
written += n
raise BlockingIOError(e.errno, e.strerror, written)
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
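# Usage sketch (illustrative only, not part of the original module): pairing the
# two ends of an os.pipe() turns them into a single read/write object; io.FileIO
# is assumed here for the raw endpoints.
#
#     r_fd, w_fd = os.pipe()
#     pair = BufferedRWPair(io.FileIO(r_fd, "rb"), io.FileIO(w_fd, "wb"))
#     pair.write(b"ping"); pair.flush()
#     pair.read(4)    # -> b'ping'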
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
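# Usage sketch (illustrative only, not part of the original module): wrapping a
# plain incremental UTF-8 decoder in IncrementalNewlineDecoder with
# translate=True collapses \r\n into \n even when the sequence is split across
# feeds, and records which newline styles were seen.
#
#     dec = IncrementalNewlineDecoder(codecs.getincrementaldecoder("utf-8")(),
#                                     translate=True)
#     dec.decode(b"a\r")               # -> "a"   (trailing \r held back)
#     dec.decode(b"\nb", final=True)   # -> "\nb" (\r\n collapsed to \n)
#     dec.newlines                     # -> "\r\n"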
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines mode is
    enabled. With this enabled, on input, the line endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_pyio.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
self.flush()
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
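    # Worked example (illustrative values, not from the original source): the
    # cookie for position=10, dec_flags=1, bytes_to_feed=3, need_eof=0,
    # chars_to_skip=2 is 10 | (1 << 64) | (3 << 128) | (2 << 192); since each
    # field occupies its own 64-bit slot, _unpack_cookie() recovers all five
    # values with successive divmod(cookie, 1 << 64) calls.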
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
next_byte = bytearray(1)
for next_byte[0] in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the value of the object. The newline
    argument is like that of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
|
ikbelkirasan/Arduino
|
refs/heads/master
|
arduino-core/src/processing/app/i18n/python/requests/packages/charade/euckrfreq.py
|
3120
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
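# Informal reading of the figures above (added note, not from the original
# source): the 512 most frequent characters cover ~98.65% of the sampled text,
# so in typical Korean text they are about 0.98653/(1-0.98653) ~= 73 times as
# likely as all other characters combined, whereas a uniform spread over the
# 2350-character repertoire would give only 512/(2350-512) ~= 0.28; the 6.0
# above appears to be a deliberately conservative threshold between the two.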
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
# Everything below is of no interest for detection purposes
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
|
alisidd/tensorflow
|
refs/heads/asgd-dc
|
tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py
|
123
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.platform import app
FLAGS = None
def print_tensors_in_checkpoint_file(file_name, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
"""
try:
if not tensor_name:
variables = checkpoint_utils.list_variables(file_name)
for name, shape in variables:
print("%s\t%s" % (name, str(shape)))
else:
print("tensor_name: ", tensor_name)
print(checkpoint_utils.load_variable(file_name, tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
"or directory> [--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--file_name",
type=str,
default="",
help="Checkpoint filename"
)
parser.add_argument(
"--tensor_name",
type=str,
default="",
help="Name of the tensor to inspect"
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
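# Usage sketch (added for illustration; not part of the original script).
# The checkpoint path and tensor name below are hypothetical:
#
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt --tensor_name=w1
#
# The first form lists every tensor name and shape in the checkpoint; the
# second prints the contents of the named tensor.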
|
puttarajubr/commcare-hq
|
refs/heads/master
|
corehq/apps/accounting/subscription_changes.py
|
1
|
import json
import logging
import datetime
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ungettext
from corehq import privileges, Domain, toggles
from corehq.apps.accounting.utils import get_active_reminders_by_domain_name
from corehq.apps.app_manager.models import Application
from corehq.apps.fixtures.models import FixtureDataType
from corehq.apps.orgs.models import Organization
from corehq.apps.reminders.models import METHOD_SMS_SURVEY, METHOD_IVR_SURVEY
from corehq.apps.users.models import CommCareUser, UserRole
from corehq.const import USER_DATE_FORMAT
from couchexport.models import SavedExportSchema
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
logger = logging.getLogger('accounting')
LATER_SUBSCRIPTION_NOTIFICATION = 'later_subscription'
class BaseModifySubscriptionHandler(object):
supported_privileges = []
action_type = "base"
def __init__(self, domain, new_plan_version, changed_privs, verbose=False,
date_start=None, web_user=None):
self.web_user = web_user
self.verbose = verbose
self.date_start = date_start or datetime.date.today()
if isinstance(changed_privs, set):
changed_privs = list(changed_privs)
if not isinstance(domain, Domain):
domain = Domain.get_by_name(domain)
self.domain = domain
# plan dependent privilege
changed_privs.append(privileges.MOBILE_WORKER_CREATION)
# check to make sure that no subscriptions are scheduled to
# start in the future
changed_privs.append(LATER_SUBSCRIPTION_NOTIFICATION)
self.privileges = filter(lambda x: x in self.supported_privileges, changed_privs)
self.new_plan_version = new_plan_version
def get_response(self):
response = []
for priv in self.privileges:
if self.verbose:
print "Applying %s %s." % (priv, self.action_type)
message = getattr(self, 'response_%s' % priv)
if message is not None:
response.append(message)
return response
class BaseModifySubscriptionActionHandler(BaseModifySubscriptionHandler):
def get_response(self):
response = super(BaseModifySubscriptionActionHandler, self).get_response()
return len(filter(lambda x: not x, response)) == 0
class DomainDowngradeActionHandler(BaseModifySubscriptionActionHandler):
"""
This carries out the downgrade action based on each privilege.
Each response should return a boolean.
"""
supported_privileges = [
privileges.OUTBOUND_SMS,
privileges.INBOUND_SMS,
privileges.ROLE_BASED_ACCESS,
LATER_SUBSCRIPTION_NOTIFICATION,
]
action_type = "downgrade"
@property
@memoized
def _active_reminders(self):
return get_active_reminders_by_domain_name(self.domain.name)
@property
def response_outbound_sms(self):
"""
Reminder rules will be deactivated.
"""
try:
for reminder in self._active_reminders:
reminder.active = False
reminder.save()
if self.verbose:
print ("Deactivated Reminder %s [%s]"
% (reminder.nickname, reminder._id))
except Exception:
logger.exception(
"[BILLING] Failed to downgrade outbound sms for domain %s."
% self.domain.name
)
return False
return True
@property
def response_inbound_sms(self):
"""
All Reminder rules utilizing "survey" will be deactivated.
"""
try:
surveys = filter(lambda x: x.method in [METHOD_IVR_SURVEY, METHOD_SMS_SURVEY], self._active_reminders)
for survey in surveys:
survey.active = False
survey.save()
if self.verbose:
print ("Deactivated Survey %s [%s]"
% (survey.nickname, survey._id))
except Exception:
logger.exception(
"[Billing] Failed to downgrade outbound sms for domain %s."
% self.domain.name
)
return False
return True
@property
def response_role_based_access(self):
"""
Perform Role Based Access Downgrade
- Archive custom roles.
- Set user roles using custom roles to Read Only.
- Reset initial roles to standard permissions.
"""
custom_roles = [r.get_id for r in UserRole.get_custom_roles_by_domain(self.domain.name)]
if not custom_roles:
return True
if self.verbose:
for role in custom_roles:
print ("Archiving Custom Role %s" % role)
# temporarily disable this part of the downgrade until we
# have a better user experience for notifying the downgraded user
# read_only_role = UserRole.get_read_only_role_by_domain(self.domain.name)
# web_users = WebUser.by_domain(self.domain.name)
# for web_user in web_users:
# if web_user.get_domain_membership(self.domain.name).role_id in custom_roles:
# web_user.set_role(self.domain.name, read_only_role.get_qualified_id())
# web_user.save()
# for cc_user in CommCareUser.by_domain(self.domain.name):
# if cc_user.get_domain_membership(self.domain.name).role_id in custom_roles:
# cc_user.set_role(self.domain.name, 'none')
# cc_user.save()
UserRole.archive_custom_roles_for_domain(self.domain.name)
UserRole.reset_initial_roles_for_domain(self.domain.name)
return True
@property
def response_later_subscription(self):
"""
Makes sure that subscriptions beginning in the future are ended.
"""
from corehq.apps.accounting.models import (
Subscription, SubscriptionAdjustment, SubscriptionAdjustmentReason
)
try:
for later_subscription in Subscription.objects.filter(
subscriber__domain=self.domain.name,
date_start__gt=self.date_start
).order_by('date_start').all():
later_subscription.date_start = datetime.date.today()
later_subscription.date_end = datetime.date.today()
later_subscription.save()
SubscriptionAdjustment.record_adjustment(
later_subscription,
reason=SubscriptionAdjustmentReason.CANCEL,
web_user=self.web_user,
note="Cancelled due to changing subscription",
)
except Subscription.DoesNotExist:
pass
return True
class DomainUpgradeActionHandler(BaseModifySubscriptionActionHandler):
"""
This carries out the upgrade action based on each privilege.
Each response should return a boolean.
"""
supported_privileges = [
privileges.ROLE_BASED_ACCESS,
]
action_type = "upgrade"
@property
def response_role_based_access(self):
"""
Perform Role Based Access Upgrade
- Un-archive custom roles.
"""
if self.verbose:
num_archived_roles = len(UserRole.by_domain(self.domain.name,
is_archived=True))
if num_archived_roles:
print "Re-Activating %d archived roles." % num_archived_roles
UserRole.unarchive_roles_for_domain(self.domain.name)
return True
class DomainDowngradeStatusHandler(BaseModifySubscriptionHandler):
"""
This returns a list of alerts for the user if their current domain is using features that
will be removed during the downgrade.
"""
supported_privileges = [
privileges.CLOUDCARE,
privileges.LOOKUP_TABLES,
privileges.CUSTOM_BRANDING,
privileges.CROSS_PROJECT_REPORTS,
privileges.OUTBOUND_SMS,
privileges.INBOUND_SMS,
privileges.DEIDENTIFIED_DATA,
privileges.MOBILE_WORKER_CREATION,
privileges.ROLE_BASED_ACCESS,
LATER_SUBSCRIPTION_NOTIFICATION,
]
action_type = "notification"
def _fmt_alert(self, message, details=None):
if details is not None and not isinstance(details, list):
raise ValueError("details should be a list.")
return {
'message': message,
'details': details,
}
@property
def response_cloudcare(self):
"""
CloudCare enabled apps will have cloudcare_enabled set to false on downgrade.
"""
key = [self.domain.name]
db = Application.get_db()
domain_apps = db.view(
'app_manager/applications_brief',
reduce=False,
startkey=key,
endkey=key + [{}],
).all()
cloudcare_enabled_apps = []
for app_doc in iter_docs(db, [a['id'] for a in domain_apps]):
if app_doc.get('cloudcare_enabled', False):
cloudcare_enabled_apps.append((app_doc['_id'], app_doc['name']))
if not cloudcare_enabled_apps:
return None
num_apps = len(cloudcare_enabled_apps)
return self._fmt_alert(
ungettext(
"You have %(num_apps)d application that will lose CloudCare "
"access if you select this plan.",
"You have %(num_apps)d applications that will lose CloudCare "
"access if you select this plan.",
num_apps
) % {
'num_apps': num_apps,
},
[mark_safe('<a href="%(url)s">%(title)s</a>') % {
'title': a[1],
'url': reverse('view_app', args=[self.domain.name, a[0]])
} for a in cloudcare_enabled_apps],
)
@property
def response_lookup_tables(self):
"""
Lookup tables will be deleted on downgrade.
"""
num_fixtures = FixtureDataType.total_by_domain(self.domain.name)
if num_fixtures > 0:
return self._fmt_alert(
ungettext(
"You have %(num_fix)s Lookup Table set up. Selecting this "
"plan will delete this Lookup Table.",
"You have %(num_fix)s Lookup Tables set up. Selecting "
"this plan will delete these Lookup Tables.",
num_fixtures
) % {'num_fix': num_fixtures}
)
@property
def response_custom_branding(self):
"""
Custom logos will be removed on downgrade.
"""
if self.domain.has_custom_logo:
return self._fmt_alert(_("You are using custom branding. "
"Selecting this plan will remove this "
"feature."))
@property
def response_cross_project_reports(self):
"""
Organization menu and corresponding reports are hidden on downgrade.
"""
if self.domain.organization:
org = Organization.get_by_name(self.domain.organization)
return self._fmt_alert(
_("You will lose access to cross-project reports for the "
"organization '%(org_name)s'.") %
{
'org_name': org.title,
})
@property
@memoized
def _active_reminder_methods(self):
reminder_rules = get_active_reminders_by_domain_name(self.domain.name)
return [reminder.method for reminder in reminder_rules]
@property
def response_outbound_sms(self):
"""
Reminder rules will be deactivated.
"""
num_active = len(self._active_reminder_methods)
if num_active > 0:
return self._fmt_alert(
ungettext(
"You have %(num_active)d active Reminder Rule. Selecting "
"this plan will deactivate this rule.",
"You have %(num_active)d active Reminder Rules. Selecting "
"this plan will deactivate these rules.",
num_active
) % {
'num_active': num_active,
}
)
@property
def response_inbound_sms(self):
"""
All Reminder rules utilizing "survey" will be deactivated.
"""
surveys = filter(lambda x: x in [METHOD_IVR_SURVEY, METHOD_SMS_SURVEY], self._active_reminder_methods)
num_survey = len(surveys)
if num_survey > 0:
return self._fmt_alert(
ungettext(
"You have %(num_active)d active Reminder Rule for a Survey. "
"Selecting this plan will deactivate this rule.",
"You have %(num_active)d active Reminder Rules for a Survey. "
"Selecting this plan will deactivate these rules.",
num_survey
) % {
'num_active': num_survey,
}
)
@property
def response_deidentified_data(self):
"""
De-id exports will be hidden
"""
startkey = json.dumps([self.domain.name, ""])[:-3]
endkey = "%s{" % startkey
reports = SavedExportSchema.view(
"couchexport/saved_export_schemas",
startkey=startkey,
endkey=endkey,
include_docs=True,
)
num_deid_reports = len(filter(lambda r: r.is_safe, reports))
if num_deid_reports > 0:
return self._fmt_alert(
ungettext(
"You have %(num)d De-Identified Export. Selecting this "
"plan will remove it.",
"You have %(num)d De-Identified Exports. Selecting this "
"plan will remove them.",
num_deid_reports
) % {
'num': num_deid_reports,
}
)
@property
def response_mobile_worker_creation(self):
"""
Get the allowed number of mobile workers based on plan version.
"""
from corehq.apps.accounting.models import FeatureType, FeatureRate
num_users = CommCareUser.total_by_domain(self.domain.name, is_active=True)
try:
user_rate = self.new_plan_version.feature_rates.filter(
feature__feature_type=FeatureType.USER).latest('date_created')
if user_rate.monthly_limit == -1:
return
num_allowed = user_rate.monthly_limit
num_extra = num_users - num_allowed
if num_extra > 0:
return self._fmt_alert(
ungettext(
"You have %(num_users)d Mobile Worker over the monthly "
"limit of %(monthly_limit)d for this new plan. There "
"will be an additional monthly charge of USD "
"%(excess_fee)s per Mobile Worker, totalling USD "
"%(monthly_total)s per month, if you select this plan.",
"You have %(num_users)d Mobile Workers over the "
"monthly limit of %(monthly_limit)d for this new plan. "
"There will be an additional monthly charge "
"of USD %(excess_fee)s per Mobile Worker, totalling "
"USD %(monthly_total)s per month, if you "
"select this plan.",
num_extra
) % {
'num_users': num_extra,
'monthly_limit': user_rate.monthly_limit,
'excess_fee': user_rate.per_excess_fee,
'monthly_total': user_rate.per_excess_fee * num_extra,
}
)
except FeatureRate.DoesNotExist:
logger.error(
"[BILLING] "
"It seems that the plan %s did not have rate for Mobile "
"Workers. This is problematic." %
self.new_plan_version.plan.name
)
@property
def response_role_based_access(self):
"""
Alert the user if there are currently custom roles set up for the domain.
"""
custom_roles = [r.name for r in UserRole.get_custom_roles_by_domain(self.domain.name)]
num_roles = len(custom_roles)
if num_roles > 0:
return self._fmt_alert(
ungettext(
"You have %(num_roles)d Custom Role configured for your "
"project. If you select this plan, all users with that "
"role will change to having the Read Only role.",
"You have %(num_roles)d Custom Roles configured for your "
"project . If you select this plan, all users with these "
"roles will change to having the Read Only role.",
num_roles
) % {
'num_roles': num_roles,
}, custom_roles)
@property
def response_later_subscription(self):
"""
Alert the user if they have subscriptions scheduled to start
in the future.
"""
from corehq.apps.accounting.models import Subscription
later_subs = Subscription.objects.filter(
subscriber__domain=self.domain.name,
date_start__gt=self.date_start
).order_by('date_start')
if later_subs.exists():
next_subscription = later_subs[0]
plan_desc = next_subscription.plan_version.user_facing_description
return self._fmt_alert(_(
"You have a subscription SCHEDULED TO START on %(date_start)s. "
"Changing this plan will CANCEL that %(plan_name)s "
"subscription."
) % {
'date_start': next_subscription.date_start.strftime(USER_DATE_FORMAT),
'plan_name': plan_desc['name'],
})
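# Usage sketch (added for illustration; not part of the original module).
# The arguments are hypothetical; changed_privs is whatever set of privilege
# slugs differs between the old and the new plan version.
def _example_downgrade(domain, new_plan_version, changed_privs, web_user=None):
    """Illustrative only: gather downgrade alerts, then apply the downgrade."""
    alerts = DomainDowngradeStatusHandler(
        domain, new_plan_version, changed_privs, web_user=web_user
    ).get_response()
    succeeded = DomainDowngradeActionHandler(
        domain, new_plan_version, changed_privs, web_user=web_user
    ).get_response()
    return alerts, succeeded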
|
TheFraserLab/ASEr
|
refs/heads/master
|
ASEr/cluster.py
|
2
|
"""
Submit jobs to slurm or torque, or with multiprocessing.
============================================================================
AUTHOR: Michael D Dacre, mike.dacre@gmail.com
ORGANIZATION: Stanford University
LICENSE: MIT License, property of Stanford, use as you wish
CREATED: 2016-44-20 23:03
Last modified: 2016-03-30 21:33
DESCRIPTION: Allows simple job submission with either torque, slurm, or
with the multiprocessing module.
                  To set the environment, set QUEUE to one of ['torque',
'slurm', 'normal'], or run get_cluster_environment().
To submit a job, run submit().
All jobs write out a job file before submission, even though
this is not necessary (or useful) with multiprocessing. In
                  normal mode, this is a .cluster file, in slurm it is a
.cluster.sbatch and a .cluster.script file, in torque it is a
.cluster.qsub file.
The name argument is required for submit, it is used to
generate the STDOUT and STDERR files. Irrespective of mode
the STDOUT file will be name.cluster.out and the STDERR file
will be name.cluster.err.
Note: `.cluster` is added to all names to make deletion less
dangerous
Dependency tracking is supported in torque or slurm mode,
to use it pass a list of job ids to submit or submit_file with
the `dependencies` keyword argument.
To clean up cluster files, run clean(directory), if directory
is not provided, the current directory is used.
This will delete all files in that were generated by this
script.
CAUTION: The clean() function will delete **EVERY** file with
extensions matching those in this file::
.cluster.err
.cluster.out
.cluster.sbatch & .cluster.script for slurm mode
.cluster.qsub for torque mode
.cluster for normal mode
============================================================================
"""
import os
import re
from time import sleep
from textwrap import dedent
from subprocess import check_output, CalledProcessError
from multiprocessing import Pool, pool
# Us
from ASEr import run
from ASEr import logme
#########################
# Which system to use #
#########################
# Default is normal, change to 'slurm' or 'torque' as needed.
QUEUE = 'normal'
ALLOWED_QUEUES = ['torque', 'slurm', 'normal']
#########################################################
# The multiprocessing pool, only used in 'local' mode #
#########################################################
POOL = None
# Reset broken multithreading
# Some of the numpy C libraries can break multithreading; this command
# fixes the issue.
check_output("taskset -p 0xff %d &>/dev/null" % os.getpid(), shell=True)
def get_cluster_environment():
"""Detect the local cluster environment and set QUEUE globally.
Uses which to search for sbatch first, then qsub. If neither is found,
    QUEUE is set to normal.
    :returns: QUEUE variable ('torque', 'slurm', or 'normal')
"""
global QUEUE
if run.which('sbatch'):
QUEUE = 'slurm'
elif run.which('qsub'):
QUEUE = 'torque'
else:
        QUEUE = 'normal'
if QUEUE == 'slurm' or QUEUE == 'torque':
logme.log('{} detected, using for cluster submissions'.format(QUEUE),
'debug')
else:
logme.log('No cluster environment detected, using multiprocessing',
'debug')
return QUEUE
#####################################
# Wait for cluster jobs to finish #
#####################################
def wait(jobs):
"""Wait for jobs to finish.
:jobs: A single job or list of jobs to wait for. With torque or slurm,
these should be job IDs, with normal mode, these are
multiprocessing job objects (returned by submit())
"""
check_queue() # Make sure the QUEUE is usable
# Sanitize argument
if not isinstance(jobs, (list, tuple)):
jobs = [jobs]
for job in jobs:
if not isinstance(job, (str, int, pool.ApplyResult)):
raise ClusterError('job must be int, string, or ApplyResult, ' +
'is {}'.format(type(job)))
if QUEUE == 'normal':
for job in jobs:
if not isinstance(job, pool.ApplyResult):
raise ClusterError('jobs must be ApplyResult objects')
job.wait()
elif QUEUE == 'torque':
# Wait for 5 seconds before checking, as jobs take a while to be queued
# sometimes
sleep(5)
s = re.compile(r' +') # For splitting qstat output
# Jobs must be strings for comparison operations
jobs = [str(j) for j in jobs]
        c = 0
        while True:
try:
q = check_output(['qstat', '-a']).decode().rstrip().split('\n')
except CalledProcessError:
if c == 5:
raise
c += 1
sleep(2)
continue
# Check header
if not re.split(r' {2,100}', q[3])[9] == 'S':
raise ClusterError('Unrecognized torque qstat format')
# Build a list of completed jobs
complete = []
for j in q[5:]:
i = s.split(j)
if i[9] == 'C':
complete.append(i[0].split('.')[0])
# Build a list of all jobs
all = [s.split(j)[0].split('.')[0] for j in q[5:]]
# Trim down job list
jobs = [j for j in jobs if j in all]
jobs = [j for j in jobs if j not in complete]
if len(jobs) == 0:
return
sleep(2)
elif QUEUE == 'slurm':
# Wait for 2 seconds before checking, as jobs take a while to be queued
# sometimes
sleep(2)
# Jobs must be strings for comparison operations
jobs = [str(j) for j in jobs]
while True:
# Slurm allows us to get a custom output for faster parsing
            q = check_output(
                ['squeue', '-h', '-o', '%A,%t']).decode().rstrip().split('\n')
            # Build a list of (job_id, state) pairs, one per queued job
            entries = [line.split(',') for line in q if line]
            complete = [i[0] for i in entries if i[1] == 'CD']
            failed = [i[0] for i in entries if i[1] == 'F']
            all = [i[0] for i in entries]
# Trim down job list, ignore failures
jobs = [i for i in jobs if i not in all]
jobs = [i for i in jobs if i not in complete]
jobs = [i for i in jobs if i not in failed]
if len(jobs) == 0:
return
sleep(2)
#########################
# Submissions scripts #
#########################
def submit(command, name, threads=None, time=None, cores=None, mem=None,
partition=None, modules=[], path=None, dependencies=None):
"""Submit a script to the cluster.
Used in all modes::
:command: The command to execute.
:name: The name of the job.
Used for normal mode::
:threads: Total number of threads to use at a time, defaults to all.
Used for torque and slurm::
:time: The time to run for in HH:MM:SS.
:cores: How many cores to run on.
:mem: Memory to use in MB.
:partition: Partition to run on, default 'normal'.
:modules: Modules to load with the 'module load' command.
:path: Where to create the script, if None, current dir used.
Returns:
Job number in torque/slurm mode, 0 in normal mode
"""
check_queue() # Make sure the QUEUE is usable
if QUEUE == 'slurm' or QUEUE == 'torque':
return submit_file(make_job_file(command, name, time, cores,
mem, partition, modules, path),
dependencies=dependencies)
elif QUEUE == 'normal':
return submit_file(make_job_file(command, name), name=name,
threads=threads)
def submit_file(script_file, name=None, dependencies=None, threads=None):
"""Submit a job file to the cluster.
If QUEUE is torque, qsub is used; if QUEUE is slurm, sbatch is used;
if QUEUE is normal, the file is executed with subprocess.
:dependencies: A job number or list of job numbers.
In slurm: `--dependency=afterok:` is used
For torque: `-W depend=afterok:` is used
:threads: Total number of threads to use at a time, defaults to all.
ONLY USED IN NORMAL MODE
:name: The name of the job, only used in normal mode.
:returns: job number for torque or slurm
multiprocessing job object for normal mode
"""
check_queue() # Make sure the QUEUE is usable
# Sanitize arguments
name = str(name)
# Check dependencies
if dependencies:
if isinstance(dependencies, (str, int)):
dependencies = [dependencies]
if not isinstance(dependencies, (list, tuple)):
raise Exception('dependencies must be a list, int, or string.')
dependencies = [str(i) for i in dependencies]
if QUEUE == 'slurm':
if dependencies:
dependencies = '--dependency=afterok:{}'.format(
':'.join([str(d) for d in dependencies]))
args = ['sbatch', dependencies, script_file]
else:
args = ['sbatch', script_file]
# Try to submit job 5 times
count = 0
while True:
try:
job = int(check_output(args).decode().rstrip().split(' ')[-1])
except CalledProcessError:
if count == 5:
raise
count += 1
sleep(1)
continue
break
return job
elif QUEUE == 'torque':
if dependencies:
dependencies = '-W depend={}'.format(
','.join(['afterok:' + d for d in dependencies]))
args = ['qsub', dependencies, script_file]
else:
args = ['qsub', script_file]
# Try to submit job 5 times
count = 0
while True:
try:
job = int(check_output(args).decode().rstrip().split('.')[0])
except CalledProcessError:
if count == 5:
raise
count += 1
sleep(1)
continue
break
return job
elif QUEUE == 'normal':
global POOL
if not POOL:
POOL = Pool(threads) if threads else Pool()
command = 'bash {}'.format(script_file)
args = dict(stdout=name + '.cluster.out', stderr=name + '.cluster.err')
return POOL.apply_async(run.cmd, (command,), args)
#########################
# Job file generation #
#########################
def make_job_file(command, name, time=None, cores=1, mem=None, partition=None,
modules=[], path=None):
"""Make a job file compatible with the chosen cluster.
If mode is normal, this is just a simple shell script.
Note: Only requests one node.
:command: The command to execute.
:name: The name of the job.
:time: The time to run for in HH:MM:SS.
:cores: How many cores to run on.
:mem: Memory to use in MB.
:partition: Partition to run on, default 'normal'.
:modules: Modules to load with the 'module load' command.
:path: Where to create the script, if None, current dir used.
:returns: The absolute path of the submission script.
"""
check_queue() # Make sure the QUEUE is usable
# Sanitize arguments
name = str(name)
cores = cores if cores else 1 # In case cores are passed as None
modules = [modules] if isinstance(modules, str) else modules
usedir = os.path.abspath(path) if path else os.path.abspath('.')
precmd = ''
for module in modules:
precmd += 'module load {}\n'.format(module)
precmd += dedent("""\
cd {}
date +'%d-%H:%M:%S'
echo "Running {}"
""".format(usedir, name))
pstcmd = dedent("""\
exitcode=$?
echo Done
date +'%d-%H:%M:%S'
if [[ $exitcode != 0 ]]; then
            echo Exited with code: $exitcode >&2
fi
""")
if QUEUE == 'slurm':
scrpt = os.path.join(usedir, '{}.cluster.sbatch'.format(name))
with open(scrpt, 'w') as outfile:
outfile.write('#!/bin/bash\n')
if partition:
outfile.write('#SBATCH -p {}\n'.format(partition))
outfile.write('#SBATCH --ntasks 1\n')
outfile.write('#SBATCH --cpus-per-task {}\n'.format(cores))
if time:
outfile.write('#SBATCH --time={}\n'.format(time))
if mem:
outfile.write('#SBATCH --mem={}\n'.format(mem))
outfile.write('#SBATCH -o {}.cluster.out\n'.format(name))
outfile.write('#SBATCH -e {}.cluster.err\n'.format(name))
outfile.write('cd {}\n'.format(usedir))
outfile.write('srun bash {}.script\n'.format(
os.path.join(usedir, name)))
with open(os.path.join(usedir, name + '.script'), 'w') as outfile:
outfile.write('#!/bin/bash\n')
outfile.write('mkdir -p $LOCAL_SCRATCH\n')
outfile.write(precmd)
outfile.write(command + '\n')
outfile.write(pstcmd)
elif QUEUE == 'torque':
scrpt = os.path.join(usedir, '{}.cluster.qsub'.format(name))
with open(scrpt, 'w') as outfile:
outfile.write('#!/bin/bash\n')
if partition:
outfile.write('#PBS -q {}\n'.format(partition))
outfile.write('#PBS -l nodes=1:ppn={}\n'.format(cores))
if time:
outfile.write('#PBS -l walltime={}\n'.format(time))
if mem:
                outfile.write('#PBS -l mem={}MB\n'.format(mem))
outfile.write('#PBS -o {}.cluster.out\n'.format(name))
outfile.write('#PBS -e {}.cluster.err\n\n'.format(name))
outfile.write('mkdir -p $LOCAL_SCRATCH\n')
outfile.write(precmd)
outfile.write(command + '\n')
outfile.write(pstcmd)
elif QUEUE == 'normal':
scrpt = os.path.join(usedir, '{}.cluster'.format(name))
with open(scrpt, 'w') as outfile:
outfile.write('#!/bin/bash\n')
outfile.write(precmd)
outfile.write(command + '\n')
outfile.write(pstcmd)
# Return the path to the script
return scrpt
##############
# Cleaning #
##############
def clean(directory='.'):
"""Delete all files made by this module in directory.
CAUTION: The clean() function will delete **EVERY** file with
extensions matching those in this file::
.cluster.err
.cluster.out
.cluster.sbatch & .cluster.script for slurm mode
.cluster.qsub for torque mode
.cluster for normal mode
:directory: The directory to run in, defaults to the current directory.
:returns: A set of deleted files
"""
check_queue() # Make sure the QUEUE is usable
extensions = ['.cluster.err', '.cluster.out']
if QUEUE == 'normal':
extensions.append('.cluster')
elif QUEUE == 'slurm':
extensions = extensions + ['.cluster.sbatch', '.cluster.script']
elif QUEUE == 'torque':
extensions.append('.cluster.qsub')
    usedir = os.path.abspath(directory)
    files = [os.path.join(usedir, i) for i in os.listdir(usedir)
             if os.path.isfile(os.path.join(usedir, i))]
if not files:
logme.log('No files found.', 'debug')
return []
deleted = []
for f in files:
for extension in extensions:
if f.endswith(extension):
os.remove(f)
deleted.append(f)
return deleted
###################
# House Keeping #
###################
class ClusterError(Exception):
"""A custom exception for cluster errors."""
pass
def check_queue():
"""Raise exception if QUEUE is incorrect."""
if QUEUE not in ALLOWED_QUEUES:
raise ClusterError('QUEUE value {} is not recognized, '.format(QUEUE) +
'should be: normal, torque, or slurm')
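# Usage sketch (added for illustration; not part of the original module).
# The command strings, job names, and resource values are hypothetical.
def _example_pipeline():
    """Illustrative only: submit two dependent jobs, wait, then clean up."""
    get_cluster_environment()          # detect slurm/torque, else use 'normal'
    job1 = submit('echo step one', name='step1', time='00:10:00',
                  cores=1, mem=1000)
    job2 = submit('echo step two', name='step2', time='00:10:00',
                  cores=1, mem=1000, dependencies=[job1])
    wait([job1, job2])                 # block until both jobs have finished
    clean()                            # remove the .cluster.* files we created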
|