repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
SaptakS/open-event-orga-server | refs/heads/master | config.py | 2 | # -*- coding: utf-8 -*-
import os
# Absolute path of the directory containing this config file.
basedir = os.path.abspath(os.path.dirname(__file__))

# Application version string, exposed to the app via Config.VERSION.
VERSION_NAME = '1.0.0-alpha.10'

# Supported UI languages: locale code -> human-readable name.
LANGUAGES = {
    'en': 'English',
    'bn': 'Bengali/Bangla',
    'zh_Hans': 'Chinese (Simplified)',
    'zh_Hant': 'Chinese (Traditional)',
    'fr': 'French',
    'de': 'German',
    'id': 'Indonesian',
    'ko': 'Korean',
    'pl': 'Polish',
    'es': 'Spanish',
    'th': 'Thai',
    'vi': 'Vietnamese',
    'hi': 'Hindi',
    'ja': 'Japanese',
    'ru': 'Russian',
}
class Config(object):
    """
    The base configuration option. Contains the defaults.

    Environment variables read at import time: SERVER_NAME, DATABASE_URL,
    FORCE_SSL. If DATABASE_URL is missing or empty the process exits
    immediately, since nothing works without a database.
    """
    DEBUG = False

    # Exactly one of these is flipped on by the subclasses below.
    DEVELOPMENT = False
    STAGING = False
    PRODUCTION = False
    TESTING = False

    CACHING = False
    PROFILE = False
    SQLALCHEMY_RECORD_QUERIES = False

    INTEGRATE_SOCKETIO = False

    VERSION = VERSION_NAME
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    ERROR_404_HELP = False
    CSRF_ENABLED = True
    SERVER_NAME = os.getenv('SERVER_NAME')
    CORS_HEADERS = 'Content-Type'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', None)
    # Threshold in seconds — presumably used to flag slow queries; confirm
    # against the query-timing middleware before changing.
    DATABASE_QUERY_TIMEOUT = 0.1

    if not SQLALCHEMY_DATABASE_URI:
        # BUG FIX: the bare Python 2 `print` statement was a SyntaxError on
        # Python 3; the call form behaves identically on both versions.
        print('`DATABASE_URL` either not exported or empty')
        exit()

    BASE_DIR = basedir
    FORCE_SSL = os.getenv('FORCE_SSL', 'no') == 'yes'

    # Upload/static locations, all rooted at the project directory.
    UPLOADS_FOLDER = BASE_DIR + '/static/uploads/'
    TEMP_UPLOADS_FOLDER = BASE_DIR + '/static/uploads/temp/'
    UPLOAD_FOLDER = UPLOADS_FOLDER
    STATIC_URL = '/static/'
    STATIC_ROOT = 'staticfiles'
    STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)

    if FORCE_SSL:
        PREFERRED_URL_SCHEME = 'https'
class ProductionConfig(Config):
    """Settings used for a production deployment."""

    MINIFY_PAGE = True
    PRODUCTION = True
    CACHING = True
    # SocketIO stays disabled unless the INTEGRATE_SOCKETIO environment
    # variable explicitly forces it on.
    socketio_integration = os.environ.get('INTEGRATE_SOCKETIO')
    INTEGRATE_SOCKETIO = socketio_integration == 'true'
class StagingConfig(ProductionConfig):
    """Production-like settings for a staging deployment."""

    STAGING = True
    PRODUCTION = False
class DevelopmentConfig(Config):
    """Settings for local development: debug on, query recording enabled."""

    DEVELOPMENT = True
    DEBUG = True
    CACHING = True
    # Record queries so database performance can be inspected.
    SQLALCHEMY_RECORD_QUERIES = True
    # Integrate SocketIO only when INTEGRATE_SOCKETIO is set to 'true'
    # (the comparison already yields a bool).
    socketio_integration = os.environ.get('INTEGRATE_SOCKETIO')
    INTEGRATE_SOCKETIO = socketio_integration == 'true'
class TestingConfig(Config):
    """
    The configuration for a test suite
    """
    # Keep SocketIO out of the test run.
    INTEGRATE_SOCKETIO = False
    TESTING = True
    # Run Celery tasks synchronously and surface their exceptions.
    CELERY_ALWAYS_EAGER = True
    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
    SQLALCHEMY_RECORD_QUERIES = True
    DEBUG_TB_ENABLED = False
    # In-memory broker: no external queue is needed for tests.
    BROKER_BACKEND = 'memory'
|
butchman0922/gourmet | refs/heads/master | gourmet/plugins/web_plugin/gourmetweb/recview/tests.py | 1940 | """
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # BUG FIX: failUnlessEqual is a long-deprecated unittest alias
        # (removed in Python 3.12); assertEqual is the supported spelling.
        self.assertEqual(1 + 1, 2)
# Doctest suite picked up by the test runner via the __test__ protocol.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
sugarguo/Flask_Blog | refs/heads/master | ext_lib/setuptools/tests/test_upload_docs.py | 522 | """build_ext tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
# Minimal setup.py written into the scratch project directory by setUp().
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
    """Tests for upload_docs zipfile creation, run in a scratch project."""

    def setUp(self):
        # Create a throwaway project with a minimal setup.py and chdir in.
        self.dir = tempfile.mkdtemp()
        setup = os.path.join(self.dir, 'setup.py')
        f = open(setup, 'w')
        f.write(SETUP_PY)
        f.close()
        self.old_cwd = os.getcwd()
        os.chdir(self.dir)

        self.upload_dir = os.path.join(self.dir, 'build')
        os.mkdir(self.upload_dir)

        # A test document.
        f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
        f.write("Hello world.")
        f.close()

        # An empty folder.
        os.mkdir(os.path.join(self.upload_dir, 'empty'))

        # NOTE(review): lexicographic string comparison of sys.version; it
        # happens to work for the 2.x/3.x versions this targets but is
        # fragile in general.
        if sys.version >= "2.6":
            # Redirect the user site directories to temp dirs so the
            # command cannot touch the real user site-packages.
            self.old_base = site.USER_BASE
            site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
            self.old_site = site.USER_SITE
            site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()

    def tearDown(self):
        # Restore the working directory and remove everything from setUp().
        os.chdir(self.old_cwd)
        shutil.rmtree(self.dir)
        if sys.version >= "2.6":
            shutil.rmtree(site.USER_BASE)
            shutil.rmtree(site.USER_SITE)
            site.USER_BASE = self.old_base
            site.USER_SITE = self.old_site

    def test_create_zipfile(self):
        # Test to make sure zipfile creation handles common cases.
        # This explicitly includes a folder containing an empty folder.
        dist = Distribution()

        cmd = upload_docs(dist)
        cmd.upload_dir = self.upload_dir
        cmd.target_dir = self.upload_dir
        tmp_dir = tempfile.mkdtemp()
        tmp_file = os.path.join(tmp_dir, 'foo.zip')
        try:
            zip_file = cmd.create_zipfile(tmp_file)

            assert zipfile.is_zipfile(tmp_file)

            zip_file = zipfile.ZipFile(tmp_file) # woh...

            # The empty folder is deliberately not archived.
            assert zip_file.namelist() == ['index.html']

            zip_file.close()
        finally:
            shutil.rmtree(tmp_dir)
|
gratefulfrog/lib | refs/heads/master | python/pmg_tk/startup/remote_pdb_load.py | 1 | # Copyright Notice
# ================
#
# The PyMOL Plugin source code in this file is copyrighted, but you can
# freely use and copy it as long as you don't change or remove any of
# the copyright notices.
#
# ----------------------------------------------------------------------
# This PyMOL Plugin is Copyright (C) 2004 by Charles Moad <cmoad@indiana.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name(s) of the author(s) not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR(S) DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN
# NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
from Tkinter import *
from pymol import cmd
def __init__(self):
    """PyMOL plugin hook, called at load time with the app object.

    `self` is the application instance (it exposes `menuBar`) — this is
    PyMOL's legacy plugin-registration convention.
    """
    # Simply add the menu entry and callback
    self.menuBar.addmenuitem('Plugin', 'command',
                             'PDB Loader Service',
                             label = 'PDB Loader Service',
                             command = lambda s=self : FetchPDB(s))
    # FetchPDB(self)
# Class that simply takes the pdbcode from a dialog and retrieves the file
class FetchPDB:
    """Prompt the user for a PDB code and load it via cmd.fetch()."""

    def __init__(self, app):
        # Imports are local; presumably so the plugin module loads even when
        # these Tk helpers are only needed once the dialog opens — confirm.
        import tkSimpleDialog
        import tkMessageBox
        import urllib
        import gzip
        import os
        import string

        # Ask for the 4-character PDB identifier.
        pdbCode = tkSimpleDialog.askstring('PDB Loader Service',
                                           'Please enter a 4-digit pdb code:',
                                           parent=app.root)

        if pdbCode: # None is returned for user cancel
            result = cmd.fetch(pdbCode)
            # cmd.fetch signals failure with -1.
            if result == -1:
                tkMessageBox.showerror('Invalid Code',
                                       'You entered an invalid pdb code: ' + pdbCode,
                                       parent=app.root)
|
benthomasson/ansible | refs/heads/devel | lib/ansible/plugins/lookup/items.py | 46 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Items lookup: returns the given terms as a flattened list."""

    def run(self, terms, **kwargs):
        # _flatten is inherited from LookupBase; presumably flattens one
        # level of nesting in `terms` — confirm against LookupBase.
        return self._flatten(terms)
|
ITNano/WikiSubtitleReader | refs/heads/master | diff_tool.py | 1 | # Contains diff functionality
import re
import codecs
import kuplett_parser
import os.path
import preprocess
class DiffData():
    """One diff entry: a line index plus whether that line is new."""

    def __init__(self, line, isNewLine):
        # `line` indexes into the new content when isNewLine is true,
        # otherwise into the previously saved snapshot.
        self.isNewLine = isNewLine
        self.line = line
def find_unchanged_lines(sourcename, content, allowEmptyLines):
    """Diff `content` against the previously saved snapshot for `sourcename`.

    Returns a list of DiffData entries: unchanged lines point at their index
    in the previous snapshot (isNewLine False), new lines point at their
    index in `content` (isNewLine True). The snapshot file is rewritten with
    the current content so the next run diffs against it.

    BUG FIX: both file handles were previously leaked (never closed), and
    the read handle shadowed the `file` builtin.
    """
    content = preprocess.preprocess_general(content, ':')
    result = []
    filename = get_diff_filename(sourcename)
    if os.path.exists(filename):
        # Compare against the previous snapshot, scanning forward only so
        # matched lines keep their relative order.
        infile = codecs.open(filename, 'r', 'utf-8')
        try:
            prevLines = infile.readlines()
        finally:
            infile.close()
        counterContent = 0
        counterPrev = 0
        for line in content:
            if allowEmptyLines or len(line) > 0:
                foundMatch = False
                line = line.strip()
                for index in range(counterPrev, len(prevLines)):
                    if line == prevLines[index].strip():
                        result.append(DiffData(index, False))
                        counterPrev = index + 1
                        foundMatch = True
                        break
                if not foundMatch:
                    result.append(DiffData(counterContent, True))
            counterContent += 1
    else:
        # No snapshot yet: every (non-empty, unless allowed) line is new.
        counter = 0
        for line in content:
            if allowEmptyLines or len(line.strip()) > 0:
                result.append(DiffData(counter, True))
            counter += 1

    # Save data to next time
    if len(os.path.dirname(filename).strip()) > 0 and not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    outfile = codecs.open(filename, 'w', 'utf-8')
    try:
        for line in content:
            outfile.write(line + "\n")
    finally:
        outfile.close()
    return result
def has_diff(sourcefile):
    """Tell whether a diff snapshot has already been saved for this source."""
    snapshot = get_diff_filename(sourcefile)
    return os.path.exists(snapshot)
def get_diff_filename(sourcefile):
    # Snapshot files live under diff/, keyed by the source's generic name.
    return "diff/"+kuplett_parser.get_generic_name(sourcefile)
# Sample fixture: a generator function (body is a single yield).
def generator_example():
    yield 1
# Sample fixture: returns its argument unchanged.
def non_builtin_example(a):
    return a
# Sample fixture: returns its argument unchanged.
def mock_example(a):
    return a
# Sample fixture: returns its argument unchanged.
def list_example(a):
    return a
def multi_line_docstring_example():
    """
    La \ndi dee
    One
    Two
    Three
    """
    # NOTE: the docstring above contains a literal \n escape; it is part of
    # this fixture and must stay exactly as written.
    return 'Eric the half a bee'
def two_line_docstring():
    """a b c d e f g
    Eric the half a bee"""
    # Fixture: two-line docstring closed on the same line as its last text.
    return 1
def single_line_return_value(a):
    """
    :rtype: int
    """
    # Fixture: docstring already declares the return type in reST style.
    return 1
def single_line_docstring(a):
    """One line"""
    # Fixture: single-line docstring; body returns the argument unchanged.
    return a
# Fixture: single return statement, no docstring.
def single_result_line():
    return 1
# Fixture: identity function without a docstring.
def new_docstring(a):
    return a
# Fixture: explicitly returns None and carries no docstring.
def no_docstring():
    return None
|
yury-s/v8-inspector | refs/heads/master | Source/chrome/build/android/pylib/gtest/gtest_config.py | 56 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
    'components_browsertests',
    'content_gl_tests',
    'heap_profiler_unittests',
    'devtools_bridge_tests',
]

# Telemetry-driven suites that are still considered experimental.
TELEMETRY_EXPERIMENTAL_TEST_SUITES = [
    'telemetry_unittests',
]

# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
    'android_webview_unittests',
    'base_unittests',
    'breakpad_unittests',
    'cc_unittests',
    'components_unittests',
    'content_browsertests',
    'content_unittests',
    'events_unittests',
    'gl_tests',
    'gl_unittests',
    'gpu_unittests',
    'ipc_tests',
    'media_unittests',
    'midi_unittests',
    'net_unittests',
    'sandbox_linux_unittests',
    'skia_unittests',
    'sql_unittests',
    'sync_unit_tests',
    'ui_android_unittests',
    'ui_base_unittests',
    'ui_touch_selection_unittests',
    'unit_tests',
    'webkit_unit_tests',
]

# Tests fail in component=shared_library build, which is required for ASan.
# http://crbug.com/344868
ASAN_EXCLUDED_TEST_SUITES = [
    'breakpad_unittests',
    'sandbox_linux_unittests'
]
|
clouserw/zamboni | refs/heads/master | mkt/site/management/commands/clean_redis.py | 1 | import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')

# We process the keys in chunks of size CHUNK.
CHUNK = 3000

# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50

# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5

# Calling redis can raise these errors (tuple usable in `except`).
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
    """First pass: dump every cache key (read from the slave) to a temp
    file, then re-exec this script with the file name appended so that
    cleanup() runs in a fresh process.

    NOTE: this is a Python 2 module (xrange, iterator .next()).
    """
    def keys():
        # Yield the full key list in chunks of CHUNK.
        ks = slave.keys()
        log.info('There are %s keys to clean up.' % len(ks))
        ks = iter(ks)
        while 1:
            buffer = []
            for _ in xrange(CHUNK):
                try:
                    buffer.append(ks.next())
                except StopIteration:
                    yield buffer
                    return
            yield buffer

    tmp = tempfile.NamedTemporaryFile(delete=False)
    for ks in keys():
        if ks:
            # BUG FIX: each chunk needs its own trailing newline; writing
            # '\n'.join(ks) alone fused the last key of one chunk with the
            # first key of the next, and made `wc -l` undercount by one.
            tmp.write('\n'.join(ks) + '\n')
    tmp.close()
    # It's hard to get Python to clean up the memory from slave.keys(), so
    # we'll let the OS do it. You have to pass sys.executable both as the
    # thing to run and so argv[0] is set properly.
    os.execl(sys.executable, sys.executable, sys.argv[0],
             sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
    """Second pass: read key names from `filename` (written by vacuum()),
    check their set cardinality on the slave, and expire sets outside
    [MIN, MAX] on the master.

    BUG FIX: the key file handle was never closed; it is now closed once
    all chunks have been consumed.
    """
    tmp = open(filename)
    # total[0] = number of keys in the file, total[1] = number dropped.
    total = [1, 0]
    p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
    total[0] = int(p.communicate()[0].strip().split()[0])

    def file_keys():
        # Yield stripped key names from the file in chunks of CHUNK.
        while 1:
            buffer = []
            for _ in xrange(CHUNK):
                line = tmp.readline()
                if line:
                    buffer.append(line.strip())
                else:
                    yield buffer
                    return
            yield buffer

    num = 0
    for ks in file_keys():
        pipe = slave.pipeline()
        for k in ks:
            pipe.scard(k)
        try:
            drop = [k for k, size in zip(ks, pipe.execute())
                    if 0 < size < MIN or size > MAX]
        except RedisError:
            continue
        num += len(ks)
        percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
        total[1] += len(drop)
        log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
        pipe = master.pipeline()
        for k in drop:
            pipe.expire(k, EXPIRE)
        try:
            pipe.execute()
        except RedisError:
            continue
        time.sleep(1)  # Poor man's rate limiting.
    tmp.close()

    if total[0]:
        log.info('Dropped %s keys [%.1f%%].' %
                 (total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
    help = "Clean up the redis used by cache machine."

    def handle(self, *args, **kw):
        # With a filename argument we are in the second (re-exec'd) pass;
        # without one, start the first pass, which dumps keys and re-execs.
        try:
            master = redisutils.connections['cache']
            slave = redisutils.connections['cache_slave']
        except Exception:
            log.error('Could not connect to redis.', exc_info=True)
            return
        if args:
            filename = args[0]
            try:
                cleanup(master, slave, filename)
            finally:
                # Always remove the temporary key file, even on failure.
                os.unlink(filename)
        else:
            vacuum(master, slave)
|
fbradyirl/home-assistant | refs/heads/dev | homeassistant/components/ifttt/__init__.py | 2 | """Support to trigger Maker IFTTT recipes."""
import json
import logging
import requests
import voluptuous as vol
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)

# Event fired on the Home Assistant bus for each incoming IFTTT webhook.
EVENT_RECEIVED = "ifttt_webhook_received"

# Attributes accepted by the ifttt.trigger service call.
ATTR_EVENT = "event"
ATTR_TARGET = "target"
ATTR_VALUE1 = "value1"
ATTR_VALUE2 = "value2"
ATTR_VALUE3 = "value3"

CONF_KEY = "key"

SERVICE_TRIGGER = "trigger"

# Service schema: an event name, optional target list, up to three values.
SERVICE_TRIGGER_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_EVENT): cv.string,
        vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_VALUE1): cv.string,
        vol.Optional(ATTR_VALUE2): cv.string,
        vol.Optional(ATTR_VALUE3): cv.string,
    }
)

# `key` accepts either a single API key or a mapping of name -> key.
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(DOMAIN): vol.Schema(
            {vol.Required(CONF_KEY): vol.Any({cv.string: cv.string}, cv.string)}
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the IFTTT service component."""
    if DOMAIN not in config:
        return True

    api_keys = config[DOMAIN][CONF_KEY]
    if isinstance(api_keys, str):
        # A single bare key is stored under a default name.
        api_keys = {"default": api_keys}

    def trigger_service(call):
        """Handle IFTTT trigger service calls."""
        event = call.data[ATTR_EVENT]
        # When no explicit target is given, fire at every configured key.
        targets = call.data.get(ATTR_TARGET, list(api_keys))
        value1 = call.data.get(ATTR_VALUE1)
        value2 = call.data.get(ATTR_VALUE2)
        value3 = call.data.get(ATTR_VALUE3)

        target_keys = {}
        for target in targets:
            if target in api_keys:
                target_keys[target] = api_keys[target]
            else:
                _LOGGER.error("No IFTTT api key for %s", target)

        try:
            import pyfttt

            for target, key in target_keys.items():
                res = pyfttt.send_event(key, event, value1, value2, value3)
                if res.status_code != 200:
                    _LOGGER.error("IFTTT reported error sending event to %s.", target)
        except requests.exceptions.RequestException:
            _LOGGER.exception("Error communicating with IFTTT")

    hass.services.async_register(
        DOMAIN, SERVICE_TRIGGER, trigger_service, schema=SERVICE_TRIGGER_SCHEMA
    )

    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback."""
    body = await request.text()

    if not body:
        data = {}
    else:
        try:
            data = json.loads(body)
        except ValueError:
            # Malformed JSON payloads are silently dropped.
            return None

    if isinstance(data, dict):
        data["webhook_id"] = webhook_id

    hass.bus.async_fire(EVENT_RECEIVED, data)
async def async_setup_entry(hass, entry):
    """Configure based on config entry."""
    # Register the webhook endpoint that feeds handle_webhook above.
    hass.components.webhook.async_register(
        DOMAIN, "IFTTT", entry.data[CONF_WEBHOOK_ID], handle_webhook
    )
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    # Deregister the webhook so no further events fire for this entry.
    hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
    return True
# pylint: disable=invalid-name
# Entry removal is delegated to the shared webhook config-entry-flow helper.
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
OpenMusicKontrollers/patchmatrix | refs/heads/master | subprojects/nk_pugl/pugl/scripts/dox_to_sphinx.py | 7 | #!/usr/bin/env python3
# Copyright 2020 David Robillard <d@drobilla.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Write Sphinx markup from Doxygen XML.
Takes a path to a directory of XML generated by Doxygen, and emits a directory
with a reStructuredText file for every documented symbol.
"""
import argparse
import os
import sys
import textwrap
import xml.etree.ElementTree
__author__ = "David Robillard"
__date__ = "2020-11-18"
__email__ = "d@drobilla.net"
__license__ = "ISC"
# The version tracks the date, with dots instead of dashes.
__version__ = __date__.replace("-", ".")
def load_index(index_path):
    """
    Load the index from XML.

    :returns: A dictionary from ID to skeleton records with basic information
    for every documented entity. Some records have an ``xml_filename`` key
    with the filename of a definition file. These files will be loaded later
    to flesh out the records in the index.
    """
    root = xml.etree.ElementTree.parse(index_path).getroot()

    index = {}
    for compound in root:
        compound_id = compound.get("refid")
        compound_kind = compound.get("kind")
        compound_name = compound.find("name").text
        # Directories, files, and pages are not emitted as symbol documents.
        if compound_kind in ["dir", "file", "page"]:
            continue

        # Add record for compound (compounds appear only once in the index)
        assert compound_id not in index
        index[compound_id] = {
            "kind": compound_kind,
            "name": compound_name,
            "xml_filename": compound_id + ".xml",
            "children": [],
        }

        # Members of a namespace keep their qualified names.
        name_prefix = (
            ("%s::" % compound_name) if compound_kind == "namespace" else ""
        )

        for child in compound.findall("member"):
            if child.get("refid") in index:
                # Only groups may list a member already recorded elsewhere.
                assert compound_kind == "group"
                continue

            # Everything has a kind and a name
            child_record = {
                "kind": child.get("kind"),
                "name": name_prefix + child.find("name").text,
            }

            if child.get("kind") == "enum":
                # Enums are not compounds, but we want to resolve the parent of
                # their values so they are not written as top level documents
                child_record["children"] = []

            if child.get("kind") == "enumvalue":
                # Remove namespace prefix
                child_record["name"] = child.find("name").text

            index[child.get("refid")] = child_record

    return index
def resolve_index(index, root):
    """
    Walk a definition document and extend the index for linking.

    This does two things: sets the "parent" and "children" fields of all
    applicable records, and sets the "strong" field of enums so that the
    correct Sphinx role can be used when referring to them.
    """
    def add_child(index, parent_id, child_id):
        # Record the parent/child relationship between two index records.
        parent = index[parent_id]
        child = index[child_id]

        if child["kind"] == "enumvalue":
            assert parent["kind"] == "enum"
            assert "parent" not in child or child["parent"] == parent_id
            child["parent"] = parent_id
        else:
            if parent["kind"] in ["class", "struct", "union"]:
                assert "parent" not in child or child["parent"] == parent_id
                child["parent"] = parent_id

            if child_id not in parent["children"]:
                parent["children"] += [child_id]

    compound = root.find("compounddef")
    compound_kind = compound.get("kind")
    if compound_kind == "group":
        for subgroup in compound.findall("innergroup"):
            add_child(index, compound.get("id"), subgroup.get("refid"))

        for klass in compound.findall("innerclass"):
            add_child(index, compound.get("id"), klass.get("refid"))

    for section in compound.findall("sectiondef"):
        if section.get("kind").startswith("private"):
            # Private members are dropped from the index entirely.
            for member in section.findall("memberdef"):
                if member.get("id") in index:
                    del index[member.get("id")]
        else:
            for member in section.findall("memberdef"):
                member_id = member.get("id")

                add_child(index, compound.get("id"), member_id)

                if member.get("kind") == "enum":
                    # strong="yes" marks C++11 scoped enums (enum class).
                    index[member_id]["strong"] = member.get("strong") == "yes"
                    for value in member.findall("enumvalue"):
                        add_child(index, member_id, value.get("id"))
def sphinx_role(record, lang):
    """
    Return the Sphinx role used for a record.

    This is used for the description directive like ".. c:function::", and
    links like ":c:func:`foo`.
    """
    kind = record["kind"]

    if kind in ("class", "function", "namespace", "struct", "union"):
        return "%s:%s" % (lang, kind)
    if kind == "define":
        return "c:macro"
    if kind == "enum":
        suffix = ":enum-class" if record["strong"] else ":enum"
        return lang + suffix
    if kind == "typedef":
        return "%s:type" % lang
    if kind == "enumvalue":
        return "%s:enumerator" % lang
    if kind == "variable":
        suffix = ":member" if "parent" in record else ":var"
        return lang + suffix

    raise RuntimeError("No known role for kind '%s'" % kind)
def child_identifier(lang, parent_name, child_name):
    """
    Return the identifier for an enum value or struct member.

    Sphinx, for some reason, uses a different syntax for this in C and C++.
    """
    separator = "::" if lang == "cpp" else "."
    return parent_name + separator + child_name
def link_markup(index, lang, refid):
    """Return a Sphinx link for a Doxygen reference."""
    record = index[refid]
    kind = record["kind"]
    name = record["name"]
    # Resolve the role first so unknown-role kinds fail the same way
    # regardless of the branch below.
    role = sphinx_role(record, lang)

    if kind in ("class", "enum", "struct", "typedef", "union"):
        return ":%s:`%s`" % (role, name)

    if kind == "function":
        return ":%s:func:`%s`" % (lang, name)

    if kind == "enumvalue":
        parent_name = index[record["parent"]]["name"]
        return ":%s:`%s`" % (role, child_identifier(lang, parent_name, name))

    if kind == "variable":
        if "parent" in record:
            parent_name = index[record["parent"]]["name"]
            return ":%s:`%s`" % (
                role,
                child_identifier(lang, parent_name, name),
            )
        return ":%s:var:`%s`" % (lang, name)

    raise RuntimeError("Unknown link target kind: %s" % kind)
def indent(markup, depth):
    """
    Indent markup by `depth` reST indentation levels (4 spaces each).

    Like textwrap.indent() but takes an integer level for clarity.
    """
    return textwrap.indent(markup, depth * "    ")
def heading(text, level):
    """
    Return a ReST heading at a given level (1-6).

    Follows the style in the Python documentation guide, see
    <https://devguide.python.org/documenting/#sections>: levels 1 and 2
    ("#" and "*") get an overline as well as an underline.
    """
    assert 1 <= level <= 6

    chars = ("#", "*", "=", "-", "^", '"')
    # BUG FIX: chars is zero-indexed, so level N maps to chars[N - 1].
    # Indexing with `level` itself skipped "#" for level 1 and raised
    # IndexError for level 6 even though the assert above permits it.
    line = chars[level - 1] * len(text)
    return "%s%s\n%s\n\n" % (line + "\n" if level < 3 else "", text, line)
def dox_to_rst(index, lang, node):
    """
    Convert documentation commands (docCmdGroup) to Sphinx markup.

    This is used to convert the content of descriptions in the documentation.
    It recursively parses all children tags and raises a RuntimeError if any
    unknown tag is encountered.
    """
    def field_value(markup):
        """Return a value for a field as a single line or indented block."""
        if "\n" in markup.strip():
            return "\n" + indent(markup, 1)

        return " " + markup.strip()

    # Simple character entities.
    if node.tag == "lsquo":
        return "‘"

    if node.tag == "rsquo":
        return "’"

    # Inline code.
    if node.tag == "computeroutput":
        assert len(node) == 0
        return "``%s``" % node.text

    # Bulleted list: one "- " item per listitem child.
    if node.tag == "itemizedlist":
        markup = ""
        for item in node.findall("listitem"):
            assert len(item) == 1
            markup += "\n- %s" % dox_to_rst(index, lang, item[0])

        return markup

    # Paragraph: concatenate text, converted children, and their tails.
    if node.tag == "para":
        markup = node.text if node.text is not None else ""
        for child in node:
            markup += dox_to_rst(index, lang, child)
            markup += child.tail if child.tail is not None else ""

        return markup.strip() + "\n\n"

    # Doxygen @param list -> Sphinx ":param name:" fields.
    if node.tag == "parameterlist":
        markup = ""
        for item in node.findall("parameteritem"):
            name = item.find("parameternamelist/parametername")
            description = item.find("parameterdescription")
            assert len(description) == 1
            markup += "\n\n:param %s:%s" % (
                name.text,
                field_value(dox_to_rst(index, lang, description[0])),
            )

        return markup + "\n"

    # Code listing -> ".. code-block::" with plain (tag-stripped) text.
    if node.tag == "programlisting":
        return "\n.. code-block:: %s\n\n%s" % (
            lang,
            indent(plain_text(node), 1),
        )

    # Cross-reference; falls back to plain text when unresolved.
    if node.tag == "ref":
        refid = node.get("refid")
        if refid not in index:
            sys.stderr.write("warning: Unresolved link: %s\n" % refid)
            return node.text

        assert len(node) == 0
        assert len(link_markup(index, lang, refid)) > 0
        return link_markup(index, lang, refid)

    # @return / @see blocks.
    if node.tag == "simplesect":
        assert len(node) == 1

        if node.get("kind") == "return":
            return "\n:returns:" + field_value(
                dox_to_rst(index, lang, node[0])
            )

        if node.get("kind") == "see":
            return dox_to_rst(index, lang, node[0])

        raise RuntimeError("Unknown simplesect kind: %s" % node.get("kind"))

    # External hyperlink.
    if node.tag == "ulink":
        return "`%s <%s>`_" % (node.text, node.get("url"))

    raise RuntimeError("Unknown documentation command: %s" % node.tag)
def description_markup(index, lang, node):
    """Return the markup for a brief or detailed description."""
    assert node.tag == "briefdescription" or node.tag == "detaileddescription"
    assert not (node.tag == "briefdescription" and len(node) > 1)
    assert len(node.text.strip()) == 0

    parts = [dox_to_rst(index, lang, child) for child in node]
    return "".join(parts).strip()
def set_descriptions(index, lang, definition, record):
    """Set a record's brief/detailed descriptions from the XML definition."""
    for tag in ("briefdescription", "detaileddescription"):
        element = definition.find(tag)
        if element is None:
            continue
        record[tag] = description_markup(index, lang, element)
def set_template_params(node, record):
    """Set a record's template_params from the XML definition, if present."""
    template_param_list = node.find("templateparamlist")
    if template_param_list is None:
        return

    params = []
    for param in template_param_list.findall("param"):
        declname = param.find("declname")
        if declname is None:
            # Type parameter
            params.append(plain_text(param.find("type")))
        else:
            # Value parameter
            type_text = plain_text(param.find("type"))
            params.append("%s %s" % (type_text, plain_text(declname)))

    record["template_params"] = ", ".join(params)
def plain_text(node):
    """
    Return the plain text of a node with all tags ignored.

    This is needed where Doxygen may include refs but Sphinx needs plain text
    because it parses things itself to generate links.
    """
    if node.tag == "sp":
        result = " "
    else:
        result = node.text if node.text is not None else ""

    for child in node:
        result += plain_text(child)
        if child.tail is not None:
            result += child.tail

    return result
def local_name(name):
    """Return a name with all namespace prefixes stripped."""
    _, separator, tail = name.rpartition("::")
    return tail if separator else name
def read_definition_doc(index, lang, root):
    """Walk a definition document and update described records in the index."""
    # Set descriptions for the compound itself
    compound = root.find("compounddef")
    compound_record = index[compound.get("id")]
    set_descriptions(index, lang, compound, compound_record)
    set_template_params(compound, compound_record)

    if compound.find("title") is not None:
        compound_record["title"] = compound.find("title").text.strip()

    # Set documentation for all children
    for section in compound.findall("sectiondef"):
        # Private sections were already removed by resolve_index().
        if section.get("kind").startswith("private"):
            continue

        for member in section.findall("memberdef"):
            kind = member.get("kind")
            record = index[member.get("id")]
            set_descriptions(index, lang, member, record)
            set_template_params(member, record)

            if compound.get("kind") in ["class", "struct", "union"]:
                assert kind in ["function", "typedef", "variable"]
                record["type"] = plain_text(member.find("type"))

            if kind == "enum":
                for value in member.findall("enumvalue"):
                    set_descriptions(
                        index, lang, value, index[value.get("id")]
                    )
            elif kind == "function":
                # Full C/C++ prototype: "<return type> <name><args>".
                record["prototype"] = "%s %s%s" % (
                    plain_text(member.find("type")),
                    member.find("name").text,
                    member.find("argsstring").text,
                )
            elif kind == "typedef":
                name = local_name(record["name"])
                args_text = member.find("argsstring").text
                target_text = plain_text(member.find("type"))
                if args_text is not None:  # Function pointer
                    assert target_text[-2:] == "(*" and args_text[0] == ")"
                    record["type"] = target_text + args_text
                    record["definition"] = target_text + name + args_text
                else:  # Normal named typedef
                    assert target_text is not None
                    record["type"] = target_text
                    # C++ alias declarations read "name = target".
                    if member.find("definition").text.startswith("using"):
                        record["definition"] = "%s = %s" % (
                            name,
                            target_text,
                        )
                    else:
                        record["definition"] = "%s %s" % (
                            target_text,
                            name,
                        )
            elif kind == "variable":
                record["definition"] = member.find("definition").text
def declaration_string(record):
    """
    Return the string that describes a declaration.

    This is what follows the directive, and is in C/C++ syntax, except
    without keywords like "typedef" and "using" as expected by Sphinx.
    For example, "struct ThingImpl Thing" or "void run(int value)".
    """
    kind = record["kind"]

    prefix = ""
    if "template_params" in record:
        prefix = "template <%s> " % record["template_params"]

    if kind == "function":
        body = record["prototype"]
    elif kind == "typedef":
        body = record["definition"]
    elif kind == "variable":
        if "parent" in record:
            body = "%s %s" % (record["type"], local_name(record["name"]))
        else:
            body = record["definition"]
    elif "type" in record:
        body = "%s %s" % (record["type"], local_name(record["name"]))
    else:
        body = local_name(record["name"])

    return prefix + body
def document_markup(index, lang, record):
    """Return the complete document that describes some documented entity."""
    kind = record["kind"]
    role = sphinx_role(record, lang)
    name = record["name"]
    markup = ""

    # Set the Sphinx namespace scope so qualified names resolve.
    if name != local_name(name):
        markup += ".. cpp:namespace:: %s\n\n" % name[0 : name.rindex("::")]

    # Write top-level directive
    markup += ".. %s:: %s\n" % (role, declaration_string(record))

    # Write main description blurb
    markup += "\n" + indent(record["briefdescription"] + "\n", 1)
    if len(record["detaileddescription"]) > 0:
        markup += "\n" + indent(record["detaileddescription"], 1) + "\n"

    # Only compound-like kinds may carry children.
    assert (
        kind in ["class", "enum", "namespace", "struct", "union"]
        or "children" not in record
    )

    # Sphinx C++ namespaces work by setting a scope, they have no content
    child_indent = 0 if kind == "namespace" else 1

    # Write inline children if applicable
    markup += "\n" if "children" in record else ""
    for child_id in record.get("children", []):
        child_record = index[child_id]
        child_role = sphinx_role(child_record, lang)

        child_header = ".. %s:: %s\n\n" % (
            child_role,
            declaration_string(child_record),
        )

        markup += "\n"
        markup += indent(child_header, child_indent)
        markup += indent(child_record["briefdescription"], child_indent + 1)
        markup += indent(child_record["detaileddescription"], child_indent + 1)

    return markup
def symbol_filename(name):
    """Turn a (possibly qualified) symbol name into a safe filename stem."""
    return "__".join(name.split("::"))
def emit_groups(index, lang, output_dir, force):
    """Write a description file for every group documented in the index.

    :param index: mapping of entity id -> record.
    :param lang: Sphinx language domain ("c" or "cpp").
    :param output_dir: directory that receives one ``<group>.rst`` per group.
    :param force: when false, refuse to overwrite existing files.
    :raises FileExistsError: if a target file exists and ``force`` is false.
    """
    for record in index.values():
        if record["kind"] != "group":
            continue
        name = record["name"]
        filename = os.path.join(output_dir, "%s.rst" % name)
        if not force and os.path.exists(filename):
            raise FileExistsError("File already exists: '%s'" % filename)
        with open(filename, "w") as rst:
            rst.write(heading(record["title"], 1))
            # Get all child group and symbol names
            child_groups = {}
            child_symbols = {}
            for child_id in record["children"]:
                child = index[child_id]
                if child["kind"] == "group":
                    child_groups[child["name"]] = child
                else:
                    child_symbols[child["name"]] = child
            # Emit description (document body)
            if len(record["briefdescription"]) > 0:
                rst.write(record["briefdescription"] + "\n\n")
            if len(record["detaileddescription"]) > 0:
                rst.write(record["detaileddescription"] + "\n\n")
            if len(child_groups) > 0:
                # Emit TOC for child groups
                rst.write(".. toctree::\n\n")
                for name, group in child_groups.items():
                    rst.write(indent(group["name"], 1) + "\n")
            # Emit symbols. NOTE(review): symbols come out in the children's
            # listing order, not re-sorted here — confirm upstream ordering if
            # alphabetical output is expected.
            for name, symbol in child_symbols.items():
                rst.write("\n")
                rst.write(document_markup(index, lang, symbol))
                rst.write("\n")
def run(index_xml_path, output_dir, language, force):
    """Write a directory of Sphinx files from a Doxygen XML directory.

    :param index_xml_path: path to Doxygen's generated ``index.xml``.
    :param output_dir: directory to create the output files in (created if
        missing).
    :param language: Sphinx language domain, ``"c"`` or ``"cpp"``.
    :param force: overwrite existing output files instead of raising.
    """
    # Build skeleton index from index.xml
    xml_dir = os.path.dirname(index_xml_path)
    index = load_index(index_xml_path)
    # Load all definition documents
    definition_docs = []
    for record in index.values():
        if "xml_filename" in record:
            xml_path = os.path.join(xml_dir, record["xml_filename"])
            definition_docs += [xml.etree.ElementTree.parse(xml_path)]
    # Do an initial pass of the definition documents to resolve the index
    for root in definition_docs:
        resolve_index(index, root)
    # Finally read the documentation from definition documents
    for root in definition_docs:
        read_definition_doc(index, language, root)
    # Create output directory. exist_ok=True replaces the former
    # "except OSError: pass", which also hid real failures (e.g. permission
    # errors) only for them to resurface confusingly in emit_groups().
    os.makedirs(output_dir, exist_ok=True)
    # Emit output files
    emit_groups(index, language, output_dir, force)
if __name__ == "__main__":
    # Command-line entry point: convert Doxygen XML output to Sphinx sources.
    ap = argparse.ArgumentParser(
        usage="%(prog)s [OPTION]... XML_DIR OUTPUT_DIR",
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    ap.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="overwrite files",
    )
    ap.add_argument(
        "-l",
        "--language",
        default="c",
        choices=["c", "cpp"],
        help="language domain for output",
    )
    # Fixed help typo ("path index.xml" -> "path to index.xml").
    ap.add_argument("index_xml_path", help="path to index.xml from Doxygen")
    ap.add_argument("output_dir", help="output directory")
    # Removed stray debugging print(sys.argv) that polluted normal output.
    run(**vars(ap.parse_args(sys.argv[1:])))
|
chrisfilda/edx_platform | refs/heads/master | lms/djangoapps/verify_student/migrations/0001_initial.py | 64 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial table for SoftwareSecurePhotoVerification."""

    def forwards(self, orm):
        """Apply: create the verification table and emit South's create signal."""
        # Adding model 'SoftwareSecurePhotoVerification'
        db.create_table('verify_student_softwaresecurephotoverification', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('status', self.gf('model_utils.fields.StatusField')(default='created', max_length=100, no_check_for_status=True)),
            ('status_changed', self.gf('model_utils.fields.MonitorField')(default=datetime.datetime.now, monitor=u'status')),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('face_image_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
            ('photo_id_image_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
            ('receipt_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
            ('submitted_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)),
            ('reviewing_user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='photo_verifications_reviewed', null=True, to=orm['auth.User'])),
            ('reviewing_service', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('error_msg', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('error_code', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('photo_id_key', self.gf('django.db.models.fields.TextField')(max_length=1024)),
        ))
        db.send_create_signal('verify_student', ['SoftwareSecurePhotoVerification'])

    def backwards(self, orm):
        """Reverse: drop the verification table."""
        # Deleting model 'SoftwareSecurePhotoVerification'
        db.delete_table('verify_student_softwaresecurephotoverification')

    # Frozen ORM state used by South to construct the fake "orm" object passed
    # to forwards()/backwards(); these are snapshots, not live model classes.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'verify_student.softwaresecurephotoverification': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['verify_student']
ResearchComputing/RCAMP | refs/heads/master | rcamp/tests/test_accounts_views.py | 1 | from django.test import (
TestCase,
override_settings,
RequestFactory,
Client
)
from django.http import Http404
import mock
import datetime
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from tests.utilities.ldap import LdapTestCase
from tests.utilities.utils import (
SessionEnabledTestMixin,
SafeTestCase
)
from accounts.models import (
AccountRequest,
Intent,
IdTracker,
CuLdapUser,
CsuLdapUser
)
def get_account_request_verify_defaults():
    """Standard POST payload for the account-request verification step."""
    return {
        'username': 'testuser',
        'password': 'testpass',
        'role': 'faculty',
        'department': 'physics',
    }
def get_org_user_defaults():
    """Attributes of a typical user record returned from an external LDAP."""
    return {
        'username': 'testuser',
        'first_name': 'Test',
        'last_name': 'User',
        'email': 'testuser@colorado.edu',
    }
class AccountRequestVerifyUcbViewTestCase(LdapTestCase):
    """Tests for the CU Boulder ('ucb') account-request verification view."""

    def test_request_verify(self):
        """A user without an auto-approve affiliation lands in session without a status."""
        mock_cu_user_defaults = get_org_user_defaults()
        mock_cu_user = mock.MagicMock(**mock_cu_user_defaults)
        mock_cu_user.authenticate.return_value = True
        account_request_verify_defaults = get_account_request_verify_defaults()
        with mock.patch('accounts.models.CuLdapUser.objects.get',return_value=mock_cu_user):
            # Neither affiliation qualifies for auto-approval.
            mock_cu_user.edu_affiliation = ['affiliated','unrecognized']
            response = self.client.post(
                '/accounts/account-request/create/verify/ucb',
                data=account_request_verify_defaults
            )
        session_ar_data = self.client.session['account_request_data']
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith('/accounts/account-request/create/intent'))
        self.assertEqual(session_ar_data['organization'],'ucb')
        # No 'status' key means the request will default to pending later on.
        self.assertFalse('status' in session_ar_data)
        self.assertEqual(session_ar_data['email'],'testuser@colorado.edu')
        self.assertEqual(session_ar_data['role'],'faculty')
        self.assertEqual(session_ar_data['department'],'physics')
        self.assertEqual(session_ar_data['first_name'],'Test')
        self.assertEqual(session_ar_data['last_name'],'User')
        self.assertEqual(session_ar_data['username'],'testuser')

    def test_request_verify_autoapprove(self):
        """A 'faculty' affiliation marks the session request as auto-approved ('a')."""
        mock_cu_user_defaults = get_org_user_defaults()
        mock_cu_user = mock.MagicMock(**mock_cu_user_defaults)
        mock_cu_user.authenticate.return_value = True
        account_request_verify_defaults = get_account_request_verify_defaults()
        with mock.patch('accounts.models.CuLdapUser.objects.get',return_value=mock_cu_user):
            mock_cu_user.edu_affiliation = ['faculty']
            response = self.client.post(
                '/accounts/account-request/create/verify/ucb',
                data=account_request_verify_defaults
            )
        session_ar_data = self.client.session['account_request_data']
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith('/accounts/account-request/create/intent'))
        self.assertEqual(session_ar_data['organization'],'ucb')
        self.assertEqual(session_ar_data['status'],'a')

    def test_request_verify_invalid_creds(self):
        """Bad username or password must not create an AccountRequest."""
        mock_cu_user_defaults = get_org_user_defaults()
        mock_cu_user = mock.MagicMock(**mock_cu_user_defaults)
        mock_cu_user.authenticate.return_value = True
        account_request_verify_defaults = get_account_request_verify_defaults()
        # Unknown username: the LDAP lookup raises DoesNotExist.
        with mock.patch('accounts.models.CuLdapUser.objects.get',side_effect=[CuLdapUser.DoesNotExist]):
            response = self.client.post(
                '/accounts/account-request/create/verify/ucb',
                data=account_request_verify_defaults
            )
        self.assertEqual(
            response.context['form'].errors['__all__'],
            ['Invalid username']
        )
        self.assertRaises(
            AccountRequest.DoesNotExist,
            AccountRequest.objects.get,
            **{'username':account_request_verify_defaults['username']}
        )
        # Known username but failed bind: authenticate() returns False.
        mock_cu_user.authenticate.return_value = False
        with mock.patch('accounts.models.CuLdapUser.objects.get',return_value=mock_cu_user):
            response = self.client.post(
                '/accounts/account-request/create/verify/ucb',
                data=account_request_verify_defaults
            )
        self.assertEqual(
            response.context['form'].errors['__all__'],
            ['Invalid password']
        )
        self.assertRaises(
            AccountRequest.DoesNotExist,
            AccountRequest.objects.get,
            **{'username':account_request_verify_defaults['username']}
        )
class AccountRequestVerifyCsuViewTestCase(LdapTestCase):
    """Tests for the CSU ('csu') account-request verification view."""

    def test_request_verify(self):
        """Successful CSU verification stores auto-approved ('a') data in session."""
        mock_csu_user_defaults = get_org_user_defaults()
        mock_csu_user = mock.MagicMock(**mock_csu_user_defaults)
        mock_csu_user.authenticate.return_value = True
        account_request_verify_defaults = get_account_request_verify_defaults()
        with mock.patch('accounts.models.CsuLdapUser.objects.get',return_value=mock_csu_user):
            response = self.client.post(
                '/accounts/account-request/create/verify/csu',
                data=account_request_verify_defaults
            )
        session_ar_data = self.client.session['account_request_data']
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith('/accounts/account-request/create/intent'))
        self.assertEqual(session_ar_data['organization'],'csu')
        self.assertEqual(session_ar_data['status'],'a')

    def test_request_verify_invalid_creds(self):
        """Bad username or password must not create an AccountRequest."""
        mock_csu_user_defaults = get_org_user_defaults()
        mock_csu_user = mock.MagicMock(**mock_csu_user_defaults)
        mock_csu_user.authenticate.return_value = True
        account_request_verify_defaults = get_account_request_verify_defaults()
        # Unknown username: the LDAP lookup raises DoesNotExist.
        with mock.patch('accounts.models.CsuLdapUser.objects.get',side_effect=[CsuLdapUser.DoesNotExist]):
            response = self.client.post(
                '/accounts/account-request/create/verify/csu',
                data=account_request_verify_defaults
            )
        self.assertEqual(
            response.context['form'].errors['__all__'],
            ['Invalid username']
        )
        self.assertRaises(
            AccountRequest.DoesNotExist,
            AccountRequest.objects.get,
            **{'username':account_request_verify_defaults['username']}
        )
        # Known username but failed bind: authenticate() returns False.
        mock_csu_user.authenticate.return_value = False
        with mock.patch('accounts.models.CsuLdapUser.objects.get',return_value=mock_csu_user):
            response = self.client.post(
                '/accounts/account-request/create/verify/csu',
                data=account_request_verify_defaults
            )
        self.assertEqual(
            response.context['form'].errors['__all__'],
            ['Invalid password']
        )
        self.assertRaises(
            AccountRequest.DoesNotExist,
            AccountRequest.objects.get,
            **{'username':account_request_verify_defaults['username']}
        )
def get_account_request_session_defaults():
    """Default account-request data as stashed in the session between the
    verification step and the intent step of the wizard."""
    return {
        'organization': 'ucb',
        'username': 'testuser',
        'email': 'testuser@colorado.edu',
        'first_name': 'Test',
        'last_name': 'User',
        'role': 'faculty',
        'department': 'physics',
    }
class AccountRequestIntentViewTestCase(LdapTestCase,SessionEnabledTestMixin):
    """Tests for the intent step of the account-request wizard."""

    def setUp(self):
        # An IdTracker must exist so the request pipeline can allocate posix
        # ids. (The previous version bound this to an unused local variable.)
        IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1001
        )

    def test_request_submit_intent_single(self):
        """A single intent reason creates the AccountRequest and Intent rows."""
        account_request_data = get_account_request_session_defaults()
        session = self.get_session(self.client)
        session['account_request_data'] = account_request_data
        session.save()
        intent_request_data = dict(
            reason_summit = True,
            summit_description = 'Description of work.',
            summit_funding = 'NSF Grant 1234'
        )
        with mock.patch('django.dispatch.Signal.send') as account_request_received_mock:
            response = self.client.post(
                '/accounts/account-request/create/intent',
                data = intent_request_data
            )
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith('/accounts/account-request/review'))
        account_request = AccountRequest.objects.get(username='testuser')
        # The creation signal must fire with the newly created request.
        account_request_received_mock.assert_called_with(sender=account_request.__class__,instance=account_request)
        self.assertEqual(self.client.session['account_request_data']['id'],account_request.id)
        self.assertEqual(account_request.organization,'ucb')
        self.assertEqual(account_request.username,'testuser')
        self.assertEqual(account_request.email,'testuser@colorado.edu')
        self.assertEqual(account_request.first_name,'Test')
        self.assertEqual(account_request.last_name,'User')
        self.assertEqual(account_request.role,'faculty')
        self.assertEqual(account_request.department,'physics')
        # This should be the case, as no override status was set on session
        self.assertEqual(account_request.status,'p')
        intent = Intent.objects.get(account_request=account_request)
        self.assertTrue(intent.reason_summit)
        self.assertFalse(intent.reason_course)
        self.assertFalse(intent.reason_petalibrary)
        self.assertFalse(intent.reason_blanca)
        self.assertEqual(intent.summit_description,'Description of work.')
        self.assertEqual(intent.summit_funding,'NSF Grant 1234')

    def test_request_submit_intent_multi(self):
        """Multiple intent reasons are all persisted on the Intent record."""
        account_request_data = get_account_request_session_defaults()
        session = self.get_session(self.client)
        session['account_request_data'] = account_request_data
        session.save()
        intent_request_data = dict(
            reason_summit = True,
            reason_course = True,
            reason_petalibrary = True,
            summit_description = 'Description of work.',
            summit_funding = 'NSF Grant 1234',
            course_instructor_email = 'instructor@colorado.edu',
            course_number = 'PHYS1000'
        )
        response = self.client.post(
            '/accounts/account-request/create/intent',
            data = intent_request_data
        )
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response.url.endswith('/accounts/account-request/review'))
        account_request = AccountRequest.objects.get(username='testuser')
        self.assertEqual(self.client.session['account_request_data']['id'],account_request.id)
        intent = Intent.objects.get(account_request=account_request)
        self.assertTrue(intent.reason_summit)
        self.assertTrue(intent.reason_course)
        self.assertTrue(intent.reason_petalibrary)
        self.assertFalse(intent.reason_blanca)
        self.assertEqual(intent.summit_description,'Description of work.')
        self.assertEqual(intent.summit_funding,'NSF Grant 1234')
        self.assertEqual(intent.course_instructor_email,'instructor@colorado.edu')
        self.assertEqual(intent.course_number,'PHYS1000')
class AccountRequestReviewViewTestCase(SafeTestCase,SessionEnabledTestMixin):
    """Tests for the final review page of the account-request wizard."""

    def test_request_review(self):
        """With a completed request in session, the review page renders it."""
        account_request_data = get_account_request_session_defaults()
        account_request = AccountRequest.objects.create(**account_request_data)
        session = self.get_session(self.client)
        session['account_request_data'] = account_request_data
        session['account_request_data']['id'] = account_request.id
        session.save()
        response = self.client.get('/accounts/account-request/review')
        self.assertEqual(response.status_code,200)
        self.assertEqual(response.context['account_request'],account_request)

    def test_request_review_no_data(self):
        """Visiting the review page without wizard session data yields a 404."""
        response = self.client.get('/accounts/account-request/review')
        self.assertEqual(response.status_code,404)
|
solarjoe/numpy | refs/heads/master | tools/allocation_tracking/track_allocations.py | 28 | from __future__ import division, absolute_import, print_function
import numpy as np
import gc
import inspect
from alloc_hook import NumpyAllocHook
class AllocationTracker(object):
    """Aggregate numpy allocation/free events per executing source line.

    Use as a context manager; afterwards ``allocation_trace`` holds one tuple
    per source line and ``write_html`` renders the trace as a sortable table.
    """

    def __init__(self, threshold=0):
        '''track numpy allocations of size threshold bytes or more.'''
        self.threshold = threshold
        # The total number of bytes currently allocated with size above
        # threshold
        self.total_bytes = 0
        # We buffer requests line by line and move them into the allocation
        # trace when a new line occurs
        self.current_line = None
        self.pending_allocations = []
        # Maps live pointer -> size of the tracked block at that address.
        self.blocksizes = {}
        # list of (lineinfo, bytes allocated, bytes freed, # allocations, #
        # frees, maximum memory usage, long-lived bytes allocated)
        self.allocation_trace = []
        self.numpy_hook = NumpyAllocHook(self.hook)

    def __enter__(self):
        self.numpy_hook.__enter__()

    def __exit__(self, type, value, traceback):
        self.check_line_changed()  # forces pending events to be handled
        # NOTE(review): exception info is not forwarded to the hook's __exit__;
        # confirm NumpyAllocHook.__exit__ accepts zero arguments.
        self.numpy_hook.__exit__()

    def hook(self, inptr, outptr, size):
        """Dispatch a raw allocator event to the malloc/realloc/free handlers."""
        # minimize the chances that the garbage collector kicks in during a
        # cython __dealloc__ call and causes a double delete of the current
        # object. To avoid this fully the hook would have to avoid all python
        # api calls, e.g. by being implemented in C like python 3.4's
        # tracemalloc module
        gc_on = gc.isenabled()
        gc.disable()
        if outptr == 0:  # it's a free
            self.free_cb(inptr)
        elif inptr != 0:  # realloc
            self.realloc_cb(inptr, outptr, size)
        else:  # malloc
            self.alloc_cb(outptr, size)
        if gc_on:
            gc.enable()

    def alloc_cb(self, ptr, size):
        # Only blocks at or above the reporting threshold are tracked.
        if size >= self.threshold:
            self.check_line_changed()
            self.blocksizes[ptr] = size
            self.pending_allocations.append(size)

    def free_cb(self, ptr):
        size = self.blocksizes.pop(ptr, 0)
        if size:
            self.check_line_changed()
            # Negative pending entries represent frees.
            self.pending_allocations.append(-size)

    def realloc_cb(self, newptr, oldptr, size):
        # NOTE(review): hook() invokes this as realloc_cb(inptr, outptr, size),
        # binding the hook's first pointer argument to "newptr" here — verify
        # against alloc_hook's callback convention that old/new pointers are
        # not swapped.
        if (size >= self.threshold) or (oldptr in self.blocksizes):
            self.check_line_changed()
            oldsize = self.blocksizes.pop(oldptr, 0)
            self.pending_allocations.append(size - oldsize)
            self.blocksizes[newptr] = size

    def get_code_line(self):
        # first frame is this line, then check_line_changed(), then 2 callbacks,
        # then actual code.
        try:
            return inspect.stack()[4][1:]
        except Exception:
            # Fall back to this frame when the stack is shallower than expected.
            return inspect.stack()[0][1:]

    def check_line_changed(self):
        """Flush buffered events into the trace when execution reaches a new line."""
        line = self.get_code_line()
        if line != self.current_line and (self.current_line is not None):
            # move pending events into the allocation_trace
            max_size = self.total_bytes
            bytes_allocated = 0
            bytes_freed = 0
            num_allocations = 0
            num_frees = 0
            before_size = self.total_bytes
            for allocation in self.pending_allocations:
                self.total_bytes += allocation
                if allocation > 0:
                    bytes_allocated += allocation
                    num_allocations += 1
                else:
                    bytes_freed += -allocation
                    num_frees += 1
                max_size = max(max_size, self.total_bytes)
            long_lived = max(self.total_bytes - before_size, 0)
            self.allocation_trace.append((self.current_line, bytes_allocated,
                                          bytes_freed, num_allocations,
                                          num_frees, max_size, long_lived))
            # clear pending allocations
            self.pending_allocations = []
        # move to the new line
        self.current_line = line

    def write_html(self, filename):
        """Render the allocation trace as a sortable HTML table (uses sorttable.js)."""
        f = open(filename, "w")
        f.write('<HTML><HEAD><script src="sorttable.js"></script></HEAD><BODY>\n')
        f.write('<TABLE class="sortable" width=100%>\n')
        f.write("<TR>\n")
        cols = "event#,lineinfo,bytes allocated,bytes freed,#allocations,#frees,max memory usage,long lived bytes".split(',')
        for header in cols:
            f.write(" <TH>{0}</TH>".format(header))
        f.write("\n</TR>\n")
        for idx, event in enumerate(self.allocation_trace):
            f.write("<TR>\n")
            event = [idx] + list(event)
            for col, val in zip(cols, event):
                if col == 'lineinfo':
                    # special handling
                    try:
                        filename, line, module, code, index = val
                        val = "{0}({1}): {2}".format(filename, line, code[index])
                    except Exception:
                        # sometimes this info is not available (from eval()?)
                        val = str(val)
                f.write(" <TD>{0}</TD>".format(val))
            f.write("\n</TR>\n")
        f.write("</TABLE></BODY></HTML>\n")
        f.close()
if __name__ == '__main__':
    # Demo: record every numpy allocation of 1000+ bytes, then dump a report.
    alloc_tracker = AllocationTracker(1000)
    with alloc_tracker:
        for step in range(100):
            np.zeros(step * 100)
            np.zeros(step * 200)
    alloc_tracker.write_html("allocations.html")
|
rohit12/opencog | refs/heads/master | opencog/nlp/learn/Graphing Tools/addRankToLines.py | 20 | #Let's say that you have a list of objects in a file and you need to append
#their rank to them. This file does exactly that. "fp" denotes the input
#file while "fw" denotes the output file.
# Append a 1-based rank to each line of the input list. Fixes in this
# revision: both files are now closed via context managers (they were
# previously left open), the deprecated Python 2 backtick-repr syntax is
# replaced by str(), and the manual counter is replaced by enumerate().
with open("word-frequencies2.txt", "r") as fp, open("word-rank.txt", "w+") as fw:
    for rank, line in enumerate(fp, start=1):
        fw.write(line.strip() + " " + str(rank) + "\n")
Zubieta/CPP | refs/heads/master | CodeSignal/Arcade/The_Core/Level_05_List_Forest_Edge/041_Replace_Middle.py | 3 | # https://app.codesignal.com/arcade/code-arcade/list-forest-edge/APD5T5CybxTtfkdjL
def replaceMiddle(arr):
    """Collapse the two middle elements of an even-length list into their sum.

    Odd-length lists are already "middle-defined" and come back unchanged.
    """
    if len(arr) % 2:
        return arr
    half = len(arr) // 2
    merged = [arr[half - 1] + arr[half]]
    return arr[:half - 1] + merged + arr[half + 1:]
|
prospwro/odoo | refs/heads/8.0 | addons/website_blog/controllers/main.py | 132 | # -*- coding: utf-8 -*-
import datetime
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.controllers.main import login_redirect
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
    """Callable URL builder that remembers default path and query arguments.

    Instances are handed to QWeb templates so they can generate filter URLs
    (blog/tag/date combinations) without rebuilding them by hand.
    """

    def __init__(self, path='', path_args=None, **args):
        # Base path and default keyword arguments merged into every call.
        self.path = path
        self.args = args
        # Keys rendered as path segments ("/key/value") instead of "?key=value".
        self.path_args = set(path_args or [])

    def __call__(self, path=None, path_args=None, **kw):
        path = path or self.path
        # Fill in the defaults captured at construction time.
        for k, v in self.args.items():
            kw.setdefault(k, v)
        path_args = set(path_args or []).union(self.path_args)
        paths, fragments = [], []
        for key, value in kw.items():
            if value and key in path_args:
                # Browse records are serialized with their slug for pretty URLs.
                if isinstance(value, browse_record):
                    paths.append((key, slug(value)))
                else:
                    paths.append((key, value))
            elif value:
                # Remaining truthy values become query-string fragments;
                # lists/sets expand to one encoded pair per item.
                if isinstance(value, list) or isinstance(value, set):
                    fragments.append(werkzeug.url_encode([(key, item) for item in value]))
                else:
                    fragments.append(werkzeug.url_encode([(key, value)]))
        for key, value in paths:
            path += '/' + key + '/%s' % value
        if fragments:
            path += '?' + '&'.join(fragments)
        return path
class WebsiteBlog(http.Controller):
_blog_post_per_page = 20
_post_comment_per_page = 10
    def nav_list(self):
        """Group blog posts by creation date for the archives navigation.

        Returns read_group buckets augmented with 'date_begin'/'date_end'
        strings in the server date format.
        """
        blog_post_obj = request.registry['blog.post']
        groups = blog_post_obj.read_group(
            request.cr, request.uid, [], ['name', 'create_date'],
            groupby="create_date", orderby="create_date desc", context=request.context)
        for group in groups:
            # Each bucket's '__domain' carries two create_date clauses whose
            # third elements are presumably the bucket's begin/end datetimes
            # (NOTE(review): confirm against read_group's domain layout).
            begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
            end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
            group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
            group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
        return groups
@http.route([
'/blog',
'/blog/page/<int:page>',
], type='http', auth="public", website=True)
def blogs(self, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_obj = request.registry['blog.post']
total = blog_obj.search(cr, uid, [], count=True, context=context)
pager = request.website.pager(
url='/blog',
total=total,
page=page,
step=self._blog_post_per_page,
)
post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
posts = blog_obj.browse(cr, uid, post_ids, context=context)
blog_url = QueryURL('', ['blog', 'tag'])
return request.website.render("website_blog.latest_blogs", {
'posts': posts,
'pager': pager,
'blog_url': blog_url,
})
    @http.route([
        '/blog/<model("blog.blog"):blog>',
        '/blog/<model("blog.blog"):blog>/page/<int:page>',
        '/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>',
        '/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def blog(self, blog=None, tag=None, page=1, **opt):
        """ Prepare all values to display the blog.

        :return dict values: values for the templates, containing

         - 'blog': current blog
         - 'blogs': all blogs for navigation
         - 'pager': pager of posts
         - 'tag': current tag
         - 'tags': all tags, for navigation
         - 'nav_list': a dict [year][month] for archives navigation
         - 'date': date_begin optional parameter, used in archives navigation
         - 'blog_url': help object to create URLs
        """
        date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
        cr, uid, context = request.cr, request.uid, request.context
        blog_post_obj = request.registry['blog.post']
        blog_obj = request.registry['blog.blog']
        blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
        blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
        # Build the post-search domain from the active blog/tag/date filters.
        domain = []
        if blog:
            domain += [('blog_id', '=', blog.id)]
        if tag:
            domain += [('tag_ids', 'in', tag.id)]
        if date_begin and date_end:
            domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
        blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
        post_url = QueryURL('', ['blogpost'], tag_id=tag and tag.id or None, date_begin=date_begin, date_end=date_end)
        blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
        blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
        pager = request.website.pager(
            url=blog_url(),
            total=len(blog_posts),
            page=page,
            step=self._blog_post_per_page,
        )
        # The full result set is sliced in memory to the requested page.
        pager_begin = (page - 1) * self._blog_post_per_page
        pager_end = page * self._blog_post_per_page
        blog_posts = blog_posts[pager_begin:pager_end]
        tags = blog.all_tags()[blog.id]
        values = {
            'blog': blog,
            'blogs': blogs,
            'tags': tags,
            'tag': tag,
            'blog_posts': blog_posts,
            'pager': pager,
            'nav_list': self.nav_list(),
            'blog_url': blog_url,
            'post_url': post_url,
            'date': date_begin,
        }
        response = request.website.render("website_blog.blog_post_short", values)
        return response
@http.route([
    '''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
], type='http', auth="public", website=True)
def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
    """ Prepare all values to display a single blog post.

    :return dict values: values for the templates, containing

     - 'blog_post': browse of the current post
     - 'blog': browse of the current blog
     - 'blogs': list of browse records of blogs
     - 'tag': current tag, if tag_id in parameters
     - 'tags': all tags, for tag-based navigation
     - 'pager': a pager on the comments
     - 'nav_list': a dict [year][month] for archives navigation
     - 'next_post': next blog post, to direct the user towards the next interesting post
    """
    cr, uid, context = request.cr, request.uid, request.context
    tag_obj = request.registry['blog.tag']
    blog_post_obj = request.registry['blog.post']
    date_begin, date_end = post.get('date_begin'), post.get('date_end')
    # pager over the post's comments (website_message_ids), not over posts
    pager_url = "/blogpost/%s" % blog_post.id
    pager = request.website.pager(
        url=pager_url,
        total=len(blog_post.website_message_ids),
        page=page,
        step=self._post_comment_per_page,
        scope=7
    )
    pager_begin = (page - 1) * self._post_comment_per_page
    pager_end = page * self._post_comment_per_page
    comments = blog_post.website_message_ids[pager_begin:pager_end]
    tag = None
    if tag_id:
        tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
    post_url = QueryURL('', ['blogpost'], blogpost=blog_post, tag_id=tag_id, date_begin=date_begin, date_end=date_end)
    blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
    # the URL's blog segment does not own this post: redirect to the canonical URL
    if not blog_post.blog_id.id == blog.id:
        return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
    tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)

    # Find next Post
    all_post_ids = blog_post_obj.search(cr, uid, [('blog_id', '=', blog.id)], context=context)
    # should always return at least the current post
    current_blog_post_index = all_post_ids.index(blog_post.id)
    # NOTE(review): when the current post is the last one this wraps around to the
    # first post of the blog (which is the current post itself when the blog has a
    # single post) — presumably intentional "carousel" behaviour; confirm.
    next_post_id = all_post_ids[0 if current_blog_post_index == len(all_post_ids) - 1
                                else current_blog_post_index + 1]
    next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id, context=context) or False
    values = {
        'tags': tags,
        'tag': tag,
        'blog': blog,
        'blog_post': blog_post,
        'main_object': blog_post,
        'nav_list': self.nav_list(),
        'enable_editor': enable_editor,
        'next_post': next_post,
        'date': date_begin,
        'post_url': post_url,
        'blog_url': blog_url,
        'pager': pager,
        'comments': comments,
    }
    response = request.website.render("website_blog.blog_post_complete", values)
    # track visited post ids per session so the 'visits' counter is only
    # incremented once per session and per post
    request.session[request.session_id] = request.session.get(request.session_id, [])
    if not (blog_post.id in request.session[request.session_id]):
        request.session[request.session_id].append(blog_post.id)
        # Increase counter (as superuser: the public visitor has no write access)
        blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
            'visits': blog_post.visits + 1,
        }, context=context)
    return response
def _blog_post_message(self, user, blog_post_id=0, **post):
    """Post a comment on a blog post and return the id of the new mail.message."""
    cr, uid, context = request.cr, request.uid, request.context
    blog_post_model = request.registry['blog.post']
    partner_model = request.registry['res.partner']
    if uid == request.website.user_id.id:
        # anonymous visitor browsing as the public website user: resolve the
        # author partner from the submitted email, creating one if needed
        partner_ids = blog_post_model._find_partner_from_emails(
            cr, SUPERUSER_ID, 0, [post.get('email')], context=context)
        if not partner_ids or not partner_ids[0]:
            partner_ids = [partner_model.create(
                cr, SUPERUSER_ID,
                {'name': post.get('name'), 'email': post.get('email')},
                context=context)]
    else:
        # logged-in user: comment as their own partner
        partner_ids = [user.partner_id.id]
    return blog_post_model.message_post(
        cr, SUPERUSER_ID, int(blog_post_id),
        body=post.get('comment'),
        type='comment',
        subtype='mt_comment',
        author_id=partner_ids[0],
        path=post.get('path', False),
        context=context)
@http.route(['/blogpost/comment'], type='http', auth="public", website=True)
def blog_post_comment(self, blog_post_id=0, **post):
    """Handle a posted comment form and redirect back to the post's comments anchor."""
    cr, uid, context = request.cr, request.uid, request.context
    if not request.session.uid:
        # commenting requires a logged-in session
        return login_redirect()
    if post.get('comment'):
        users_model = request.registry['res.users']
        user = users_model.browse(cr, uid, uid, context=context)
        blog_post_model = request.registry['blog.post']
        blog_post_model.check_access_rights(cr, uid, 'read')
        self._blog_post_message(user, blog_post_id, **post)
    record = request.registry['blog.post'].browse(cr, uid, int(blog_post_id), context=context)
    return werkzeug.utils.redirect("/blog/%s/post/%s#comments" % (slug(record.blog_id), slug(record)))
def _get_discussion_detail(self, ids, publish=False, **post):
    """Serialize the given mail.message ids into plain dicts for the JS widget."""
    cr, uid, context = request.cr, request.uid, request.context
    message_model = request.registry.get('mail.message')
    details = []
    for message in message_model.browse(cr, SUPERUSER_ID, ids, context=context):
        author = message.author_id
        if author.image:
            author_image = "data:image/png;base64,%s" % author.image
        else:
            # fallback avatar for authors without a picture
            author_image = '/website_blog/static/src/img/anonymous.png'
        details.append({
            "id": message.id,
            "author_name": author.name,
            "author_image": author_image,
            "date": message.date,
            'body': html2plaintext(message.body),
            'website_published': message.website_published,
            'publish': publish,
        })
    return details
@http.route(['/blogpost/post_discussion'], type='json', auth="public", website=True)
def post_discussion(self, blog_post_id, **post):
    """Create a comment on the given post and return its serialized detail.

    :param blog_post_id: id of the blog post being commented
    :return: list with one dict describing the new message (see
             ``_get_discussion_detail``)
    """
    cr, uid, context = request.cr, request.uid, request.context
    # whether the current user may publish/unpublish comments
    publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
    user = request.registry['res.users'].browse(cr, uid, uid, context=context)
    # renamed from `id`: do not shadow the `id` builtin
    message_id = self._blog_post_message(user, blog_post_id, **post)
    return self._get_discussion_detail([message_id], publish, **post)
@http.route('/blogpost/new', type='http', auth="public", website=True)
def blog_post_create(self, blog_id, **post):
    """Create a draft blog post and redirect to it with the inline editor enabled."""
    cr, uid, context = request.cr, request.uid, request.context
    blog_post_model = request.registry['blog.post']
    new_id = blog_post_model.create(cr, uid, {
        'blog_id': blog_id,
        'name': _("Blog Post Title"),
        'subtitle': _("Subtitle"),
        'content': '',
        'website_published': False,
    }, context=context)
    created = blog_post_model.browse(cr, uid, new_id, context=context)
    return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(created.blog_id), slug(created)))
@http.route('/blogpost/duplicate', type='http', auth="public", website=True)
def blog_post_copy(self, blog_post_id, **post):
    """ Duplicate a blog post.

    :param blog_post_id: id of the blog post currently browsed.
    :return: redirect to the newly created blog post, editor enabled
    """
    cr, uid, context = request.cr, request.uid, request.context
    # avoid auto-subscribing followers on the copy
    create_context = dict(context, mail_create_nosubscribe=True)
    nid = request.registry['blog.post'].copy(cr, uid, int(blog_post_id), {}, context=create_context)
    # fix: the original browsed the same record twice (once with `context`
    # passed positionally) and shadowed the `post` keyword-arguments parameter
    new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
    return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/get_discussion/', type='json', auth="public", website=True)
def discussion(self, post_id=0, path=None, count=False, **post):
    """Return the comments attached to one paragraph (``path``) of a blog post,
    or only their count when ``count`` is truthy."""
    cr, uid, context = request.cr, request.uid, request.context
    message_model = request.registry.get('mail.message')
    # check current user belongs to website publisher group
    publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
    domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
    if not publish:
        # non-publishers only ever see published comments
        domain.append(('website_published', '=', True))
    ids = message_model.search(cr, SUPERUSER_ID, domain, count=count)
    return ids if count else self._get_discussion_detail(ids, publish, **post)
@http.route('/blogpost/get_discussions/', type='json', auth="public", website=True)
def discussions(self, post_id=0, paths=None, count=False, **post):
    """Batch variant of ``discussion``: one entry per requested paragraph path."""
    return [{
        "path": path,
        "val": self.discussion(post_id=post_id, path=path, count=count, **post),
    } for path in paths]
@http.route('/blogpost/change_background', type='json', auth="public", website=True)
def change_bg(self, post_id=0, image=None, **post):
    """Set the background image of a blog post; returns False when no id is given."""
    if not post_id:
        return False
    values = {'background_image': image}
    return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], values, request.context)
@http.route('/blog/get_user/', type='json', auth="public", website=True)
def get_user(self, **post):
    """Return ``[True]`` when the visitor is anonymous (no uid in session),
    ``[False]`` otherwise."""
    # `[not uid]` replaces the inverted conditional `[False if uid else True]`
    return [not request.session.uid]
|
tonk/ansible | refs/heads/devel | lib/ansible/vars/manager.py | 3 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from collections import defaultdict
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError, AnsibleTemplateError
from ansible.inventory.host import Host
from ansible.inventory.helpers import sort_groups, get_group_vars
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping, MutableMapping, Sequence
from ansible.module_utils.six import iteritems, text_type, string_types
from ansible.plugins.loader import lookup_loader
from ansible.vars.fact_cache import FactCache
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars, load_extra_vars, load_options_vars
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
def preprocess_vars(a):
    """
    Normalize loaded variable data into a list of dictionaries.

    ``None`` passes through unchanged; a single mapping is wrapped in a
    one-element list; a list is validated as-is. Raises AnsibleError when
    any element is not a mapping.
    """
    if a is None:
        return None

    data = a if isinstance(a, list) else [a]

    for item in data:
        if not isinstance(item, MutableMapping):
            raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))

    return data
class VariableManager:
    """Computes and merges all variable sources for plays, hosts and tasks."""

    # entry names allowed in the VARIABLE_PRECEDENCE config; get_vars() only
    # dispatches to local functions whose names appear in this set
    _ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
                          'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
def __init__(self, loader=None, inventory=None, version_info=None):
    """Initialize the caches and load extra vars, options vars and the fact cache.

    :param loader: DataLoader used to read vars files (may be set later)
    :param inventory: inventory manager (may also be set via set_inventory())
    :param version_info: passed through to load_options_vars()
    """
    self._nonpersistent_fact_cache = defaultdict(dict)  # set_fact/register results
    self._vars_cache = defaultdict(dict)                # include_vars results
    self._extra_vars = defaultdict(dict)
    self._host_vars_files = defaultdict(dict)
    self._group_vars_files = defaultdict(dict)
    self._inventory = inventory
    self._loader = loader
    self._hostvars = None
    # random per-run token backing the 'omit' magic variable
    self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()

    self._options_vars = load_options_vars(version_info)

    # If the basedir is specified as the empty string then it results in cwd being used.
    # This is not a safe location to load vars from.
    basedir = self._options_vars.get('basedir', False)
    self.safe_basedir = bool(basedir is False or basedir)

    # load extra vars
    self._extra_vars = load_extra_vars(loader=self._loader)

    # load fact cache
    try:
        self._fact_cache = FactCache()
    except AnsibleError as e:
        # bad cache plugin is not fatal error
        # fallback to a dict as in memory cache
        display.warning(to_text(e))
        self._fact_cache = {}
def __getstate__(self):
data = dict(
fact_cache=self._fact_cache,
np_fact_cache=self._nonpersistent_fact_cache,
vars_cache=self._vars_cache,
extra_vars=self._extra_vars,
host_vars_files=self._host_vars_files,
group_vars_files=self._group_vars_files,
omit_token=self._omit_token,
options_vars=self._options_vars,
inventory=self._inventory,
safe_basedir=self.safe_basedir,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = data.get('inventory', None)
self._options_vars = data.get('options_vars', dict())
self.safe_basedir = data.get('safe_basedir', False)
self._loader = None
self._hostvars = None
@property
def extra_vars(self):
    # read-only view of the vars supplied via -e/--extra-vars (loaded in __init__)
    return self._extra_vars
def set_inventory(self, inventory):
    # late-bind the inventory; it is not always available at construction time
    self._inventory = inventory
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True,
             _hosts=None, _hosts_all=None, stage='task'):
    '''
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore
      file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars

    ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
    on the functionality they provide. These arguments may be removed at a later date without a deprecation
    period and without warning.

    NOTE(review): ``use_cache`` is accepted but never referenced in this body —
    presumably kept for API compatibility; confirm before removing.
    '''
    display.debug("in VariableManager get_vars()")

    all_vars = dict()
    magic_variables = self._get_magic_variables(
        play=play,
        host=host,
        task=task,
        include_hostvars=include_hostvars,
        include_delegate_to=include_delegate_to,
        _hosts=_hosts,
        _hosts_all=_hosts_all,
    )

    # maps each top-level var name to a human-readable description of where it came from
    _vars_sources = {}

    def _combine_and_track(data, new_data, source):
        '''
        Wrapper function to update var sources dict and call combine_vars()

        See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
        '''
        if C.DEFAULT_DEBUG:
            # Populate var sources dict
            for key in new_data:
                _vars_sources[key] = source
        return combine_vars(data, new_data)

    # default for all cases
    basedirs = []
    if self.safe_basedir:  # avoid adhoc/console loading cwd
        basedirs = [self._loader.get_basedir()]

    if play:
        # first we compile any vars specified in defaults/main.yml
        # for all roles within the specified play
        for role in play.get_roles():
            all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)

    if task:
        # set basedirs
        if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
            basedirs = task.get_search_path()
        elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'):  # only option in 2.4.0
            basedirs = [task.get_search_path()[0]]
        elif C.PLAYBOOK_VARS_ROOT != 'top':
            # preserves default basedirs, only option pre 2.3
            raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task._role is not None and (play or task.action == 'include_role'):
            all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()),
                                          "role '%s' defaults" % task._role.name)

    if host:
        # THE 'all' group and the rest of groups for a host, used below
        all_group = self._inventory.groups.get('all')
        host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])

        # NOTE(review): _get_plugin_vars is defined here but never called within
        # this method body — it appears to be dead code; confirm before removing.
        def _get_plugin_vars(plugin, path, entities):
            data = {}
            try:
                data = plugin.get_vars(self._loader, path, entities)
            except AttributeError:
                try:
                    for entity in entities:
                        if isinstance(entity, Host):
                            data.update(plugin.get_host_vars(entity.name))
                        else:
                            data.update(plugin.get_group_vars(entity.name))
                except AttributeError:
                    if hasattr(plugin, 'run'):
                        raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
                    else:
                        raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
            return data

        # internal functions that actually do the work
        def _plugins_inventory(entities):
            ''' merges all entities by inventory source '''
            return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)

        def _plugins_play(entities):
            ''' merges all entities adjacent to play '''
            data = {}
            for path in basedirs:
                data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
            return data

        # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
        def all_inventory():
            return all_group.get_vars()

        def all_plugins_inventory():
            return _plugins_inventory([all_group])

        def all_plugins_play():
            return _plugins_play([all_group])

        def groups_inventory():
            ''' gets group vars from inventory '''
            return get_group_vars(host_groups)

        def groups_plugins_inventory():
            ''' gets plugin sources from inventory for groups '''
            return _plugins_inventory(host_groups)

        def groups_plugins_play():
            ''' gets plugin sources from play for groups '''
            return _plugins_play(host_groups)

        # NOTE(review): _ALLOWED contains 'plugins_by_group' (singular) while this
        # function is named 'plugins_by_groups', so the precedence loop below can
        # never dispatch to it; additionally data[group] is read before it is ever
        # assigned, which would raise KeyError if it were called. Confirm intent.
        def plugins_by_groups():
            '''
            merges all plugin sources by group,
            This should be used instead, NOT in combination with the other groups_plugins* functions
            '''
            data = {}
            for group in host_groups:
                data[group] = _combine_and_track(data[group], _plugins_inventory(group), "inventory group_vars for '%s'" % group)
                data[group] = _combine_and_track(data[group], _plugins_play(group), "playbook group_vars for '%s'" % group)
            return data

        # Merge groups as per precedence config
        # only allow to call the functions we want exposed
        for entry in C.VARIABLE_PRECEDENCE:
            if entry in self._ALLOWED:
                display.debug('Calling %s to load vars for %s' % (entry, host.name))
                all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
            else:
                display.warning('Ignoring unknown variable precedence entry: %s' % (entry))

        # host vars, from inventory, inventory adjacent and play adjacent via plugins
        all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
        all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
        all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)

        # finally, the facts caches for this host, if it exists
        # TODO: cleaning of facts should eventually become part of taskresults instead of vars
        try:
            facts = wrap_var(self._fact_cache.get(host.name, {}))
            all_vars.update(namespace_facts(facts))

            # push facts to main namespace
            if C.INJECT_FACTS_AS_VARS:
                all_vars = _combine_and_track(all_vars, wrap_var(clean_facts(facts)), "facts")
            else:
                # always 'promote' ansible_local
                all_vars = _combine_and_track(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}), "facts")
        except KeyError:
            pass

    if play:
        all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")

        vars_files = play.get_vars_files()
        try:
            for vars_file_item in vars_files:
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=self._loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        if not (isinstance(vars_file, Sequence)):
                            raise AnsibleError(
                                "Invalid vars_files entry found: %r\n"
                                "vars_files entries should be either a string type or "
                                "a list of string types after template expansion" % vars_file
                            )
                        try:
                            data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
                            if data is not None:
                                for item in data:
                                    all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
                            break
                        except AnsibleFileNotFound:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
                                                       % vars_file_item, obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be because of that
                        # so just show a warning and continue
                        display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
                        continue

                display.vvv("Read vars_file '%s'" % vars_file_item)
        except TypeError:
            raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
                                     "Got '%s' of type %s" % (vars_files, type(vars_files)))

        # By default, we now merge in all vars from all roles in the play,
        # unless the user has disabled this via a config option
        if not C.DEFAULT_PRIVATE_ROLE_VARS:
            for role in play.get_roles():
                all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False), "role '%s' vars" % role.name)

    # next, we merge in the vars from the role, which will specifically
    # follow the role dependency chain, and then we merge in the tasks
    # vars (which will look at parent blocks/task includes)
    if task:
        if task._role:
            all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False),
                                          "role '%s' vars" % task._role.name)
        all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")

    # next, we merge in the vars cache (include vars) and nonpersistent
    # facts cache (set_fact/register), in that order
    if host:
        # include_vars non-persistent cache
        all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
        # fact non-persistent cache
        all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")

    # next, we merge in role params and task include params
    if task:
        if task._role:
            all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role '%s' params" % task._role.name)

        # special case for include tasks, where the include params
        # may be specified in the vars field for the task, which should
        # have higher precedence than the vars/np facts above
        all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")

    # extra vars
    all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")

    # magic variables
    all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")

    # special case for the 'environment' magic variable, as someone
    # may have set it as a variable and we don't want to stomp on it
    if task:
        all_vars['environment'] = task.environment

    # if we have a task and we're delegating to another host, figure out the
    # variables for that host now so we don't have to rely on hostvars later
    if task and task.delegate_to is not None and include_delegate_to:
        all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)

    # 'vars' magic var
    if task or play:
        # has to be copy, otherwise recursive ref
        all_vars['vars'] = all_vars.copy()

    display.debug("done with get_vars()")

    if C.DEFAULT_DEBUG:
        # Use VarsWithSources wrapper class to display var sources
        return VarsWithSources.new_vars_with_sources(all_vars, _vars_sources)
    else:
        return all_vars
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to,
                         _hosts=None, _hosts_all=None):
    '''
    Returns a dictionary of so-called "magic" variables in Ansible,
    which are special variables we set internally for use.

    NOTE(review): ``host``, ``task`` (beyond task._role) and
    ``include_delegate_to`` shape which entries are emitted; in particular
    ``include_delegate_to`` is accepted but never referenced in this body —
    presumably kept for signature compatibility with get_vars(); confirm.
    '''

    variables = {}
    variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
    variables['ansible_playbook_python'] = sys.executable
    variables['ansible_config_file'] = C.CONFIG_FILE

    if play:
        # This is a list of all role names of all dependencies for all roles for this play
        dependency_role_names = list(set([d.get_name() for r in play.roles for d in r.get_all_dependencies()]))
        # This is a list of all role names of all roles for this play
        play_role_names = [r.get_name() for r in play.roles]

        # ansible_role_names includes all role names, dependent or directly referenced by the play
        variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
        # ansible_play_role_names includes the names of all roles directly referenced by this play
        # roles that are implicitly referenced via dependencies are not listed.
        variables['ansible_play_role_names'] = play_role_names
        # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
        # dependencies that are also explicitly named as roles are included in this list
        variables['ansible_dependent_role_names'] = dependency_role_names

        # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
        variables['role_names'] = variables['ansible_play_role_names']

        variables['ansible_play_name'] = play.get_name()

    if task:
        if task._role:
            variables['role_name'] = task._role.get_name(include_role_fqcn=False)
            variables['role_path'] = task._role._role_path
            variables['role_uuid'] = text_type(task._role._uuid)
            variables['ansible_collection_name'] = task._role._role_collection
            variables['ansible_role_name'] = task._role.get_name()

    if self._inventory is not None:
        variables['groups'] = self._inventory.get_groups_dict()
    if play:
        # a templated hosts pattern cannot be resolved yet, so fall back to 'all'
        templar = Templar(loader=self._loader)
        if templar.is_template(play.hosts):
            pattern = 'all'
        else:
            pattern = play.hosts or 'all'
        # add the list of hosts in the play, as adjusted for limit/filters
        if not _hosts_all:
            _hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
        if not _hosts:
            _hosts = [h.name for h in self._inventory.get_hosts()]

        variables['ansible_play_hosts_all'] = _hosts_all[:]
        variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
        variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]

        # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
        # however this would take work in the templating engine, so for now we'll add both
        variables['play_hosts'] = variables['ansible_play_batch']

    # the 'omit' value allows params to be left out if the variable they are based on is undefined
    variables['omit'] = self._omit_token
    # Set options vars
    for option, option_value in iteritems(self._options_vars):
        variables[option] = option_value

    if self._hostvars is not None and include_hostvars:
        variables['hostvars'] = self._hostvars

    return variables
def _get_delegated_vars(self, play, task, existing_variables):
    """Compute the variables of every host the task delegates to.

    Templates ``task.delegate_to`` once per loop item (the delegated host name
    may depend on the loop variable), resolves — or fabricates — each delegated
    host, and gathers its variables via :meth:`get_vars`.

    :returns: tuple ``(delegated_host_vars, _ansible_loop_cache)``; the first
        element maps delegated host names to their variable dicts, the second
        holds the templated loop items when delegate_to varied per item
        (``None`` otherwise).
    """
    if not hasattr(task, 'loop'):
        # This "task" is not a Task, so we need to skip it
        return {}, None

    # we unfortunately need to template the delegate_to field here,
    # as we're fetching vars before post_validate has been called on
    # the task that has been passed in
    vars_copy = existing_variables.copy()
    templar = Templar(loader=self._loader, variables=vars_copy)

    items = []
    has_loop = True
    if task.loop_with is not None:
        if task.loop_with in lookup_loader:
            try:
                loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar,
                                                         loader=self._loader, fail_on_undefined=True, convert_bare=False)
                items = wrap_var(lookup_loader.get(task.loop_with, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy))
            except AnsibleTemplateError:
                # This task will be skipped later due to this, so we just setup
                # a dummy array for the later code so it doesn't fail
                items = [None]
        else:
            raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
    elif task.loop is not None:
        try:
            items = templar.template(task.loop)
        except AnsibleTemplateError:
            # This task will be skipped later due to this, so we just setup
            # a dummy array for the later code so it doesn't fail
            items = [None]
    else:
        has_loop = False
        items = [None]

    # make sure the delegated_to host appears in the inventory (or is created)
    # and compute its full variable set, once per distinct delegated host name
    delegated_host_vars = dict()
    item_var = getattr(task.loop_control, 'loop_var', 'item')
    cache_items = False
    for item in items:
        # update the variables with the item value for templating, in case we need it
        if item is not None:
            vars_copy[item_var] = item
            templar.available_variables = vars_copy

        delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
        if delegated_host_name != task.delegate_to:
            # templating changed the value, so delegate_to depends on the loop item
            cache_items = True
        if delegated_host_name is None:
            raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
        if not isinstance(delegated_host_name, string_types):
            raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
                                       " converted to a string type." % type(delegated_host_name),
                               obj=task._ds)

        if delegated_host_name in delegated_host_vars:
            # no need to repeat ourselves, as the delegate_to value
            # does not appear to be tied to the loop item variable
            continue

        # a dictionary of variables to use if we have to create a new host below
        # we set the default port based on the default transport here, to make sure
        # we use the proper default for windows
        new_port = C.DEFAULT_REMOTE_PORT
        if C.DEFAULT_TRANSPORT == 'winrm':
            new_port = 5986

        new_delegated_host_vars = dict(
            ansible_delegated_host=delegated_host_name,
            ansible_host=delegated_host_name,  # not redundant as other sources can change ansible_host
            ansible_port=new_port,
            ansible_user=C.DEFAULT_REMOTE_USER,
            ansible_connection=C.DEFAULT_TRANSPORT,
        )

        # now try to find the delegated-to host in inventory, or failing that,
        # create a new host on the fly so we can fetch variables for it
        delegated_host = None
        if self._inventory is not None:
            delegated_host = self._inventory.get_host(delegated_host_name)
            # try looking it up based on the address field, and finally
            # fall back to creating a host on the fly to use for the var lookup
            if delegated_host is None:
                if delegated_host_name in C.LOCALHOST:
                    delegated_host = self._inventory.localhost
                else:
                    for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
                        # check if the address matches, or if both the delegated_to host
                        # and the current host are in the list of localhost aliases
                        if h.address == delegated_host_name:
                            delegated_host = h
                            break
                    else:
                        delegated_host = Host(name=delegated_host_name)
                        delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)
        else:
            delegated_host = Host(name=delegated_host_name)
            delegated_host.vars = combine_vars(delegated_host.vars, new_delegated_host_vars)

        # now we go fetch the vars for the delegated-to host and save them in our
        # master dictionary of variables to be used later in the TaskExecutor/PlayContext
        delegated_host_vars[delegated_host_name] = self.get_vars(
            play=play,
            host=delegated_host,
            task=task,
            include_delegate_to=False,
            include_hostvars=False,
        )

    _ansible_loop_cache = None
    if has_loop and cache_items:
        # delegate_to templating produced a change, so we will cache the templated items
        # in a special private hostvar
        # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
        # which may reprocess the loop
        _ansible_loop_cache = items

    return delegated_host_vars, _ansible_loop_cache
def clear_facts(self, hostname):
    """Drop every cached fact for *hostname*; silently ignore unknown hosts."""
    if hostname in self._fact_cache:
        del self._fact_cache[hostname]
def set_host_facts(self, host, facts):
    """Set or merge *facts* for *host* in the persistent fact cache.

    Raises AnsibleAssertionError when *facts* is not a Mapping, and
    TypeError when the existing cache entry is not a MutableMapping.
    """
    if not isinstance(facts, Mapping):
        raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
    if host not in self._fact_cache:
        # First facts for this host: store the mapping as-is.
        host_cache = facts
    else:
        host_cache = self._fact_cache[host]
        if not isinstance(host_cache, MutableMapping):
            raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
                            ' a {1}'.format(host, type(host_cache)))
        # Merge the new facts into the existing entry.
        host_cache.update(facts)
    # Write back so cache plugins backed by external stores persist the change.
    self._fact_cache[host] = host_cache
def set_nonpersistent_facts(self, host, facts):
    """Set or merge *facts* for *host* in the non-persistent fact cache."""
    if not isinstance(facts, Mapping):
        raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
    cache = self._nonpersistent_fact_cache
    try:
        existing = cache[host]
    except KeyError:
        # No entry yet: keep a reference to the caller's mapping.
        cache[host] = facts
    else:
        existing.update(facts)
def set_host_variable(self, host, varname, value):
    """Record *value* as a per-host variable in the vars cache.

    When both the cached value and the new value are mappings they are
    merged via combine_vars(); otherwise the new value replaces the old.
    """
    host_vars = self._vars_cache.setdefault(host, {})
    mergeable = (varname in host_vars
                 and isinstance(host_vars[varname], MutableMapping)
                 and isinstance(value, MutableMapping))
    if mergeable:
        # combine_vars honours the configured hash-merge behaviour.
        self._vars_cache[host] = combine_vars(host_vars, {varname: value})
    else:
        host_vars[varname] = value
class VarsWithSources(MutableMapping):
    """Dict-like mapping over vars that can also report, per top-level key,
    which source (play, host, task, ...) supplied it.

    The class does no tracking of its own: callers hand it a parallel
    ``sources`` dict, and a debug line is emitted whenever a key is read.
    """

    def __init__(self, *args, **kwargs):
        """Accept anything dict() accepts."""
        self.data = dict(*args, **kwargs)
        self.sources = {}

    @classmethod
    def new_vars_with_sources(cls, data, sources):
        """Build an instance from pre-computed data and sources dicts."""
        inst = cls(data)
        inst.sources = sources
        return inst

    def get_source(self, key):
        return self.sources.get(key, None)

    def __getitem__(self, key):
        value = self.data[key]
        # Source tracking is best-effort; see the class docstring for caveats.
        display.debug("variable '%s' from source: %s" % (key, self.sources.get(key, "unknown")))
        return value

    def __setitem__(self, key, value):
        self.data[key] = value

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def __contains__(self, key):
        # Bypass MutableMapping's default __contains__, which would call
        # __getitem__ and emit a duplicate debug message.
        return self.data.__contains__(key)

    def copy(self):
        return VarsWithSources.new_vars_with_sources(self.data.copy(), self.sources.copy())
|
arnavd96/Cinemiezer | refs/heads/master | myvenv/lib/python3.4/site-packages/numpy/distutils/ccompiler.py | 22 | from __future__ import division, absolute_import, print_function
import os
import re
import sys
import types
from copy import copy
from distutils import ccompiler
from distutils.ccompiler import *
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args, get_num_build_jobs
def replace_method(klass, method_name, func):
    """Monkey-patch *func* onto *klass* as the method *method_name*."""
    if sys.version_info[0] >= 3:
        # Py3 has no unbound methods; a plain delegating function suffices.
        def m(self, *args, **kw):
            return func(self, *args, **kw)
    else:
        m = types.MethodType(func, None, klass)
    setattr(klass, method_name, m)
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
    """
    Execute a command in a sub-process.

    Parameters
    ----------
    cmd : str
        The command to execute.
    display : str or sequence of str, optional
        The text to add to the log file kept by `numpy.distutils`.
        If not given, `display` is equal to `cmd`.

    Returns
    -------
    None

    Raises
    ------
    DistutilsExecError
        If the command failed, i.e. the exit status was not 0.
    """
    if display is None:
        display = cmd
        if is_sequence(display):
            display = ' '.join(list(display))
    log.info(display)
    # exec_command returns (exit_status, captured_output).
    s, o = exec_command(cmd)
    if s:
        # Non-zero exit: echo the captured output, then raise.
        if is_sequence(cmd):
            cmd = ' '.join(list(cmd))
        try:
            print(o)
        except UnicodeError:
            # When installing through pip, `o` can contain non-ascii chars
            pass
        if re.search('Too many open files', o):
            # Seen with highly parallel builds; a retry usually succeeds.
            msg = '\nTry rerunning setup command until build succeeds.'
        else:
            msg = ''
        raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))

replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
    """
    Return the name of the object files for the given source files.

    Parameters
    ----------
    source_filenames : list of str
        The list of paths to source files. Paths can be either relative or
        absolute, this is handled transparently.
    strip_dir : bool, optional
        Whether to strip the directory from the returned paths. If True,
        the file name prepended by `output_dir` is returned. Default is False.
    output_dir : str, optional
        If given, this path is prepended to the returned paths to the
        object files.

    Returns
    -------
    obj_names : list of str
        The list of paths to the object files corresponding to the source
        files in `source_filenames`.
    """
    if output_dir is None:
        output_dir = ''
    object_names = []
    for src_name in source_filenames:
        root, ext = os.path.splitext(os.path.normpath(src_name))
        # Drop the drive letter (Windows) ...
        root = os.path.splitdrive(root)[1]
        # ... and any leading separator, so the result is always relative.
        if os.path.isabs(root):
            root = root[1:]
        if root.startswith('..'):
            # Collapse the leading run of '..' into the basename of the
            # directory it resolves to; interior '..' components were
            # already handled by os.path.normpath above.
            cut = root.rfind('..') + 2
            prefix = os.path.basename(os.path.abspath(root[:cut]))
            root = prefix + root[cut:]
        if ext not in self.src_extensions:
            raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
        if strip_dir:
            root = os.path.basename(root)
        object_names.append(os.path.join(output_dir, root + self.obj_extension))
    return object_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
def CCompiler_compile(self, sources, output_dir=None, macros=None,
                      include_dirs=None, debug=0, extra_preargs=None,
                      extra_postargs=None, depends=None):
    """
    Compile one or more source files.

    Please refer to the Python distutils API reference for more details.

    Parameters
    ----------
    sources : list of str
        A list of filenames
    output_dir : str, optional
        Path to the output directory.
    macros : list of tuples
        A list of macro definitions.
    include_dirs : list of str, optional
        The directories to add to the default include file search path for
        this compilation only.
    debug : bool, optional
        Whether or not to output debug symbols in or alongside the object
        file(s).
    extra_preargs, extra_postargs : ?
        Extra pre- and post-arguments.
    depends : list of str, optional
        A list of file names that all targets depend on.

    Returns
    -------
    objects : list of str
        A list of object file names, one per source file `sources`.

    Raises
    ------
    CompileError
        If compilation fails.
    """
    # This method is effective only with Python >=2.3 distutils.
    # Any changes here should be applied also to fcompiler.compile
    # method to support pre Python 2.3 distutils.
    if not sources:
        return []
    # FIXME:RELATIVE_IMPORT
    if sys.version_info[0] < 3:
        from .fcompiler import FCompiler, is_f_file, has_f90_header
    else:
        from numpy.distutils.fcompiler import (FCompiler, is_f_file,
                                               has_f90_header)
    # Log which compiler(s) will be used for this batch.
    if isinstance(self, FCompiler):
        display = []
        for fc in ['f77', 'f90', 'fix']:
            fcomp = getattr(self, 'compiler_'+fc)
            if fcomp is None:
                continue
            display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
        display = '\n'.join(display)
    else:
        ccomp = self.compiler_so
        display = "C compiler: %s\n" % (' '.join(ccomp),)
    log.info(display)
    # `build` maps object filename -> (source, extension) for out-of-date files.
    macros, objects, extra_postargs, pp_opts, build = \
        self._setup_compile(output_dir, macros, include_dirs, sources,
                            depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    display = "compile options: '%s'" % (' '.join(cc_args))
    if extra_postargs:
        display += "\nextra options: '%s'" % (' '.join(extra_postargs))
    log.info(display)

    def single_compile(args):
        # Compile exactly one (object, (source, ext)) build item.
        obj, (src, ext) = args
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    if isinstance(self, FCompiler):
        objects_to_build = list(build.keys())
        f77_objects, other_objects = [], []
        for obj in objects:
            if obj in objects_to_build:
                src, ext = build[obj]
                if self.compiler_type=='absoft':
                    obj = cyg2win32(obj)
                    src = cyg2win32(src)
                if is_f_file(src) and not has_f90_header(src):
                    f77_objects.append((obj, (src, ext)))
                else:
                    other_objects.append((obj, (src, ext)))
        # f77 objects can be built in parallel
        build_items = f77_objects
        # build f90 modules serial, module files are generated during
        # compilation and may be used by files later in the list so the
        # ordering is important
        for o in other_objects:
            single_compile(o)
    else:
        build_items = build.items()
    jobs = get_num_build_jobs()
    if len(build) > 1 and jobs > 1:
        # build parallel
        import multiprocessing.pool
        pool = multiprocessing.pool.ThreadPool(jobs)
        pool.map(single_compile, build_items)
        pool.close()
    else:
        # build serial
        for o in build_items:
            single_compile(o)
    # Return *all* object filenames, not just the ones we just built.
    return objects

replace_method(CCompiler, 'compile', CCompiler_compile)
def CCompiler_customize_cmd(self, cmd, ignore=()):
    """
    Customize compiler using distutils command.

    Parameters
    ----------
    cmd : class instance
        An instance inheriting from `distutils.cmd.Command`.
    ignore : sequence of str, optional
        List of `CCompiler` commands (without ``'set_'``) that should not be
        altered. Strings that are checked for are:
        ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
        'rpath', 'link_objects')``.

    Returns
    -------
    None
    """
    log.info('customize %s using %s' % (self.__class__.__name__,
                                        cmd.__class__.__name__))

    def allow(attr):
        # Copy an option only when the command actually set it and the
        # caller did not ask to skip it.
        return getattr(cmd, attr, None) is not None and attr not in ignore

    if allow('include_dirs'):
        self.set_include_dirs(cmd.include_dirs)
    if allow('define'):
        for (name, value) in cmd.define:
            self.define_macro(name, value)
    if allow('undef'):
        for macro in cmd.undef:
            self.undefine_macro(macro)
    if allow('libraries'):
        # Extend rather than replace: keep libraries already configured.
        self.set_libraries(self.libraries + cmd.libraries)
    if allow('library_dirs'):
        self.set_library_dirs(self.library_dirs + cmd.library_dirs)
    if allow('rpath'):
        self.set_runtime_library_dirs(cmd.rpath)
    if allow('link_objects'):
        self.set_link_objects(cmd.link_objects)

replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = list(compiler.executables.keys())
for key in ['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch',
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler, key):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
lines = []
format = '%-' + repr(mx+1) + 's = %s'
for prop in props:
lines.append(format % prop)
return '\n'.join(lines)
def CCompiler_show_customization(self):
    """
    Print the compiler customizations to stdout.

    Parameters
    ----------
    None

    Returns
    -------
    None

    Notes
    -----
    Printing is only done if the distutils log threshold is < 2.
    """
    # Dead debug branch kept from upstream; flip to `if 1:` when debugging.
    if 0:
        for attrname in ['include_dirs', 'define', 'undef',
                         'libraries', 'library_dirs',
                         'rpath', 'link_objects']:
            attr = getattr(self, attrname, None)
            if not attr:
                continue
            log.info("compiler '%s' is set to %s" % (attrname, attr))
    try:
        self.get_version()
    except:
        # NOTE(review): bare except deliberately swallows any version-probe
        # failure — showing customizations must never break a build.
        pass
    if log._global_log.threshold<2:
        print('*'*80)
        print(self.__class__)
        print(_compiler_to_string(self))
        print('*'*80)

replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
    """
    Do any platform-specific customization of a compiler instance.

    This method calls `distutils.sysconfig.customize_compiler` for
    platform-specific customization, as well as optionally remove a flag
    to suppress spurious warnings in case C++ code is being compiled.

    Parameters
    ----------
    dist : object
        This parameter is not used for anything.
    need_cxx : bool, optional
        Whether or not C++ has to be compiled. If so (True), the
        ``"-Wstrict-prototypes"`` option is removed to prevent spurious
        warnings. Default is False.

    Returns
    -------
    None

    Notes
    -----
    All the default options used by distutils can be extracted with::

      from distutils import sysconfig
      sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
                                'CCSHARED', 'LDSHARED', 'SO')
    """
    # See FCompiler.customize for suggested usage.
    log.info('customize %s' % (self.__class__.__name__))
    customize_compiler(self)
    if need_cxx:
        # In general, distutils uses -Wstrict-prototypes, but this option is
        # not valid for C++ code, only for C.  Remove it if it's there to
        # avoid a spurious warning on every compilation.
        try:
            self.compiler_so.remove('-Wstrict-prototypes')
        except (AttributeError, ValueError):
            pass
        if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
            if not self.compiler_cxx:
                # Derive a C++ driver from the C driver when distutils did
                # not configure one (gcc -> g++, otherwise cc -> c++).
                if self.compiler[0].startswith('gcc'):
                    a, b = 'gcc', 'g++'
                else:
                    a, b = 'cc', 'c++'
                self.compiler_cxx = [self.compiler[0].replace(a, b)]\
                                    + self.compiler[1:]
        else:
            if hasattr(self, 'compiler'):
                log.warn("#### %s #######" % (self.compiler,))
            if not hasattr(self, 'compiler_cxx'):
                log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
    return

replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
    """
    Simple matching of version numbers, for use in CCompiler and FCompiler.

    Parameters
    ----------
    pat : str, optional
        A regular expression matching version numbers.
        Default is ``r'[-.\\d]+'``.
    ignore : str, optional
        A regular expression matching patterns to skip.
        Default is ``''``, in which case nothing is skipped.
    start : str, optional
        A regular expression matching the start of where to start looking
        for version numbers.
        Default is ``''``, in which case searching is started at the
        beginning of the version string given to `matcher`.

    Returns
    -------
    matcher : callable
        A function that is appropriate to use as the ``.version_match``
        attribute of a `CCompiler` class. `matcher` takes a single parameter,
        a version string.
    """
    def matcher(self, version_string):
        # version string may appear in the second line, so getting rid
        # of new lines:
        version_string = version_string.replace('\n', ' ')
        pos = 0
        if start:
            m = re.match(start, version_string)
            if not m:
                return None
            pos = m.end()
        while True:
            m = re.search(pat, version_string[pos:])
            if not m:
                return None
            if ignore and re.match(ignore, m.group(0)):
                # BUG FIX: m.end() is relative to the slice that was
                # searched, so it must be *added* to `pos` rather than
                # replace it.  The old `pos = m.end()` rewound the scan
                # whenever `start` had advanced `pos`, which could loop
                # forever on inputs like 'v 1 2.5' with start='v'.
                pos += m.end()
                continue
            break
        return m.group(0)
    return matcher
def CCompiler_get_version(self, force=False, ok_status=(0,)):
    """
    Return compiler version, or None if compiler is not available.

    Parameters
    ----------
    force : bool, optional
        If True, force a new determination of the version, even if the
        compiler already has a version attribute. Default is False.
    ok_status : sequence of int, optional
        The exit statuses of the version command for which the output is
        considered valid. If the status is not in `ok_status`, None is
        returned. Default is ``(0,)``.
        (Changed from a mutable list default to a tuple; membership-only
        use, so fully backward compatible.)

    Returns
    -------
    version : str or None
        Version string, in the format of `distutils.version.LooseVersion`.
    """
    if not force and hasattr(self, 'version'):
        return self.version
    self.find_executables()
    try:
        version_cmd = self.version_cmd
    except AttributeError:
        return None
    if not version_cmd or not version_cmd[0]:
        return None
    try:
        matcher = self.version_match
    except AttributeError:
        try:
            pat = self.version_pattern
        except AttributeError:
            return None

        def matcher(version_string):
            # Fallback: plain regex with a named 'version' group.
            m = re.match(pat, version_string)
            if not m:
                return None
            return m.group('version')

    status, output = exec_command(version_cmd, use_tee=0)
    version = None
    if status in ok_status:
        version = matcher(output)
        if version:
            version = LooseVersion(version)
    # Cache the result (possibly None) so repeated calls are cheap.
    self.version = version
    return version

replace_method(CCompiler, 'get_version', CCompiler_get_version)
def CCompiler_cxx_compiler(self):
    """
    Return a compiler instance suitable for compiling C++ sources.

    MSVC and the Intel-on-Windows compilers already handle C++, so the
    same instance is returned; otherwise a shallow copy is made whose
    compile and link command lines use the configured C++ front-end.
    """
    if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
        return self
    cxx = copy(self)
    cxx_cmd = cxx.compiler_cxx[0]
    cxx.compiler_so = [cxx_cmd] + cxx.compiler_so[1:]
    if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
        # AIX links through the ld_so_aix helper script shipped with
        # Python; keep it first and swap in the C++ driver after it.
        cxx.linker_so = [cxx.linker_so[0], cxx_cmd] + cxx.linker_so[2:]
    else:
        cxx.linker_so = [cxx_cmd] + cxx.linker_so[1:]
    return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
# Register numpy's extra C compiler classes with distutils so that
# `--compiler=<name>` can select them.
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
                           "Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
                            "Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
                             "Intel C Compiler for 64-bit applications")
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
                            "Intel C Compiler for 32-bit applications on Windows")
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
                              "Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
                            "PathScale Compiler for SiCortex-based applications")
# Make the new compilers candidates for the relevant platforms.
ccompiler._default_compilers += (('linux.*', 'intel'),
                                 ('linux.*', 'intele'),
                                 ('linux.*', 'intelem'),
                                 ('linux.*', 'pathcc'),
                                 ('nt', 'intelw'),
                                 ('nt', 'intelemw'))

if sys.platform == 'win32':
    compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
                                 "Mingw32 port of GNU C Compiler for Win32"\
                                 "(for MSC built Python)")
    if mingw32():
        # On windows platforms, we want to default to mingw32 (gcc)
        # because msvc can't build blitz stuff.
        log.info('Setting mingw32 as default compiler for nt.')
        ccompiler._default_compilers = (('nt', 'mingw32'),) \
                                       + ccompiler._default_compilers
# Preserve distutils' implementation before shadowing it below.
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
                  compiler=None,
                  verbose=0,
                  dry_run=0,
                  force=0):
    # Try first C compilers from numpy.distutils.
    if plat is None:
        plat = os.name
    try:
        if compiler is None:
            compiler = get_default_compiler(plat)
        (module_name, class_name, long_description) = compiler_class[compiler]
    except KeyError:
        msg = "don't know how to compile C/C++ code on platform '%s'" % plat
        if compiler is not None:
            msg = msg + " with '%s' compiler" % compiler
        raise DistutilsPlatformError(msg)
    module_name = "numpy.distutils." + module_name
    try:
        __import__ (module_name)
    except ImportError:
        # Not a numpy-provided compiler class: retry against the stock
        # distutils module of the same name (strip the "numpy." prefix).
        msg = str(get_exception())
        log.info('%s in numpy.distutils; trying from distutils',
                 str(msg))
        module_name = module_name[6:]
        try:
            __import__(module_name)
        except ImportError:
            msg = str(get_exception())
            raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
                  module_name)
    try:
        module = sys.modules[module_name]
        klass = vars(module)[class_name]
    except KeyError:
        raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
                                    "in module '%s'") % (class_name, module_name))
    compiler = klass(None, dry_run, force)
    log.debug('new_compiler returns %s' % (klass))
    return compiler

ccompiler.new_compiler = new_compiler
# Wrap distutils' gen_lib_options: quote paths containing spaces and
# flatten any nested sequences some compiler classes return.
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
    # Quote paths so directories with spaces survive the command line.
    library_dirs = quote_args(library_dirs)
    runtime_library_dirs = quote_args(runtime_library_dirs)
    r = _distutils_gen_lib_options(compiler, library_dirs,
                                   runtime_library_dirs, libraries)
    lib_opts = []
    for i in r:
        if is_sequence(i):
            lib_opts.extend(list(i))
        else:
            lib_opts.append(i)
    return lib_opts
ccompiler.gen_lib_options = gen_lib_options

# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc9', 'msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
    _m = sys.modules.get('distutils.' + _cc + 'compiler')
    if _m is not None:
        setattr(_m, 'gen_lib_options', gen_lib_options)

# Same treatment for preprocessor options: quote include directories.
_distutils_gen_preprocess_options = gen_preprocess_options
def gen_preprocess_options (macros, include_dirs):
    include_dirs = quote_args(include_dirs)
    return _distutils_gen_preprocess_options(macros, include_dirs)
ccompiler.gen_preprocess_options = gen_preprocess_options
##Fix distutils.util.split_quoted:
# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
import string
# Run of characters that need no special handling (not quote/backslash/space).
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
_has_white_re = re.compile(r'\s')
def split_quoted(s):
    # Like distutils.util.split_quoted, except that quoted words that do
    # NOT contain whitespace keep their quotes (see the XXX note below).
    s = s.strip()
    words = []
    pos = 0
    while s:
        m = _wordchars_re.match(s, pos)
        end = m.end()
        if end == len(s):
            words.append(s[:end])
            break
        if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
            words.append(s[:end])       # we definitely have a word delimiter
            s = s[end:].lstrip()
            pos = 0
        elif s[end] == '\\': # preserve whatever is being escaped;
                             # will become part of the current word
            s = s[:end] + s[end+1:]
            pos = end+1
        else:
            if s[end] == "'": # slurp singly-quoted string
                m = _squote_re.match(s, end)
            elif s[end] == '"': # slurp doubly-quoted string
                m = _dquote_re.match(s, end)
            else:
                raise RuntimeError("this can't happen (bad char '%c')" % s[end])
            if m is None:
                raise ValueError("bad string (mismatched %s quotes?)" % s[end])
            (beg, end) = m.span()
            if _has_white_re.search(s[beg+1:end-1]):
                # Quoted word with internal whitespace: strip the quotes.
                s = s[:beg] + s[beg+1:end-1] + s[end:]
                pos = m.end() - 2
            else:
                # Keeping quotes when a quoted word does not contain
                # white-space. XXX: send a patch to distutils
                pos = m.end()
        if pos >= len(s):
            words.append(s)
            break
    return words
ccompiler.split_quoted = split_quoted
##Fix distutils.util.split_quoted:
|
xujb/odoo | refs/heads/8.0 | addons/l10n_co/wizard/__init__.py | 313 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), dar@devco.co
# Co-Authors Juan Pablo Aries (devCO), jpa@devco.co
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################## |
libracore/erpnext | refs/heads/v12 | erpnext/hr/doctype/employee_promotion/employee_promotion.py | 16 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import getdate
from erpnext.hr.utils import update_employee
class EmployeePromotion(Document):
    """DocType controller for an Employee Promotion record."""

    def validate(self):
        # A promotion cannot be recorded for an employee who has left.
        if frappe.get_value("Employee", self.employee, "status") == "Left":
            frappe.throw(_("Cannot promote Employee with status Left"))

    def before_submit(self):
        # Future-dated promotions must stay in draft until the date arrives.
        if getdate(self.promotion_date) > getdate():
            frappe.throw(_("Employee Promotion cannot be submitted before Promotion Date "),
                frappe.DocstatusTransitionError)

    def on_submit(self):
        # Apply the promotion details to the Employee record.
        emp = frappe.get_doc("Employee", self.employee)
        emp = update_employee(emp, self.promotion_details, date=self.promotion_date)
        emp.save()

    def on_cancel(self):
        # Roll the promotion details back on the Employee record.
        emp = frappe.get_doc("Employee", self.employee)
        emp = update_employee(emp, self.promotion_details, cancel=True)
        emp.save()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.2/Lib/importlib/test/extension/test_case_sensitivity.py | 50 | import sys
from test import support
import unittest
from importlib import _bootstrap
from .. import util
from . import util as ext_util
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(unittest.TestCase):

    # Verifies that extension-module lookup honours file-name case unless
    # PYTHONCASEOK is set; only runs on case-insensitive filesystems
    # (enforced by the class decorator).

    def find_module(self):
        # Look the module up under a deliberately wrong-cased name.
        good_name = ext_util.NAME
        bad_name = good_name.upper()
        assert good_name != bad_name
        finder = _bootstrap._FileFinder(ext_util.PATH,
                                        _bootstrap._ExtensionFinderDetails())
        return finder.find_module(bad_name)

    def test_case_sensitive(self):
        # Without PYTHONCASEOK the wrong-cased name must not be found.
        with support.EnvironmentVarGuard() as env:
            env.unset('PYTHONCASEOK')
            loader = self.find_module()
            self.assertIsNone(loader)

    def test_case_insensitivity(self):
        # With PYTHONCASEOK set, the lookup ignores case and returns a loader.
        with support.EnvironmentVarGuard() as env:
            env.set('PYTHONCASEOK', '1')
            loader = self.find_module()
            self.assertTrue(hasattr(loader, 'load_module'))
def test_main():
    # No extension module is available on this platform; nothing to test.
    if ext_util.FILENAME is None:
        return
    support.run_unittest(ExtensionModuleCaseSensitivityTest)

if __name__ == '__main__':
    test_main()
|
BackupGGCode/python-for-android | refs/heads/master | python-build/python-libs/gdata/src/gdata/tlslite/BaseDB.py | 238 | """Base class for SharedKeyDB and VerifierDB."""
import anydbm
import thread
class BaseDB:
    """Base class for SharedKeyDB and VerifierDB.

    Wraps either an on-disk anydbm database (when *filename* is given) or a
    plain in-memory dict, and serialises access with a lock so one DB object
    can be shared between threads.  (Python 2 code: uses `anydbm`/`thread`.)
    Subclasses supply _getItem/_setItem/_checkItem to (de)serialise values.
    """

    def __init__(self, filename, type):
        self.type = type
        self.filename = filename
        if self.filename:
            # Deferred: create() or open() initialises the on-disk DB.
            self.db = None
        else:
            self.db = {}
        self.lock = thread.allocate_lock()

    def create(self):
        """Create a new on-disk database.

        @raise anydbm.error: If there's a problem creating the database.
        """
        if self.filename:
            self.db = anydbm.open(self.filename, "n") #raises anydbm.error
            # Tag the file so open() can verify the database type later.
            self.db["--Reserved--type"] = self.type
            self.db.sync()
        else:
            self.db = {}

    def open(self):
        """Open a pre-existing on-disk database.

        @raise anydbm.error: If there's a problem opening the database.
        @raise ValueError: If the database is not of the right type.
        """
        if not self.filename:
            raise ValueError("Can only open on-disk databases")
        self.db = anydbm.open(self.filename, "w") #raises anydbm.error
        try:
            if self.db["--Reserved--type"] != self.type:
                raise ValueError("Not a %s database" % self.type)
        except KeyError:
            raise ValueError("Not a recognized database")

    def __getitem__(self, username):
        if self.db == None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            valueStr = self.db[username]
        finally:
            self.lock.release()
        # Subclasses decode the stored string via _getItem().
        return self._getItem(username, valueStr)

    def __setitem__(self, username, value):
        if self.db == None:
            raise AssertionError("DB not open")
        # Encode outside the lock; only the store itself is serialised.
        valueStr = self._setItem(username, value)
        self.lock.acquire()
        try:
            self.db[username] = valueStr
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()

    def __delitem__(self, username):
        if self.db == None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            del(self.db[username])
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()

    def __contains__(self, username):
        """Check if the database contains the specified username.

        @type username: str
        @param username: The username to check for.

        @rtype: bool
        @return: True if the database contains the username, False
        otherwise.
        """
        if self.db == None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            return self.db.has_key(username)
        finally:
            self.lock.release()

    def check(self, username, param):
        # Delegates validation of *param* to the subclass's _checkItem().
        value = self.__getitem__(username)
        return self._checkItem(value, username, param)

    def keys(self):
        """Return a list of usernames in the database.

        @rtype: list
        @return: The usernames in the database.
        """
        if self.db == None:
            raise AssertionError("DB not open")
        self.lock.acquire()
        try:
            usernames = self.db.keys()
        finally:
            self.lock.release()
        # Hide internal bookkeeping entries such as "--Reserved--type".
        usernames = [u for u in usernames if not u.startswith("--Reserved--")]
        return usernames
hammerlab/gtfparse | refs/heads/master | gtfparse/parsing_error.py | 1 | # Copyright (c) 2015-2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
class ParsingError(Exception):
    """Raised when a GTF source cannot be parsed."""
|
ezibyte/EziSocial-PhotoExample | refs/heads/master | Cocos2dx-3x/PhotoExample/cocos2d/plugin/tools/toolsForGame/steps.py | 255 | import sys, string, os
from Tkinter import *
# define class step
class step:
    """Base class for one page of the plugin-setup wizard."""

    # Tk Frame holding this step's widgets; populated by initStep().
    stepFrame = None

    def initStep(self, root):
        """Build the step's widgets under *root*; the base step shows nothing."""
        return

    def checkStep(self):
        """Validate user input; return an error string, or None when valid."""
        return None
# define class step1
class step1(step):
    """Wizard page 1: asks for the game's android project path."""

    # Tk Entry widget holding the user-supplied path; set by initStep().
    step_entry = None

    def initStep(self, root):
        self.stepFrame = Frame(root)
        step_tip = Label(self.stepFrame, text="Input the android project path of your game:")
        step_tip.pack(anchor='nw', padx=30)
        step_tip2 = Label(self.stepFrame, text="(Pleasd avoid using spaces in your project path)")
        step_tip2.pack(anchor='nw', padx=30)
        self.step_entry = Entry(self.stepFrame)
        self.step_entry.pack(anchor='nw', fill=X, padx=30)
        return

    def checkStep(self):
        # Valid only when the directory exists and contains AndroidManifest.xml.
        tipStr = None
        projPath = self.step_entry.get()
        haveDir = os.path.exists(projPath)
        isPorj = os.path.exists(projPath + '/AndroidManifest.xml')
        if projPath == None or len(projPath) == 0 or haveDir == False or isPorj == False:
            tipStr = 'The project path is wrong'
        return tipStr

    def getPath(self):
        return self.step_entry.get()
# define class step2
class step2(step):
    """Wizard page 2: lets the user tick the plugins to integrate."""

    # Kept for backward compatibility; initStep() now rebinds these as
    # per-instance lists.  As class-level mutable attributes they were
    # shared by every step2 instance, so widgets and StringVars from one
    # wizard run leaked into the next.
    checkBtns = []
    checkValues = []

    def initStep(self, root, pluginList):
        """Create one Checkbutton (plus its StringVar) per plugin name."""
        # Fresh per-instance lists: fixes the shared-class-attribute bug.
        self.checkBtns = []
        self.checkValues = []
        self.stepFrame = Frame(root)
        step_tip = Label(self.stepFrame, text="Select plugins you needed:")
        step_tip.pack(anchor='nw', padx=30)
        for plugin in pluginList:
            var = StringVar()
            self.checkValues.append(var)
            btn = Checkbutton(self.stepFrame, text=plugin, variable=var, onvalue=plugin, offvalue='')
            btn.pack(anchor='nw', padx=50)
            self.checkBtns.append(btn)
        return

    def checkStep(self):
        """Return an error string unless at least one plugin is ticked."""
        for var in self.checkValues:
            if len(var.get()) != 0:
                return None
        return 'At least select one plugin'

    def getSelectedPlugins(self):
        """Return the onvalue of every ticked checkbox."""
        return [var.get() for var in self.checkValues if len(var.get()) != 0]
|
broferek/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_firewall_wildcard_fqdn_group.py | 13 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_wildcard_fqdn_group
short_description: Config global Wildcard FQDN address groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_wildcard_fqdn feature and group category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_wildcard_fqdn_group:
description:
- Config global Wildcard FQDN address groups.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- GUI icon color.
type: int
comment:
description:
- Comment.
type: str
member:
description:
- Address group members.
type: list
suboptions:
name:
description:
- Address name. Source firewall.wildcard-fqdn.custom.name.
required: true
type: str
name:
description:
- Address group name.
required: true
type: str
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
visibility:
description:
- Enable/disable address visibility.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Config global Wildcard FQDN address groups.
fortios_firewall_wildcard_fqdn_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_wildcard_fqdn_group:
color: "3"
comment: "Comment."
member:
-
name: "default_name_6 (source firewall.wildcard-fqdn.custom.name)"
name: "default_name_7"
uuid: "<your_own_value>"
visibility: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate *fos* against the FortiGate described in *data*.

    Enables debug output, selects HTTP/HTTPS transport and logs in with the
    credentials from the module parameters.
    """
    fos.debug('on')
    # HTTPS stays on unless the caller explicitly disabled it.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_firewall_wildcard_fqdn_group_data(json):
    """Project *json* onto the module's known options, dropping None values."""
    option_list = ['color', 'comment', 'member',
                   'name', 'uuid', 'visibility']
    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively replace '_' with '-' in every dict key of *data*.

    Handles nested lists and dicts; scalars are returned unchanged.
    """
    if isinstance(data, list):
        # Bug fix: the original assigned the converted element back to the
        # loop variable only, so converted dicts *inside lists* were silently
        # discarded and kept their underscore keys.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def firewall_wildcard_fqdn_group(data, fos):
    """Apply the desired state of a wildcard-FQDN address group.

    Sends a set (state 'present') or delete (state 'absent') request through
    *fos*, using the options filtered from data['firewall_wildcard_fqdn_group'].
    Returns the FortiOS response dict.
    """
    vdom = data['vdom']
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['firewall_wildcard_fqdn_group'] and data['firewall_wildcard_fqdn_group']['state']:
        # Bug fix: the original tested the truthiness of the whole section
        # dict here instead of the legacy nested 'state' value.
        state = data['firewall_wildcard_fqdn_group']['state']
    else:
        state = True
    firewall_wildcard_fqdn_group_data = data['firewall_wildcard_fqdn_group']
    filtered_data = underscore_to_hyphen(filter_firewall_wildcard_fqdn_group_data(firewall_wildcard_fqdn_group_data))

    if state == "present":
        return fos.set('firewall.wildcard-fqdn',
                       'group',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('firewall.wildcard-fqdn',
                          'group',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """Treat a DELETE that hit a missing object (HTTP 404) as success too."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_wildcard_fqdn(data, fos):
    """Dispatch to the group endpoint and normalise the FortiOS response.

    Returns the tuple (is_error, has_changed, result).

    NOTE(review): if data['firewall_wildcard_fqdn_group'] is falsy, ``resp``
    is never assigned and the return statement raises NameError — confirm
    that callers always supply this section (the argument spec declares it
    optional with default None).
    """
    if data['firewall_wildcard_fqdn_group']:
        resp = firewall_wildcard_fqdn_group(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Entry point: build the argument spec, pick the transport and run.

    Two transports are supported: the Ansible HTTPAPI connection plugin
    (preferred) and the legacy fortiosapi library, selected when host,
    username and password are all provided.
    """
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "firewall_wildcard_fqdn_group": {
            "required": False, "type": "dict", "default": None,
            "options": {
                # Legacy per-section state, deprecated in favour of the
                # top-level 'state' parameter.
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "color": {"required": False, "type": "int"},
                "comment": {"required": False, "type": "str"},
                "member": {"required": False, "type": "list",
                           "options": {
                               "name": {"required": True, "type": "str"}
                           }},
                "name": {"required": True, "type": "str"},
                "uuid": {"required": False, "type": "str"},
                "visibility": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI transport: requires a persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_firewall_wildcard_fqdn(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: import lazily so the module loads without it.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_firewall_wildcard_fqdn(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
Dino0631/RedRain-Bot | refs/heads/develop | lib/youtube_dl/extractor/atvat.py | 39 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
unescapeHTML,
)
class ATVAtIE(InfoExtractor):
    """Extractor for videos hosted on atv.at (Austrian broadcaster ATV)."""

    _VALID_URL = r'https?://(?:www\.)?atv\.at/(?:[^/]+/){2}(?P<id>[dv]\d+)'
    _TESTS = [{
        'url': 'http://atv.at/aktuell/di-210317-2005-uhr/v1698449/',
        'md5': 'c3b6b975fb3150fc628572939df205f2',
        'info_dict': {
            'id': '1698447',
            'ext': 'mp4',
            'title': 'DI, 21.03.17 | 20:05 Uhr 1/1',
        }
    }, {
        'url': 'http://atv.at/aktuell/meinrad-knapp/d8416/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The player configuration is embedded HTML-escaped in the data-jsb
        # attribute of the FlashPlayer element.
        video_data = self._parse_json(unescapeHTML(self._search_regex(
            r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="([^"]+)"',
            webpage, 'player data')), display_id)['config']['initial_video']

        video_id = video_data['id']
        video_title = video_data['title']

        # Episodes are split into multiple parts; collect one entry per part.
        parts = []
        for part in video_data.get('parts', []):
            part_id = part['id']
            part_title = part['title']

            formats = []
            for source in part.get('sources', []):
                source_url = source.get('src')
                if not source_url:
                    continue
                ext = determine_ext(source_url)
                if ext == 'm3u8':
                    # HLS manifest: expand into its variant formats.
                    formats.extend(self._extract_m3u8_formats(
                        source_url, part_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
                else:
                    formats.append({
                        'format_id': source.get('delivery'),
                        'url': source_url,
                    })
            self._sort_formats(formats)

            parts.append({
                'id': part_id,
                'title': part_title,
                'thumbnail': part.get('preview_image_url'),
                'duration': int_or_none(part.get('duration')),
                'is_live': part.get('is_livestream'),
                'formats': formats,
            })

        # Expose the parts as a multi_video playlist under the episode id.
        return {
            '_type': 'multi_video',
            'id': video_id,
            'title': video_title,
            'entries': parts,
        }
|
duncan-brown/pycbc | refs/heads/master | pycbc/events/threshold_cuda.py | 10 | # Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy, mako.template
from pycuda.tools import dtype_to_ctype
from pycuda.elementwise import ElementwiseKernel
from pycuda.compiler import SourceModule
from .eventmgr import _BaseThresholdCluster
import pycbc.scheme
# Elementwise kernel body: compares |in[i]| against the threshold and, using
# the atomic counter bn[0] (reset by thread 0), compacts the passing values
# and their indices into outv/outl.
threshold_op = """
if (i == 0)
    bn[0] = 0;

pycuda::complex<float> val = in[i];
if ( abs(val) > threshold){
    int n_w = atomicAdd(bn, 1);
    outv[n_w] = val;
    outl[n_w] = i;
}
"""

threshold_kernel = ElementwiseKernel(
            " %(tp_in)s *in, %(tp_out1)s *outv, %(tp_out2)s *outl, %(tp_th)s threshold, %(tp_n)s *bn" % {
                "tp_in": dtype_to_ctype(numpy.complex64),
                "tp_out1": dtype_to_ctype(numpy.complex64),
                "tp_out2": dtype_to_ctype(numpy.uint32),
                "tp_th": dtype_to_ctype(numpy.float32),
                "tp_n": dtype_to_ctype(numpy.uint32),
            },
            threshold_op,
            "getstuff")

import pycuda.driver as drv

# Page-locked, device-mapped host buffers: the GPU writes results directly
# into host-visible memory, so no explicit device-to-host copy is needed.
# n holds the candidate count; val/loc hold candidate values and locations.
n = drv.pagelocked_empty((1), numpy.uint32, mem_flags=drv.host_alloc_flags.DEVICEMAP)
nptr = numpy.intp(n.base.get_device_pointer())

val = drv.pagelocked_empty((4096*256), numpy.complex64, mem_flags=drv.host_alloc_flags.DEVICEMAP)
vptr = numpy.intp(val.base.get_device_pointer())

loc = drv.pagelocked_empty((4096*256), numpy.int32, mem_flags=drv.host_alloc_flags.DEVICEMAP)
lptr = numpy.intp(loc.base.get_device_pointer())

# Minimal array-like holders exposing just the .gpudata/.flags attributes
# that the prepared kernel calls below expect.
class T():
    pass

tn = T()
tv = T()
tl = T()

tn.gpudata = nptr
tv.gpudata = vptr
tl.gpudata = lptr
tn.flags = tv.flags = tl.flags = n.flags
tkernel1 = mako.template.Template("""
#include <stdio.h>
__global__ void threshold_and_cluster(float2* in, float2* outv, int* outl, int window, float threshold){
int s = window * blockIdx.x;
int e = s + window;
// shared memory for chuck size candidates
__shared__ float svr[${chunk}];
__shared__ float svi[${chunk}];
__shared__ int sl[${chunk}];
// shared memory for the warp size candidates
__shared__ float svv[32];
__shared__ int idx[32];
int ml = -1;
float mvr = 0;
float mvi = 0;
float re;
float im;
// Iterate trought the entire window size chunk and find blockDim.x number
// of candidates
for (int i = s + threadIdx.x; i < e; i += blockDim.x){
re = in[i].x;
im = in[i].y;
if ((re * re + im * im) > (mvr * mvr + mvi * mvi)){
mvr = re;
mvi = im;
ml = i;
}
}
// Save the candidate from this thread to shared memory
svr[threadIdx.x] = mvr;
svi[threadIdx.x] = mvi;
sl[threadIdx.x] = ml;
__syncthreads();
if (threadIdx.x < 32){
int tl = threadIdx.x;
// Now that we have all the candiates for this chunk in shared memory
// Iterate through in the warp size to reduce to 32 candidates
for (int i = threadIdx.x; i < ${chunk}; i += 32){
re = svr[i];
im = svi[i];
if ((re * re + im * im) > (mvr * mvr + mvi * mvi)){
tl = i;
mvr = re;
mvi = im;
}
}
// Store the 32 candidates into shared memory
svv[threadIdx.x] = svr[tl] * svr[tl] + svi[tl] * svi[tl];
idx[threadIdx.x] = tl;
// Find the 1 candidate we are looking for using a manual log algorithm
if ((threadIdx.x < 16) && (svv[threadIdx.x] < svv[threadIdx.x + 16])){
svv[threadIdx.x] = svv[threadIdx.x + 16];
idx[threadIdx.x] = idx[threadIdx.x + 16];
}
if ((threadIdx.x < 8) && (svv[threadIdx.x] < svv[threadIdx.x + 8])){
svv[threadIdx.x] = svv[threadIdx.x + 8];
idx[threadIdx.x] = idx[threadIdx.x + 8];
}
if ((threadIdx.x < 4) && (svv[threadIdx.x] < svv[threadIdx.x + 4])){
svv[threadIdx.x] = svv[threadIdx.x + 4];
idx[threadIdx.x] = idx[threadIdx.x + 4];
}
if ((threadIdx.x < 2) && (svv[threadIdx.x] < svv[threadIdx.x + 2])){
svv[threadIdx.x] = svv[threadIdx.x + 2];
idx[threadIdx.x] = idx[threadIdx.x + 2];
}
// Save the 1 candidate maximum and location to the output vectors
if (threadIdx.x == 0){
if (svv[threadIdx.x] < svv[threadIdx.x + 1]){
idx[0] = idx[1];
svv[0] = svv[1];
}
if (svv[0] > threshold){
tl = idx[0];
outv[blockIdx.x].x = svr[tl];
outv[blockIdx.x].y = svi[tl];
outl[blockIdx.x] = sl[tl];
} else{
outl[blockIdx.x] = -1;
}
}
}
}
""")
tkernel2 = mako.template.Template("""
#include <stdio.h>
__global__ void threshold_and_cluster2(float2* outv, int* outl, float threshold, int window){
__shared__ int loc[${blocks}];
__shared__ float val[${blocks}];
int i = threadIdx.x;
int l = outl[i];
loc[i] = l;
if (l == -1)
return;
val[i] = outv[i].x * outv[i].x + outv[i].y * outv[i].y;
// Check right
if ( (i < (${blocks} - 1)) && (val[i + 1] > val[i]) ){
outl[i] = -1;
return;
}
// Check left
if ( (i > 0) && (val[i - 1] > val[i]) ){
outl[i] = -1;
return;
}
}
""")
# Cache of compiled kernel pairs keyed by (threads-per-block, block count).
tfn_cache = {}

def get_tkernel(slen, window):
    """Return ((threshold_fn, cluster_fn), nt, nb) for the given sizes.

    Kernels are compiled on first use for each (nt, nb) combination and
    cached for subsequent calls. Raises ValueError for unsupported window
    sizes or block counts.
    """
    if window < 32:
        raise ValueError("GPU threshold kernel does not support a window smaller than 32 samples")

    # Choose the number of threads per block from the window size.
    if window <= 4096:
        nt = 128
    elif window <= 16384:
        nt = 256
    elif window <= 32768:
        nt = 512
    else:
        nt = 1024

    # One block per window of the series.
    nb = int(numpy.ceil(slen / float(window)))

    if nb > 1024:
        raise ValueError("More than 1024 blocks not supported yet")

    key = (nt, nb)
    if key not in tfn_cache:
        mod = SourceModule(tkernel1.render(chunk=nt))
        mod2 = SourceModule(tkernel2.render(blocks=nb))
        fn = mod.get_function("threshold_and_cluster")
        fn.prepare("PPPif")
        fn2 = mod2.get_function("threshold_and_cluster2")
        fn2.prepare("PPfi")
        tfn_cache[key] = (fn, fn2)
    return tfn_cache[key], nt, nb
def threshold_and_cluster(series, threshold, window):
    """Return the (values, locations) of clustered triggers above threshold.

    Runs the two-stage GPU reduction: stage 1 finds the loudest sample per
    *window*, stage 2 keeps only local maxima across neighbouring windows.
    Results arrive in the module-level device-mapped buffers val/loc.
    """
    outl = tl.gpudata
    outv = tv.gpudata
    slen = len(series)
    series = series.data.gpudata
    (fn, fn2), nt, nb = get_tkernel(slen, window)
    # Square the threshold once so kernels compare against |x|^2 and avoid
    # a per-sample square root.
    threshold = numpy.float32(threshold * threshold)
    window = numpy.int32(window)

    # Host-side views of the device-mapped result buffers (one per block).
    cl = loc[0:nb]
    cv = val[0:nb]

    fn.prepared_call((nb, 1), (nt, 1, 1), series, outv, outl, window, threshold,)
    fn2.prepared_call((1, 1), (nb, 1, 1), outv, outl, threshold, window)
    # Wait for the kernels to finish before reading the mapped buffers.
    pycbc.scheme.mgr.state.context.synchronize()
    # Stage 2 marks rejected windows with location -1.
    w = (cl != -1)
    return cv[w], cl[w]
class CUDAThresholdCluster(_BaseThresholdCluster):
    """Reusable threshold/cluster engine bound to one complex time series."""

    def __init__(self, series):
        # Device pointer of the input series plus the shared output buffers.
        self.series = series.data.gpudata

        self.outl = tl.gpudata
        self.outv = tv.gpudata
        self.slen = len(series)

    def threshold_and_cluster(self, threshold, window):
        """Return (values, locations) of clustered triggers above *threshold*.

        Same two-stage GPU reduction as the module-level function, but reuses
        the series pointer captured at construction time.
        """
        # Compare against |x|^2 in the kernels to avoid square roots.
        threshold = numpy.float32(threshold * threshold)
        window = numpy.int32(window)
        (fn, fn2), nt, nb = get_tkernel(self.slen, window)
        fn = fn.prepared_call
        fn2 = fn2.prepared_call
        # Host-side views of the device-mapped result buffers (one per block).
        cl = loc[0:nb]
        cv = val[0:nb]

        fn((nb, 1), (nt, 1, 1), self.series, self.outv, self.outl, window, threshold,)
        fn2((1, 1), (nb, 1, 1), self.outv, self.outl, threshold, window)
        # Wait for the kernels to finish before reading the mapped buffers.
        pycbc.scheme.mgr.state.context.synchronize()
        # Stage 2 marks rejected windows with location -1.
        w = (cl != -1)
        return cv[w], cl[w]
def _threshold_cluster_factory(series):
    # The CUDA implementation needs no per-series specialisation, so the
    # factory ignores *series* and always returns the same class.
    return CUDAThresholdCluster
|
abezgauzdina/knesset-data-pipelines | refs/heads/master | datapackage_pipelines_knesset/common/base_processors/base_resource.py | 1 | from datapackage_pipelines_knesset.common.base_processors.base import BaseProcessor
class BaseResourceProcessor(BaseProcessor):
    """Base class for processing a single resource"""

    def __init__(self, *args, **kwargs):
        super(BaseResourceProcessor, self).__init__(*args, **kwargs)
        # the descriptor of the selected resource (only 1 resource is processed by this processor)
        self._resource_descriptor = None
        # the selected resource number
        self._resource_number = None

    def _get_schema(self, resource_descriptor):
        # can be extended to provide a hard-coded schema
        # or to modify the schema from the input resource descriptor
        return resource_descriptor.get("schema", {"fields": []})

    def _get_output_resource_name(self):
        # name of the produced resource, taken from the pipeline parameters
        return self._parameters.get("resource")

    def _get_output_resource_path(self):
        # csv path for the output resource, derived from its name
        return "data/{}.csv".format(self._get_output_resource_name())

    def _is_matching_resource_descriptor(self, resource_number, resource_descriptor):
        # see the comment on _is_matching_resource_number
        return resource_descriptor["name"] == self._get_output_resource_name()

    def _is_matching_resource_number(self, resource_number, resource_descriptor=None):
        # this is called from both _filter_resource_descriptors and filter_resources
        # the first one that matches will store the resource number
        # for example, if resource_descriptor matched an input resource -
        # it will use the same number for matching the output resource
        if self._resource_number is None:
            if not resource_descriptor:
                resource_descriptor = self._get_resource_descriptor(resource_number)
            if self._is_matching_resource_descriptor(resource_number, resource_descriptor):
                self._resource_number = resource_number
                return True
            else:
                return False
        else:
            return self._resource_number == resource_number

    def _filter_resource_descriptors(self, resource_descriptors):
        # pass through all descriptors, rewriting only the matching one
        filtered_descriptors = []
        for resource_number, resource_descriptor in enumerate(resource_descriptors):
            if self._is_matching_resource_number(resource_number, resource_descriptor):
                resource_descriptor = self._filter_resource_descriptor(resource_number, resource_descriptor)
            filtered_descriptors.append(resource_descriptor)
        return filtered_descriptors

    def _filter_resource_descriptor(self, resource_number, resource_descriptor):
        # allows to modify the resource descriptor
        # if you just need to modify the schema - you should extend _get_schema instead
        self._schema = self._get_schema(resource_descriptor)
        resource_descriptor = dict(resource_descriptor, **{"name": self._get_output_resource_name(),
                                                           "path": self._get_output_resource_path(),
                                                           "schema": self._schema})
        self._resource_descriptor = resource_descriptor
        return resource_descriptor

    def _filter_resources(self, resources):
        # modified to only call filter methods for the matching resource
        # other resources are yielded as-is without any processing
        for resource_number, resource_data in enumerate(resources):
            if self._is_matching_resource_number(resource_number):
                yield self._filter_resource(resource_number, resource_data)
            else:
                yield resource_data

    def _filter_resource(self, resource_number, resource_data):
        # this method is called only for the matching resource
        # it should be extended to provide code to run before or after iterating over the data
        self._delay_limit_initialize()
        yield from super(BaseResourceProcessor, self)._filter_resource(resource_number, resource_data)

    def _filter_row(self, resource_number, row):
        # this method is called only for the matching resource's rows;
        # rows arriving past the delay limit are counted and dropped
        for row in super(BaseResourceProcessor, self)._filter_row(resource_number, row):
            if self._delay_limit_check():
                self._incr_stat("delay limit skipped rows")
            else:
                yield row
|
prometheanbob/clowdflows | refs/heads/master | workflows/views.py | 2 | # helperji, context stvari
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.core import serializers
from django.utils import simplejson
from workflows.urls import *
from workflows.helpers import *
import workflows.interaction_views
import workflows.visualization_views
import sys
import traceback
from django.views.decorators.cache import never_cache
from jsonview.decorators import json_view
# modeli
from workflows.models import *
from django.contrib.auth.models import User
from workflows.utils import *
# authentication helpers
from django.contrib.auth.decorators import login_required
#settings
from mothra.settings import DEBUG, FILES_FOLDER
# miscellaneous
import os
from workflows import module_importer
def setattr_local(name, value, package):
    """Bind *name* to *value* in this module's namespace.

    Used as the callback for module_importer so that package libraries can
    inject their view functions here; *package* is accepted for the callback
    signature but not needed.
    """
    globals()[name] = value
module_importer.import_all_packages_libs("views",setattr_local)
@login_required
def get_category(request):
    """Render a single category owned by the requesting user (AJAX only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    category = get_object_or_404(Category, pk=request.POST['category_id'])
    if category.user != request.user:
        return HttpResponse(status=400)
    return render(request, 'category.html', {'category': category})
@login_required
def index(request):
    """Render the editor index page for the user's active workflow.

    Redirects to creating a new workflow when the user has no profile or no
    active workflow yet.
    """
    try:
        if request.user.userprofile.active_workflow is None:
            return redirect('new workflow')
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; a missing profile or dangling reference
        # still falls through to creating a fresh workflow.
        return redirect('new workflow')
    categories = Category.objects.all()
    user_categories = request.user.categories.all()
    user_widgets = request.user.widgets.filter(category=None)
    return render(request, 'index.html', {'categories': categories, 'user': request.user, 'user_categories': user_categories, 'user_widgets': user_widgets})
@login_required
def new_workflow(request):
    """Create an empty workflow, make it the user's active one, open editor."""
    workflow = Workflow()
    workflow.user = request.user
    workflow.save()
    profile = request.user.userprofile
    profile.active_workflow = workflow
    profile.save()
    return redirect('editor')
@login_required
def open_workflow(request, workflow_id):
    """Make *workflow_id* the user's active workflow (owner only)."""
    workflow = get_object_or_404(Workflow, pk=workflow_id)
    if workflow.user != request.user:
        return HttpResponse(status=400)
    profile = request.user.userprofile
    profile.active_workflow = workflow
    profile.save()
    return redirect('editor')
@login_required
def widget_progress(request):
    """Report a widget's progress: its percentage while running, "100" once
    it finished, and "-1" otherwise."""
    widget = get_object_or_404(Widget, pk=request.GET['widget_id'])
    if widget.running:
        return HttpResponse(widget.progress)
    if widget.progress == 100:
        return HttpResponse("100")
    return HttpResponse("-1")
@login_required
def add_widget(request):
    """Add a widget to a workflow (AJAX only).

    Three modes, chosen from the POST payload:
    - "abstractwidget_id": instantiate a new widget from an AbstractWidget
      template, cloning its inputs (with options) and outputs.
    - "copywidget_id" of a regular widget: deep-copy it, including current
      input values.
    - "copywidget_id" of a subprocess widget: deep-copy it and recursively
      copy its linked inner workflow, remapping widget/input/output ids.
    Renders the widgets.html fragment for the freshly created widget.
    """
    if request.is_ajax() or DEBUG:
        if request.POST.has_key("abstractwidget_id"):
            aw = get_object_or_404(AbstractWidget, pk=request.POST['abstractwidget_id'])
            workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
            if (workflow.user==request.user):
                w = Widget()
                w.workflow = workflow
                # Place the widget near the visible top-left corner, nudging
                # it down until the spot is free.
                w.x = int(request.POST['scrollLeft'])+50
                y = int(request.POST['scrollTop'])+50
                while workflow.widgets.filter(y=y,x=w.x).count()>0:
                    y = y + 100
                w.y = y
                w.name = aw.name
                w.abstract_widget = aw
                w.type = 'regular'
                w.save()
                # Clone inputs; parameters and plain inputs are numbered in
                # separate order sequences.
                inputOrder = 0
                paramOrder = 0
                for i in aw.inputs.all():
                    j = Input()
                    j.name = i.name
                    j.short_name = i.short_name
                    j.description = i.description
                    j.variable = i.variable
                    j.widget = w
                    j.required = i.required
                    j.parameter = i.parameter
                    j.value = None
                    if (i.parameter):
                        paramOrder += 1
                        j.order = paramOrder
                    else:
                        inputOrder += 1
                        j.order = inputOrder
                    # Multi-inputs start without a default value.
                    if not i.multi:
                        j.value = i.default
                    j.parameter_type = i.parameter_type
                    if i.multi:
                        j.multi_id = i.id
                    j.save()
                    for k in i.options.all():
                        o = Option()
                        o.name = k.name
                        o.value = k.value
                        o.input = j
                        o.save()
                # Clone outputs with their own order sequence.
                outputOrder = 0
                for i in aw.outputs.all():
                    j = Output()
                    j.name = i.name
                    j.short_name = i.short_name
                    j.description = i.description
                    j.variable = i.variable
                    j.widget = w
                    outputOrder += 1
                    j.order = outputOrder
                    j.save()
                w.defered_outputs = w.outputs.defer("value").all()
                w.defered_inputs = w.inputs.defer("value").all()
                return render(request, 'widgets.html', {'widgets':[w,]})
            else:
                return HttpResponse(status=400)
        else:
            # Copy mode: the source is an existing Widget, not a template.
            aw = get_object_or_404(Widget, pk=request.POST['copywidget_id'])
            if aw.type == 'regular':
                workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
                if (workflow.user==request.user):
                    w = Widget()
                    w.workflow = workflow
                    w.x = int(request.POST['scrollLeft'])+50
                    y = int(request.POST['scrollTop'])+50
                    while workflow.widgets.filter(y=y,x=w.x).count()>0:
                        y = y + 100
                    w.y = y
                    w.name = aw.name
                    w.abstract_widget = aw.abstract_widget
                    w.type = aw.type
                    w.save()
                    # Copy inputs verbatim, including current values and
                    # multi ids.
                    for i in aw.inputs.all():
                        j = Input()
                        j.name = i.name
                        j.short_name = i.short_name
                        j.description = i.description
                        j.variable = i.variable
                        j.widget = w
                        j.required = i.required
                        j.parameter = i.parameter
                        j.parameter_type = i.parameter_type
                        j.value = i.value
                        j.multi_id = i.multi_id
                        j.save()
                        for k in i.options.all():
                            o = Option()
                            o.name = k.name
                            o.value = k.value
                            o.input = j
                            o.save()
                    for i in aw.outputs.all():
                        j = Output()
                        j.name = i.name
                        j.short_name = i.short_name
                        j.description = i.description
                        j.variable = i.variable
                        j.widget = w
                        j.save()
                    w.defered_outputs = w.outputs.defer("value").all()
                    w.defered_inputs = w.inputs.defer("value").all()
                    return render(request, 'widgets.html', {'widgets':[w,]})
            elif aw.type=='subprocess':
                workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
                if (workflow.user==request.user):
                    # Maps from original pk -> copied pk, used when cloning
                    # the inner workflow so its connections point at the
                    # copied widgets/inputs/outputs.
                    widget_conversion = {}
                    input_conversion = {}
                    output_conversion = {}
                    w = Widget()
                    w.workflow = workflow
                    w.x = int(request.POST['scrollLeft'])+50
                    y = int(request.POST['scrollTop'])+50
                    while workflow.widgets.filter(y=y,x=w.x).count()>0:
                        y = y + 100
                    w.y = y
                    w.name = aw.name
                    w.abstract_widget = aw.abstract_widget
                    w.type = aw.type
                    w.save()
                    widget_conversion[aw.pk]=w.pk
                    for i in aw.inputs.all():
                        j = Input()
                        j.name = i.name
                        j.short_name = i.short_name
                        j.description = i.description
                        j.variable = i.variable
                        j.widget = w
                        j.required = i.required
                        j.parameter = i.parameter
                        j.parameter_type = i.parameter_type
                        j.value = i.value
                        j.multi_id = i.multi_id
                        j.save()
                        input_conversion[i.pk]=j.pk
                        for k in i.options.all():
                            o = Option()
                            o.name = k.name
                            o.value = k.value
                            o.input = j
                            o.save()
                    for i in aw.outputs.all():
                        j = Output()
                        j.name = i.name
                        j.short_name = i.short_name
                        j.description = i.description
                        j.variable = i.variable
                        j.widget = w
                        j.save()
                        output_conversion[i.pk]=j.pk
                    # Recursively copy the linked inner workflow using the
                    # pk conversion maps built above.
                    workflows.models.copy_workflow(aw.workflow_link, request.user, widget_conversion,input_conversion,output_conversion,w)
                    w.defered_outputs = w.outputs.defer("value").all()
                    w.defered_inputs = w.inputs.defer("value").all()
                    return render(request, 'widgets.html', {'widgets':[w,]})
                else:
                    return HttpResponse(status=400)
            else:
                return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def save_position(request):
    """Persist a widget's (x, y) canvas position (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    if widget.workflow.user != request.user:
        return HttpResponse(status=400)
    widget.x = request.POST['x']
    widget.y = request.POST['y']
    widget.save()
    return HttpResponse("Ok!")
@login_required
def add_connection(request):
    """Connect an output to an input within one workflow (AJAX only).

    Replaces any existing connection into the input, rejects connections
    that would create a cycle (restoring/removing as needed), and for
    multi-inputs spawns a fresh empty input slot. Responds with a JSON
    payload describing what was added/deleted and which widget to refresh.
    """
    if request.is_ajax() or DEBUG:
        deleted = -1
        added = -1
        refresh = -1
        refreshworkflow = -1
        success = False
        mimetype = 'application/javascript'
        message = ""
        previousExists=False
        i = Input.objects.defer("value").get(pk=request.POST['input_id'])
        #i = get_object_or_404(Input, pk=request.POST['input_id'])
        o = Output.objects.defer("value").get(pk=request.POST['output_id'])
        #o = get_object_or_404(Output, pk=request.POST['output_id'])
        if (i.widget.workflow==o.widget.workflow and i.widget.workflow.user == request.user):
            # An input accepts only one connection: reuse (and remember) an
            # existing one so it can be restored if the new link is invalid.
            if Connection.objects.filter(input=i).exists():
                previousExists=True
                new_c = Connection.objects.get(input=i)
                oldOutput = Output.objects.defer("value").get(pk=new_c.output_id)
                deleted=new_c.id
            else:
                new_c = Connection()
            new_c.input = i
            new_c.output = o
            new_c.workflow = i.widget.workflow
            new_c.save()
            # Cycle check runs on the saved graph; undo on failure.
            if not checkForCycles(i.widget,i.widget):
                if previousExists:
                    new_c.output=oldOutput
                    new_c.save()
                else:
                    new_c.delete()
                success = False
                message = "Adding this connection would result in a cycle in the workflow."
                data = simplejson.dumps({'message':message,'success':success,'deleted':deleted,'added':added,'input_id':request.POST['input_id'],'output_id':request.POST['output_id']})
                return HttpResponse(data,mimetype)
            added = new_c.id
            # Downstream results are stale now; mark the widget unfinished.
            new_c.input.widget.unfinish()
            if deleted==-1:
                # First connection into a multi-input: create a new empty
                # slot so another connection can be attached later.
                if new_c.input.multi_id != 0:
                    i = new_c.input
                    j = Input()
                    j.name = i.name
                    j.short_name = i.short_name
                    j.description = i.description
                    j.variable = i.variable
                    j.widget = i.widget
                    j.required = i.required
                    j.parameter = i.parameter
                    j.value = None
                    j.parameter_type = i.parameter_type
                    j.multi_id = i.multi_id
                    j.save()
                    refresh = i.widget.id
                    refreshworkflow = i.widget.workflow.id
            success = True
            message = "Connection added."
            data = simplejson.dumps({'message':message,'success':success,'deleted':deleted,'added':added,'input_id':request.POST['input_id'],'output_id':request.POST['output_id'],'refresh':refresh,'refreshworkflow':refreshworkflow})
            return HttpResponse(data,mimetype)
        else:
            message = "Cannot connect widgets from different workflows."
            data = simplejson.dumps({'message':message,'success':success,'deleted':deleted,'added':added,'input_id':request.POST['input_id'],'output_id':request.POST['output_id'],'refresh':refresh},)
            return HttpResponse(data,mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def delete_connection(request):
    """Delete a connection (AJAX only, owner only).

    For multi-inputs the whole surplus input slot is deleted instead, which
    cascades to the connection; the JSON response tells the client which
    widget to refresh.
    """
    if request.is_ajax() or DEBUG:
        c = get_object_or_404(Connection, pk=request.POST['connection_id'])
        if (c.input.widget.workflow.user!=request.user):
            return HttpResponse(status=400)
        # Downstream results are stale now; mark the widget unfinished.
        c.input.widget.unfinish()
        mimetype = 'application/javascript'
        refresh = -1
        refreshworkflow = -1
        already_deleted=False
        if c.input.multi_id != 0:
            # check how many inputs share this multi id; if more than one,
            # delete this surplus slot (the connection goes with it)
            if c.input.widget.inputs.filter(multi_id=c.input.multi_id).count()>1:
                refresh = c.input.widget.id
                refreshworkflow = c.input.widget.workflow.id
                c.input.delete()
                already_deleted=True
        if not already_deleted:
            c.delete()
        data = simplejson.dumps({'refresh':refresh,'refreshworkflow':refreshworkflow})
        return HttpResponse(data,mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def delete_widget(request):
    """Delete a widget and clean up related state (AJAX only, owner only).

    POST: widget_id.  Extra multi-inputs fed by this widget's outputs are
    removed (their owning widgets are reported back for redraw), and if the
    widget is a subprocess the client is told which workflow tab to close.
    Returns JSON {'refresh': [(widget_id, workflow_id), ...], 'delete_tab'}.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        # Authorization FIRST: the original called w.unfinish() before the
        # ownership check, letting any logged-in user reset the execution
        # state of an arbitrary widget.  Check, then mutate.
        if (w.workflow.user != request.user):
            return HttpResponse(status=400)
        w.unfinish()
        mimetype = 'application/javascript'
        refresh = []
        delete_tab = -1  # -1 means no subprocess tab needs closing
        # Multi-inputs downstream of this widget: if siblings with the same
        # multi_id remain, drop the now-dangling extra Input entirely.
        connections = Connection.objects.filter(output__widget=w).filter(input__multi_id__gt=0)
        for c in connections:
            if c.input.widget.inputs.filter(multi_id=c.input.multi_id).count() > 1:
                refresh.append((c.input.widget.id, c.input.widget.workflow.id))
                c.input.delete()
        if w.type == 'subprocess':
            # Closing a subprocess widget also closes its embedded workflow tab.
            delete_tab = w.workflow_link.id
        w.delete()
        data = simplejson.dumps({'refresh': list(set(refresh)), 'delete_tab': delete_tab})
        return HttpResponse(data, mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def delete_workflow(request):
    """Delete the workflow given by GET['workflow_id'] (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    workflow = get_object_or_404(Workflow, pk=request.GET['workflow_id'])
    if workflow.user != request.user:
        return HttpResponse(status=400)
    workflow.delete()
    return HttpResponse("Ok")
@login_required
def add_subprocess(request):
    """Create an empty sub-workflow and the 'subprocess' widget embedding it.

    AJAX only, owner only.  POST: active_workflow, scrollLeft, scrollTop
    (viewport offsets used to place the widget near the visible area).
    Returns the rendered widget snippet, or 400 on failure.
    """
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
        if (workflow.user==request.user):
            new_w = Workflow()
            new_w.name = "Untitled widget"
            new_w.user = request.user
            w = Widget()
            w.workflow = workflow
            w.workflow_link = new_w
            w.x = int(request.POST['scrollLeft'])+50
            y = int(request.POST['scrollTop'])+50
            # Slide down in 100px steps until this (x, y) spot is free so
            # repeatedly-added widgets do not stack exactly on top of each other.
            while workflow.widgets.filter(y=y,x=w.x).count()>0:
                y = y + 100
            w.y=y
            w.name = "Untitled widget"
            w.type = 'subprocess'
            w.save()
            # NOTE(review): new_w has no pk yet when w.save() runs, and w is
            # not re-saved afterwards -- confirm the workflow_link FK is
            # actually persisted on the widget row.
            new_w.widget = w
            new_w.save()
            # Preload I/O without their (potentially large) values for rendering.
            w.defered_outputs = w.outputs.defer("value").all()
            w.defered_inputs = w.inputs.defer("value").all()
            return render(request, 'widgets.html', {'widgets':[w,]})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def get_subprocess(request):
    """Return the linked sub-workflow's pk and name for a subprocess widget."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    payload = {
        'workflow_link': widget.workflow_link.pk,
        'workflow_name': widget.workflow_link.name,
    }
    return HttpResponse(simplejson.dumps(payload), 'application/javascript')
@login_required
def add_for(request):
    """Add a for-loop pair ('for_input' + 'for_output' widgets) to a subprocess.

    AJAX only, owner only.  POST: active_workflow, scrollLeft, scrollTop.
    Only allowed inside a subprocess workflow, and only one loop per
    subprocess.  Each loop endpoint is cross-linked with an Input/Output on
    the enclosing subprocess widget via inner_output/outer_input (and the
    mirror pair), so values can flow across the subprocess boundary.
    Returns the rendered widgets, or JSON {'message', 'success'} on refusal.
    """
    success = False
    mimetype = 'application/javascript'
    message = ""
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
        if (workflow.user==request.user):
            if workflow.widget==None:
                # Top-level workflows have no embedding widget to attach to.
                message = 'The for widgets can only be put in a subprocess.'
                data = simplejson.dumps({'message':message,'success':success})
                return HttpResponse(data,mimetype)
            elif workflow.widgets.filter(type='for_input').count()>0:
                message = 'This subprocess already has a for loop. Try deleting it and adding it again.'
                data = simplejson.dumps({'message':message,'success':success})
                return HttpResponse(data,mimetype)
            else:
                # --- loop entry point ------------------------------------
                for_input = Widget()
                for_input.workflow = workflow
                for_input.x=int(request.POST['scrollLeft'])+50
                y=int(request.POST['scrollTop'])+50
                # Find a free vertical slot so widgets do not overlap.
                while workflow.widgets.filter(y=y,x=for_input.x).count()>0:
                    y = y + 100
                for_input.y=y
                for_input.name = 'For input'
                for_input.type = 'for_input'
                for_input.save()
                # Output inside the subprocess, mirrored by an Input on the
                # enclosing subprocess widget (save order matters: each side
                # needs a pk before the back-reference is saved).
                output = Output()
                output.name = 'For input'
                output.short_name = 'for'
                output.variable = 'For'
                output.widget = for_input
                output.save()
                input = Input()
                input.widget = workflow.widget
                input.name = 'For input'
                input.short_name = 'for'
                input.variable = 'For'
                input.inner_output = output
                input.save()
                output.outer_input = input
                output.save()
                # --- loop exit point -------------------------------------
                widget = Widget()
                widget.workflow = workflow
                widget.x=int(request.POST['scrollLeft'])+200
                widget.y=int(request.POST['scrollTop'])+50
                widget.name = 'For output'
                widget.type = 'for_output'
                widget.save()
                input = Input()
                input.name = 'For output'
                input.short_name = 'for'
                input.variable = 'For'
                input.widget = widget
                input.save()
                output = Output()
                output.widget = workflow.widget
                output.name = 'For output'
                output.short_name = 'for'
                output.variable = 'For'
                output.inner_input = input
                output.save()
                input.outer_output = output
                input.save()
                # Preload I/O without values for the template.
                for_input.defered_outputs = for_input.outputs.defer("value").all()
                for_input.defered_inputs = for_input.inputs.defer("value").all()
                widget.defered_outputs = widget.outputs.defer("value").all()
                widget.defered_inputs = widget.inputs.defer("value").all()
                return render(request, 'widgets.html', {'widgets':[for_input,widget]})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def add_input(request):
    """Add an 'input' widget to a subprocess workflow (AJAX only, owner only).

    POST: active_workflow, scrollLeft, scrollTop.  The widget's Output is
    cross-linked (inner_output/outer_input) with a new Input created on the
    enclosing subprocess widget, exposing the value to the outer workflow.
    Returns the rendered widget, or JSON {'message', 'success'} on refusal.
    """
    success = False
    mimetype = 'application/javascript'
    message = ""
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
        if (workflow.user==request.user):
            if workflow.widget==None:
                # Top-level workflows have no enclosing widget to expose to.
                message = 'The input widget can only be put in a subprocess.'
                data = simplejson.dumps({'message':message,'success':success})
                return HttpResponse(data,mimetype)
            else:
                widget = Widget()
                widget.workflow = workflow
                widget.x=int(request.POST['scrollLeft'])+50
                y = int(request.POST['scrollTop'])+50
                # Find a free vertical slot so widgets do not overlap.
                while workflow.widgets.filter(y=y,x=widget.x).count()>0:
                    y = y + 100
                widget.y=y
                widget.name = 'Input'
                widget.type = 'input'
                widget.save()
                # Inner output of the 'input' widget ...
                output = Output()
                output.name = 'Input'
                output.short_name = 'inp'
                output.variable = 'Input'
                output.widget = widget
                output.save()
                # ... mirrored by an Input on the subprocess widget outside.
                # (Save order matters: each side needs a pk before the
                # back-reference is saved.)
                input = Input()
                input.widget = workflow.widget
                input.name = 'Input'
                input.short_name = 'inp'
                input.variable = 'Input'
                input.inner_output = output
                input.save()
                output.outer_input = input
                output.save()
                widget.defered_outputs = widget.outputs.defer("value").all()
                widget.defered_inputs = widget.inputs.defer("value").all()
                return render(request, 'widgets.html', {'widgets':[widget,]})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def add_output(request):
    """Add an 'output' widget to a subprocess workflow (AJAX only, owner only).

    POST: active_workflow, scrollLeft, scrollTop.  Mirror image of
    add_input: the widget's Input is cross-linked (inner_input/outer_output)
    with a new Output on the enclosing subprocess widget, exposing results
    to the outer workflow.  Returns the rendered widget, or JSON
    {'message', 'success'} on refusal.
    """
    success = False
    mimetype = 'application/javascript'
    message = ""
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['active_workflow'])
        if (workflow.user==request.user):
            if workflow.widget==None:
                # Top-level workflows have no enclosing widget to expose to.
                message = 'The output widget can only be put in a subprocess.'
                data = simplejson.dumps({'message':message,'success':success})
                return HttpResponse(data,mimetype)
            else:
                widget = Widget()
                widget.workflow = workflow
                widget.x=int(request.POST['scrollLeft'])+50
                y = int(request.POST['scrollTop'])+50
                # Find a free vertical slot so widgets do not overlap.
                while workflow.widgets.filter(y=y,x=widget.x).count()>0:
                    y = y + 100
                widget.y=y
                widget.name = 'Output'
                widget.type = 'output'
                widget.save()
                # Inner input of the 'output' widget ...
                input = Input()
                input.name = 'Output'
                input.short_name = 'out'
                input.variable = 'Output'
                input.widget = widget
                input.save()
                # ... mirrored by an Output on the subprocess widget outside.
                # (Save order matters: each side needs a pk before the
                # back-reference is saved.)
                output = Output()
                output.widget = workflow.widget
                output.name = 'Output'
                output.short_name = 'out'
                output.variable = 'Output'
                output.inner_input = input
                output.save()
                input.outer_output = output
                input.save()
                widget.defered_outputs = widget.outputs.defer("value").all()
                widget.defered_inputs = widget.inputs.defer("value").all()
                return render(request, 'widgets.html', {'widgets':[widget,]})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def synchronize_widgets(request):
    """Re-render every widget of a workflow (AJAX only, owner only).

    Fetches all inputs/outputs of the workflow in two bulk queries (values
    deferred) and attaches each widget's share before rendering.
    """
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
    if workflow.user != request.user:
        return HttpResponse(status=400)
    widgets = workflow.widgets.all()
    all_outputs = list(Output.objects.defer("value").filter(widget__workflow=workflow))
    all_inputs = list(Input.objects.defer("value").filter(widget__workflow=workflow))
    for widget in widgets:
        widget.defered_outputs = [o for o in all_outputs if o.widget_id == widget.id]
        widget.defered_inputs = [i for i in all_inputs if i.widget_id == widget.id]
    return render(request, 'widgets.html', {'widgets': widgets})
@login_required
def synchronize_connections(request):
    """Serialize all connections of a workflow as JSON (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
    if workflow.user != request.user:
        return HttpResponse(status=400)
    connections = Connection.objects.filter(input__widget__workflow=workflow)
    payload = serializers.serialize('json', connections)
    return HttpResponse(payload, 'application/javascript')
@login_required
def get_widget(request):
    """Render one widget snippet (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    if widget.workflow.user != request.user:
        return HttpResponse(status=400)
    # Preload I/O rows without their (potentially large) values.
    widget.defered_outputs = widget.outputs.defer("value").all()
    widget.defered_inputs = widget.inputs.defer("value").all()
    return render(request, 'widgets.html', {'widgets': [widget, ]})
@login_required
def get_parameters(request):
    """Render the parameter dialog for a widget (AJAX only, owner only).

    NOTE(review): this function is defined a second time further down in
    the file with an identical body; at import time the later definition
    shadows this one.  Consider removing one of them.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            return render(request, 'parameters.html', {'widget':w,'parameters':w.inputs.filter(parameter=True)})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def save_parameter(request):
    """Persist POST['value'] into a parameter Input (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    parameter = get_object_or_404(Input, pk=request.POST['input_id'])
    if parameter.widget.workflow.user != request.user:
        return HttpResponse(status=400)
    parameter.value = request.POST['value']
    parameter.save()
    # The owning widget's cached result is now stale.
    parameter.widget.unfinish()
    return HttpResponse(status=200)
@login_required
def get_configuration(request):
    """Render the configuration dialog for a widget (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    if widget.workflow.user != request.user:
        return HttpResponse(status=400)
    inputs = widget.inputs.filter(parameter=False)
    parameters = widget.inputs.filter(parameter=True)
    outputs = widget.outputs.all()
    # The benchmark output is the special 'clowdflows_elapsed' variable.
    benchmark_exists = any(o.variable == 'clowdflows_elapsed' for o in outputs)
    context = {'widget': widget, 'inputs': inputs, 'parameters': parameters,
               'outputs': outputs, 'benchmark_exists': benchmark_exists}
    return render(request, 'configuration.html', context)
@login_required
def save_configuration(request):
    """Apply the configuration dialog: reorder I/O and flip input/parameter roles.

    AJAX only, owner only.  POST lists 'inputs', 'params' and 'outputs' give
    the desired order (1-based) of each group; an input appearing in the
    other group has its `parameter` flag flipped.  Turning an input into a
    parameter severs its connections (their ids are reported back so the
    client can remove them).  POST['benchmark'] adds/removes the special
    'clowdflows_elapsed' output.  Returns JSON
    {'changed', 'reordered', 'deletedConnections'}.
    """
    if request.is_ajax() or DEBUG:
        widget = get_object_or_404(Widget, pk=request.POST['widgetId'])
        if (widget.workflow.user==request.user):
            inputs = request.POST.getlist('inputs')
            params = request.POST.getlist('params')
            outputs = request.POST.getlist('outputs')
            changed = False      # a role flip or benchmark toggle happened
            reordered = False    # any order field was updated
            deletedConnections = []
            # Entries listed under 'inputs' become plain (connectable) inputs.
            for (id, input) in enumerate(inputs):
                inp = get_object_or_404(Input, pk=input)
                id += 1  # orders are 1-based
                if (inp.widget.workflow.user!=request.user):
                    return HttpResponse(status=400)
                if (inp.parameter):
                    inp.parameter = False
                    changed = True
                    inp.save()
                if (inp.order != id):
                    inp.order = id
                    reordered = True
                    inp.save()
            # Entries listed under 'params' become parameters.
            for (id, input) in enumerate(params):
                inp = get_object_or_404(Input, pk=input)
                id += 1
                if (inp.widget.workflow.user!=request.user):
                    return HttpResponse(status=400)
                if (not inp.parameter):
                    #need to be careful if connections are set up to this input and need to be removed
                    for c in Connection.objects.filter(input=inp):
                        deletedConnections.append(c.id)
                        c.delete()
                    inp.parameter = True
                    changed = True
                    inp.save()
                if (inp.order != id):
                    inp.order = id
                    reordered = True
                    inp.save()
            # Outputs only get reordered.
            for (id, output) in enumerate(outputs):
                out = get_object_or_404(Output, pk = output)
                id += 1
                if (out.widget.workflow.user!=request.user):
                    return HttpResponse(status=400)
                if (out.order != id):
                    out.order = id
                    reordered = True
                    out.save()
            # Benchmark toggle: create/remove the elapsed-time output.
            if request.POST.get('benchmark')=='true':
                if widget.outputs.filter(variable='clowdflows_elapsed').count()==0:
                    new_o = Output()
                    new_o.widget = widget
                    new_o.variable = 'clowdflows_elapsed'
                    new_o.name = 'Elapsed time'
                    new_o.short_name = 'bmk'
                    new_o.order=99  # keep it visually last
                    new_o.save()
                    changed = True
                    reordered = True
            if request.POST.get('benchmark')=='false':
                o = widget.outputs.filter(variable='clowdflows_elapsed')
                if len(o)>0:
                    o.delete()
                    changed = True
                    reordered = True
            if (changed):
                # Role changes invalidate the widget's cached result.
                widget.unfinish()
            data = simplejson.dumps({'changed':changed,'reordered':reordered, 'deletedConnections':deletedConnections})
            mimetype = 'application/javascript'
            return HttpResponse(data, mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def save_designation(request):
    """Mark each posted AbstractInput as an 'input' or a 'parameter'.

    AJAX only.  Each POST key is an AbstractInput pk; its value is either
    "input" or "parameter".  When switching to parameter, non-checkbox
    inputs default to the 'text' parameter type.

    NOTE(review): unlike the other views here there is no ownership check
    on the AbstractInput -- confirm whether designation should be limited
    to the owning user.
    """
    if request.is_ajax() or DEBUG:
        for key in request.POST:
            input = get_object_or_404(AbstractInput, pk=key)
            if request.POST[key] == "input":
                input.parameter=False
                input.save()
            elif request.POST[key] == "parameter":
                input.parameter=True
                if input.parameter_type!="checkbox":
                    input.parameter_type="text"
                input.save()
        return HttpResponse(status=200)
    else:
        return HttpResponse(status=400)
@login_required
def get_parameters(request):
    """Render the parameter dialog for a widget (AJAX only, owner only).

    NOTE(review): duplicate definition -- an identical get_parameters is
    defined earlier in this file; this later one shadows it at import time.
    One of the two should be removed.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            return render(request, 'parameters.html', {'widget':w,'parameters':w.inputs.filter(parameter=True)})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def get_rename_dialog(request):
    """Render the rename dialog for a widget (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    if widget.workflow.user != request.user:
        return HttpResponse(status=400)
    return render(request, 'rename.html', {'widget': widget})
@login_required
def rename_widget(request):
    """Rename a widget and propagate the name to linked entities.

    AJAX only, owner only.  POST: widget_id, new_name.  For 'input'/'output'
    widgets the mirrored Input/Output pair on the enclosing subprocess
    widget must be renamed too, so their ids are returned.  Returns JSON
    {'workflow_link', 'workflow_link_id', 'rename_inputs', 'rename_outputs'}.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            w.rename(request.POST['new_name'])
            mimetype = 'application/javascript'
            workflow_link = True
            workflow_link_id = 0
            rename_inputs = []
            rename_outputs = []
            if w.type=='input':
                # Input widgets expose one output; also rename the outer input.
                rename_inputs.append(w.outputs.all()[0].outer_input.id)
                rename_outputs.append(w.outputs.all()[0].id)
            if w.type=='output':
                # Output widgets expose one input; also rename the outer output.
                rename_outputs.append(w.inputs.all()[0].outer_output.id)
                rename_inputs.append(w.inputs.all()[0].id)
            try:
                # Only subprocess widgets have a linked workflow (tab to rename).
                workflow_link_id = w.workflow_link.id
            except Workflow.DoesNotExist:
                workflow_link = False
            data = simplejson.dumps({'workflow_link':workflow_link,'workflow_link_id':workflow_link_id,'rename_inputs':rename_inputs,'rename_outputs':rename_outputs})
            return HttpResponse(data,mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def rename_workflow(request):
    """Rename a workflow and update its description/public flag (AJAX, owner)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
    if workflow.user != request.user:
        return HttpResponse(status=400)
    workflow.rename(request.POST['new_name'])
    workflow.description = request.POST['description']
    workflow.public = (request.POST['public'] == "true")
    workflow.save()
    payload = simplejson.dumps({'workflow_id': workflow.id})
    return HttpResponse(payload, 'application/javascript')
@login_required
def run_widget(request):
    """Execute one widget synchronously (AJAX only, owner only).

    Returns JSON status: 'interactive' (widget paused, awaiting user input),
    'visualize' (widget has a visualization view), 'ok', or 'error' with the
    exception text.  On failure the widget is flagged errored and all its
    output values are cleared so downstream widgets never see stale results.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            try:
                # For-loop endpoints only run as part of their subprocess.
                if w.type == 'for_input' or w.type == 'for_output':
                    raise Exception("You can't run for loops like this. Please run the containing widget.")
                output_dict = w.run(False)
                mimetype = 'application/javascript'
                if not w.abstract_widget is None:
                    if w.abstract_widget.interactive:
                        # Pause and wait for the user to complete the dialog.
                        w.interaction_waiting = True
                        w.save()
                        data = simplejson.dumps({'status':'interactive','message':'Widget '+w.name+' needs your attention.','widget_id':w.id})
                    elif w.abstract_widget.visualization_view!='':
                        data = simplejson.dumps({'status':'visualize','message':'Visualizing widget '+w.name+'.','widget_id':w.id})
                    else:
                        data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' executed successfully.'})
                else:
                    # Special widgets (subprocess/input/output) have no
                    # abstract widget behind them.
                    data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' executed successfully.'})
            except Exception,e:
                mimetype = 'application/javascript'
                w.error = True
                w.running = False
                w.finished = False
                w.save()
                # NOTE(review): format_exc() takes a 'limit' argument, not the
                # exception -- passing e here is likely unintended.
                print traceback.format_exc(e)
                #raise
                # Clear outputs so downstream widgets cannot consume results
                # from the failed run.
                for o in w.outputs.all():
                    o.value=None
                    o.save()
                data = simplejson.dumps({'status':'error','message':'Error occurred when trying to execute widget '+w.name+': '+str(type(e))+' '+str(e)})
            return HttpResponse(data,mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def run_tree(request):
    """Execute a widget; same JSON protocol as run_widget.

    NOTE(review): the body is a verbatim copy of run_widget (including the
    single w.run(False) call -- nothing tree-specific is visible here).
    Consider delegating to a shared helper to avoid drift between the two.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            try:
                # For-loop endpoints only run as part of their subprocess.
                if w.type == 'for_input' or w.type == 'for_output':
                    raise Exception("You can't run for loops like this. Please run the containing widget.")
                output_dict = w.run(False)
                mimetype = 'application/javascript'
                if not w.abstract_widget is None:
                    if w.abstract_widget.interactive:
                        # Pause and wait for the user to complete the dialog.
                        w.interaction_waiting = True
                        w.save()
                        data = simplejson.dumps({'status':'interactive','message':'Widget '+w.name+' needs your attention.','widget_id':w.id})
                    elif w.abstract_widget.visualization_view!='':
                        data = simplejson.dumps({'status':'visualize','message':'Visualizing widget '+w.name+'.','widget_id':w.id})
                    else:
                        data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' executed successfully.'})
                else:
                    data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' executed successfully.'})
            except Exception, e:
                mimetype = 'application/javascript'
                w.error = True
                w.running = False
                w.finished = False
                w.save()
                # NOTE(review): format_exc() takes a 'limit' argument, not the
                # exception -- passing e here is likely unintended.
                print traceback.format_exc(e)
                #raise
                # Clear outputs so downstream widgets cannot consume results
                # from the failed run.
                for o in w.outputs.all():
                    o.value=None
                    o.save()
                data = simplejson.dumps({'status':'error','message':'Error occurred when trying to execute widget '+w.name+': '+str(type(e))+' '+str(e)})
            return HttpResponse(data,mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def reset_widget(request):
    """Reset a widget and everything downstream of it (AJAX only, owner only).

    Returns JSON {'status': 'ok'|'error', 'message': ...}.  On failure the
    widget is flagged errored and its output values are cleared.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            try:
                # Resets this widget and all widgets that depend on it.
                w.reset_descendants()
                data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' reset successfully.'})
                mimetype = 'application/javascript'
            except Exception, e:
                mimetype = 'application/javascript'
                w.error = True
                w.running = False
                w.finished = False
                w.save()
                # NOTE(review): format_exc() takes a 'limit' argument, not the
                # exception -- passing e here is likely unintended.
                print traceback.format_exc(e)
                #raise
                # Clear outputs so downstream widgets cannot consume stale results.
                for o in w.outputs.all():
                    o.value=None
                    o.save()
                data = simplejson.dumps({'status':'error','message':'Error occurred when trying to reset the widget '+w.name+': '+str(type(e))+' '+str(e)})
            return HttpResponse(data,mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def reset_workflow(request):
    """Reset every widget of a workflow (AJAX only, owner only).

    POST: workflow_id.  Returns JSON {'resetWidget': [pk, ...]} listing the
    widgets that were reset so the client can redraw them.
    """
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
        # The ownership check was missing here: any logged-in user could
        # reset an arbitrary workflow by id.  Mirror the check used by
        # reset_widget and the other views.
        if workflow.user != request.user:
            return HttpResponse(status=400)
        resetWidget = []
        for w in workflow.widgets.all():
            w.reset(False)
            resetWidget.append(w.pk)
        mimetype = 'application/javascript'
        data = simplejson.dumps({'resetWidget': resetWidget})
        return HttpResponse(data, mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def get_executed_status(request):
    """Report, per widget pk, whether the widget has fully executed.

    AJAX only, owner only.  'Executed' means finished, not currently
    running, and without error.  Returns JSON {'executedStatus': {pk: bool}}.
    """
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
        # The ownership check was missing here: any logged-in user could
        # probe the execution state of arbitrary workflows.  Mirror the
        # check used by the surrounding views.
        if workflow.user != request.user:
            return HttpResponse(status=400)
        executedStatus = {}
        for w in workflow.widgets.all():
            # Single boolean instead of the original if/else pair.
            executedStatus[w.pk] = not (w.error or w.running or not w.finished)
        mimetype = 'application/javascript'
        data = simplejson.dumps({'executedStatus': executedStatus})
        return HttpResponse(data, mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def visualize_widget(request):
    """Dispatch to the widget's visualization view (AJAX only, owner only).

    Collects current output values, pulls each connectable input's value
    across its first connection (persisting it on the Input), then calls the
    function named by abstract_widget.visualization_view with
    (request, input_dict, output_dict, widget).
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            output_dict = {}
            for o in w.outputs.all():
                output_dict[o.variable]=o.value
            input_dict = {}
            for i in w.inputs.all():
                if not i.parameter:
                    if i.connections.count() > 0:
                        # Copy the upstream output value into this input.
                        i.value = i.connections.all()[0].output.value
                        i.save()
                    else:
                        i.value = None
                        i.save()
                if i.multi_id == 0:
                    input_dict[i.variable]=i.value
                else:
                    # Multi-inputs gather all non-None values into a list.
                    if not i.variable in input_dict:
                        input_dict[i.variable]=[]
                    if not i.value == None:
                        input_dict[i.variable].append(i.value)
            view_to_call = getattr(workflows.visualization_views,w.abstract_widget.visualization_view)
            return view_to_call(request, input_dict, output_dict, w)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def widget_results(request):
    """Render a plain viewer of a widget's current inputs and outputs.

    AJAX only, owner only.  Values are pretty-printed and truncated for
    display; connectable inputs are refreshed from their first connection
    (and persisted) before rendering.
    """
    def cap(s):
        """
        Caps the display size of long strings.
        """
        # Python 2 only: 'unicode' does not exist on Python 3.
        if type(s) in [unicode, str] and len(s) > 300:
            return s[:300] + '\n...\''
        return s
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            import pprint
            input_dict = {}
            output_dict = {}
            for i in w.inputs.all():
                if not i.parameter:
                    if i.connections.count() > 0:
                        # Copy the upstream output value into this input.
                        i.value = i.connections.all()[0].output.value
                        i.save()
                    else:
                        i.value = None
                        i.save()
                if i.multi_id == 0:
                    input_dict[i.variable] = cap(pprint.pformat(i.value))
                else:
                    # Multi-inputs gather all non-None values into a list.
                    if not i.variable in input_dict:
                        input_dict[i.variable]=[]
                    if not i.value == None:
                        input_dict[i.variable].append(cap(pprint.pformat(i.value)))
            for o in w.outputs.all():
                output_dict[o.variable] = cap(pprint.pformat(o.value))
            return render(request, 'visualizations/result_viewer.html', {'widget':w,'input_dict':input_dict,'output_dict':output_dict})
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def documentation(request):
    """Render the documentation page for a widget (AJAX only, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    widget = get_object_or_404(Widget, pk=request.POST['widget_id'])
    if widget.workflow.user != request.user:
        return HttpResponse(status=400)
    return render(request, 'visualizations/documentation.html', {'widget': widget})
@login_required
def get_designate_dialogs(request):
    """Render the designation dialog for a widget category (AJAX, owner only)."""
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    category = get_object_or_404(Category, pk=request.POST['category_id'])
    if category.user != request.user:
        return HttpResponse(status=400)
    return render(request, 'designation.html', {'category': category})
@login_required
def get_unfinished(request):
    """List widgets that are ready to run (AJAX only, owner only).

    A widget is ready when it is unfinished but all its predecessors have
    finished; the workflow model computes the set.
    """
    if not (request.is_ajax() or DEBUG):
        return HttpResponse(status=400)
    workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
    if workflow.user != request.user:
        return HttpResponse(status=400)
    ready = workflow.get_ready_to_run()
    payload = simplejson.dumps({'ready_to_run': ready})
    return HttpResponse(payload, 'application/javascript')
@login_required
def unfinish_visualizations(request):
    """Mark every visualization widget of a workflow as unfinished.

    AJAX only, owner only.  Returns JSON {'unfinished': [pk, ...]} listing
    the widgets that were invalidated.
    """
    if request.is_ajax() or DEBUG:
        workflow = get_object_or_404(Workflow, pk=request.POST['workflow_id'])
        # The ownership check was missing here: any logged-in user could
        # invalidate visualizations of arbitrary workflows.  Mirror the
        # check used by the surrounding views.
        if workflow.user != request.user:
            return HttpResponse(status=400)
        unfinished_list = []
        for w in workflow.widgets.all():
            if w.is_visualization():
                w.unfinish()
                unfinished_list.append(w.pk)
        mimetype = 'application/javascript'
        data = simplejson.dumps({'unfinished': unfinished_list})
        return HttpResponse(data, mimetype)
    else:
        return HttpResponse(status=400)
@login_required
def upload_handler(request):
    """Receive a file upload for a file-type input (owner only).

    POST: input_id; FILES: 'file'.  The upload is streamed to
    FILES_FOLDER/<workflow_id>/<original name>, the path is stored as the
    input's value, and the owning widget is invalidated.
    """
    input = get_object_or_404(Input, pk=request.POST['input_id'])
    if (input.widget.workflow.user==request.user):
        destination = FILES_FOLDER+str(input.widget.workflow.id)+'/'+request.FILES['file'].name
        ensure_dir(destination)
        # 'with' guarantees the handle is closed even if a chunk write
        # raises (the original leaked the file object on error).
        with open(destination, 'wb') as destination_file:
            for chunk in request.FILES['file'].chunks():
                destination_file.write(chunk)
        input.value = destination
        input.save()
        # The stored value changed, so the widget must re-run.
        input.widget.unfinish()
        error = None
        return render(request,'upload_handler.html', {"error":error,"input_id":input.id})
    else:
        return HttpResponse(status=400)
@login_required
def widget_interaction(request):
    """Dispatch to the widget's interaction view (AJAX only, owner only).

    Same input/output collection as visualize_widget, but calls the function
    named by abstract_widget.interaction_view -- used for widgets that pause
    mid-run and need user input.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            input_dict = {}
            output_dict = {}
            for i in w.inputs.all():
                if not i.parameter:
                    if i.connections.count() > 0:
                        # Copy the upstream output value into this input.
                        i.value = i.connections.all()[0].output.value
                        i.save()
                    else:
                        i.value = None
                        i.save()
                if i.multi_id == 0:
                    input_dict[i.variable]=i.value
                else:
                    # Multi-inputs gather all non-None values into a list.
                    if not i.variable in input_dict:
                        input_dict[i.variable]=[]
                    if not i.value == None:
                        input_dict[i.variable].append(i.value)
            for o in w.outputs.all():
                output_dict[o.variable]=o.value
            view_to_call = getattr(workflows.interaction_views,w.abstract_widget.interaction_view)
            return view_to_call(request, input_dict, output_dict, w)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def finish_interaction(request):
    """Complete an interactive widget run with the user's dialog data.

    AJAX only, owner only.  Passes the POSTed form to the widget's
    run_post() and clears the interaction_waiting flag.  On failure the
    widget is flagged errored and its outputs cleared.  Returns JSON
    {'status', 'message', 'widget_id'}.
    """
    if request.is_ajax() or DEBUG:
        w = get_object_or_404(Widget, pk=request.POST['widget_id'])
        if (w.workflow.user==request.user):
            try:
                # NOTE(review): dict(QueryDict) maps each key to a LIST of
                # values in Django -- confirm run_post expects that shape.
                output_dict = w.run_post(dict(request.POST))
                w.interaction_waiting = False
                w.save()
                mimetype = 'application/javascript'
                data = simplejson.dumps({'status':'ok','message':'Widget '+w.name+' executed successfully.','widget_id':w.id})
            except Exception,e:
                mimetype = 'application/javascript'
                w.error = True
                w.running = False
                w.finished = False
                w.interaction_waiting = False
                w.save()
                # NOTE(review): format_exc() takes a 'limit' argument, not the
                # exception -- passing e here is likely unintended.
                print traceback.format_exc(e)
                #raise
                # Clear outputs so downstream widgets cannot consume stale results.
                for o in w.outputs.all():
                    o.value=None
                    o.save()
                data = simplejson.dumps({'status':'error','message':'Error occurred when trying to execute widget '+w.name+': '+str(type(e))+' '+str(e),'widget_id':w.id})
            return HttpResponse(data,mimetype)
        else:
            return HttpResponse(status=400)
    else:
        return HttpResponse(status=400)
@login_required
def import_webservice(request):
    """Import a SOAP/WSDL web service as a new widget category.

    POST: wsdl.  Creates a Category (name deduplicated with ' (n)' suffixes)
    in the user's active workflow, and one AbstractWidget per service
    method.  Every method widget gets 'timeout' and 'sendemptystrings'
    parameters plus abstract inputs/outputs mirroring the method signature.
    Returns JSON {'category_id'}.
    """
    from services.webservice import WebService
    ws = WebService(request.POST['wsdl'])
    new_c = Category()
    current_name = ws.name
    i=0
    # Deduplicate the category name: "Name", "Name (1)", "Name (2)", ...
    while request.user.categories.filter(name=current_name).count()>0:
        i = i + 1
        current_name = ws.name+' ('+str(i)+')'
    new_c.name=current_name
    new_c.user=request.user
    new_c.workflow=request.user.userprofile.active_workflow
    new_c.save()
    for m in ws.methods:
        # One abstract widget per webservice method.
        new_a = AbstractWidget()
        new_a.name=m['name']
        new_a.action='call_webservice'
        new_a.wsdl=ws.wsdl_url
        new_a.wsdl_method=m['name']
        new_a.description=m['documentation']
        new_a.user=request.user
        new_a.category=new_c
        new_a.save()
        # Standard call parameters shared by every imported method.
        new_i = AbstractInput()
        new_i.parameter=True
        new_i.widget = new_a
        new_i.name = "Timeout"
        new_i.short_name = "to"
        new_i.variable = "timeout"
        new_i.default = '60'
        new_i.parameter_type='text'
        new_i.save()
        new_i = AbstractInput()
        new_i.parameter=True
        new_i.widget = new_a
        new_i.name = "Send empty strings to webservices"
        new_i.short_name = "ses"
        new_i.variable = "sendemptystrings"
        new_i.default = ''
        new_i.parameter_type='checkbox'
        new_i.save()
        # Mirror the method's declared inputs ...
        for i in m['inputs']:
            new_i = AbstractInput()
            new_i.name = i['name']
            new_i.variable = i['name']
            new_i.short_name = i['name'][:3]
            new_i.description = ''
            new_i.required = False
            new_i.parameter= False
            if i['type']==bool:
                # Booleans become checkbox parameters.
                new_i.parameter_type='checkbox'
            new_i.default = ''
            new_i.widget = new_a
            new_i.save()
        # ... and its outputs.
        for o in m['outputs']:
            new_o = AbstractOutput()
            new_o.name = o['name']
            new_o.variable = o['name']
            new_o.short_name = o['name'][:3]
            new_o.description = ''
            new_o.widget = new_a
            new_o.save()
    mimetype = 'application/javascript'
    data = simplejson.dumps({'category_id':new_c.id})
    return HttpResponse(data,mimetype)
@login_required
def copy_workflow(request, workflow_id):
    """Clone a public or owned workflow and make the clone the active one."""
    source = get_object_or_404(Workflow, pk=workflow_id)
    if not (source.user == request.user or source.public):
        return HttpResponse(status=400)
    clone = workflows.models.copy_workflow(source, request.user)
    request.user.userprofile.active_workflow = clone
    request.user.userprofile.save()
    return redirect('editor')
@login_required
def workflow_url(request):
    """Render the shareable URL snippet for the user's active workflow.

    NOTE(review): unlike the sibling views, the non-AJAX branch returns
    200 (empty) rather than 400 -- confirm whether that is intentional.
    """
    if request.is_ajax() or DEBUG:
        if request.user.userprofile.active_workflow is None:
            # Nothing to link to; empty 200 keeps the client quiet.
            return HttpResponse(status=200)
        return render(request,'workflow_url.html', {"workflow":request.user.userprofile.active_workflow})
    else:
        return HttpResponse(status=200)
@login_required
def export_package(request, packages):
    """Export one or more widget packages as plain text (staff only).

    `packages` is a '-'-separated list from the URL.  GET flags (long or
    one-letter forms, value 'true' enables): newuid/n, updateuid/u, all/a;
    verbosity/v is an int defaulting to 1.  The export command's own
    messages and the exported data are concatenated into the response.
    """
    try:
        if not request.user.is_staff:
            return HttpResponse(status=405)
    except Exception:
        # Malformed/anonymous request object: bad request rather than a
        # 500.  (Was a bare 'except:', which also swallowed SystemExit.)
        return HttpResponse(status=400)
    newuid = (request.GET.get('newuid', 'False').lower() == 'true' or
              request.GET.get('n', 'False').lower() == 'true')
    updateuid = (request.GET.get('updateuid', 'False').lower() == 'true' or
                 request.GET.get('u', 'False').lower() == 'true')
    # Renamed from 'all' so the builtin is no longer shadowed.
    export_all = (request.GET.get('all', 'False').lower() == 'true' or
                  request.GET.get('a', 'False').lower() == 'true')
    try:
        verbosity = int(request.GET.get('v', '1'))
        if verbosity == 1:
            # Long form only consulted when the short form kept the default.
            verbosity = int(request.GET.get('verbosity', '1'))
    except (TypeError, ValueError):
        # Non-numeric verbosity falls back to the default.
        verbosity = 1
    packagesArray = tuple(packages.split('-'))

    class OutWriter:
        # Accumulates everything the export command writes.
        msgs = ""
        def write(self, msg):
            self.msgs += msg
    ov = OutWriter()
    from workflows.management.commands.export_package import export_package_string
    result = export_package_string(ov.write, packagesArray, newuid, updateuid,
                                   export_all, verbosity)
    content = '----------------------------------------\n' + \
              'Export procedure message:' + \
              "\n----------------------------------------\n" + \
              ov.msgs + \
              "\n----------------------------------------\n" + \
              "Exported data:" + \
              "\n----------------------------------------\n" + \
              result + \
              "\n----------------------------------------"
    response = HttpResponse(mimetype='text/plain', content=content)
    return response
@json_view
def widget_inputs(request, widget_id):
    """Return {variable: value} for a widget's inputs (JSON via @json_view).

    Connectable inputs are read across their first connection without being
    persisted; multi-inputs collect all non-None values into a list.
    """
    widget = get_object_or_404(Widget, pk=widget_id)
    input_dict = {}
    for inp in widget.inputs.all():
        if not inp.parameter:
            # Read the upstream value in memory only (no save()).
            if inp.connections.count() > 0:
                inp.value = inp.connections.all()[0].output.value
            else:
                inp.value = None
        if inp.multi_id == 0:
            input_dict[inp.variable] = inp.value
        else:
            if inp.variable not in input_dict:
                input_dict[inp.variable] = []
            # Keep the original '== None' comparison: values may be objects
            # with custom equality semantics.
            if not inp.value == None:
                input_dict[inp.variable].append(inp.value)
    return input_dict
@never_cache
def workflow_results(request,workflow_id):
    """Debug/test endpoint: round-trip GET['result'] through the runTest
    Celery task and echo the result.

    NOTE(review): looks like scaffolding -- the Stream instance is unused
    (the s.execute call is commented out) and the workflow itself is only
    fetched for the 404 check.  Confirm before relying on this view.
    """
    from workflows.tasks import runTest
    w = get_object_or_404(Workflow, pk=workflow_id)
    return_string = request.GET.get('result')
    s = Stream()
    # Dispatch to the worker and block until it answers.
    a = runTest.delay(return_string)
    r = a.wait()
    #return s.execute(workflow=w)
    return HttpResponse(str(r))
@login_required
def widget_iframe(request, widget_id):
    """Render a widget's visualization for embedding in an iframe.

    Owner only.  Same input/output collection as visualize_widget, but the
    visualization view is called with a final True flag (iframe mode).
    """
    w = get_object_or_404(Widget, pk=widget_id)
    if (w.workflow.user==request.user):
        output_dict = {}
        for o in w.outputs.all():
            output_dict[o.variable]=o.value
        input_dict = {}
        for i in w.inputs.all():
            if not i.parameter:
                if i.connections.count() > 0:
                    # Copy the upstream output value into this input.
                    i.value = i.connections.all()[0].output.value
                    i.save()
                else:
                    i.value = None
                    i.save()
            if i.multi_id == 0:
                input_dict[i.variable]=i.value
            else:
                # Multi-inputs gather all non-None values into a list.
                if not i.variable in input_dict:
                    input_dict[i.variable]=[]
                if not i.value == None:
                    input_dict[i.variable].append(i.value)
        view_to_call = getattr(workflows.visualization_views,w.abstract_widget.visualization_view)
        return view_to_call(request, input_dict, output_dict, w, True)
    else:
        return HttpResponse(status=400)
return HttpResponse("OK") |
YihaoLu/statsmodels | refs/heads/master | tools/fold_toc.py | 33 | #!/usr/bin/env python
import sys
import re

# Post-process a Sphinx-generated HTML page so its table(s) of contents
# become collapsible mktree widgets.
# Usage: fold_toc.py <html-file> [static-path]   (default static path: _static)

# Read doc to string
filename = sys.argv[1]
try:
    static_path = sys.argv[2]
except:
    # NOTE(review): bare except -- IndexError (missing argv[2]) is the
    # intended case, but this also hides anything else.
    static_path = '_static'
doc = open(filename).read()

# Add mktree to head
pre = '<head>'
post = '''<head>
<script type="text/javascript" src="static_path/mktree.js"></script>
<link rel="stylesheet" href="static_path/mktree.css" type="text/css">
'''
# Substitute the actual static path into the injected tags.
post = re.sub('static_path', static_path, post)
doc = re.sub(pre, post, doc)

# TOC class
# Wrap each toctree in expand/collapse controls and give its <ul> a unique
# id ('#' is a placeholder replaced per-occurrence below).
pre = '''<div class="toctree-wrapper compound">
<ul>'''
post = '''<div class="toctree-wrapper compound">
<a onclick="expandTree('toctree#')" href="javascript:void(0);">Expand all. </a>
<a onclick="collapseTree('toctree#')" href="javascript:void(0);">Collapse all.</a>
<ul class="mktree" id="toctree#">'''
toc_n = doc.count('toctree-wrapper')
for i in range(toc_n):
    # Number each toctree so the expand/collapse links target the right one.
    post_n = re.sub('#', str(i), post)
    doc = re.sub(pre, post_n, doc, count=1)

## TOC entries
# mktree folds any <li class="liClosed">; rewrite all four toctree levels.
pre = '<li class="toctree-l1">'
post = '<li class="liClosed"> '
doc = re.sub(pre, post, doc)
# TOC entries 2nd level
pre = '<li class="toctree-l2">'
post = '<li class="liClosed"> '
doc = re.sub(pre, post, doc)
# TOC entries 3rd level
pre = '<li class="toctree-l3">'
post = '<li class="liClosed"> '
doc = re.sub(pre, post, doc)
# TOC entries 4th level
pre = '<li class="toctree-l4">'
post = '<li class="liClosed"> '
doc = re.sub(pre, post, doc)

# Write to file
f = open(filename, 'w')
f.write(doc)
f.close()
|
ChristineLaMuse/mozillians | refs/heads/master | vendor-local/lib/python/south/creator/changes.py | 10 | """
Contains things to detect changes - either using options passed in on the
commandline, or by using autodetection, etc.
"""
from __future__ import print_function
from django.db import models
from django.contrib.contenttypes.generic import GenericRelation
from django.utils.datastructures import SortedDict
from south.creator.freezer import remove_useless_attributes, freeze_apps, model_key
from south.utils import auto_through
from south.utils.py3 import string_types
class BaseChanges(object):
    """
    Common base for the change-detection strategies below.

    Provides the helpers shared by autodetection, manual and initial change
    sources: splitting a frozen model definition into its parts and
    resolving live models/fields from "app_label.ModelName" keys.
    """
    def suggest_name(self):
        """Return a suggested migration name (empty by default)."""
        return ''
    def split_model_def(self, model, model_def):
        """
        Given a model and its model def (a dict of field: triple), return
        three items: the real fields dict, the Meta dict, and the M2M
        fields dict.
        """
        plain_fields = SortedDict()
        meta = SortedDict()
        m2m_fields = SortedDict()
        for field_name, triple in model_def.items():
            if field_name == "Meta":
                meta = triple
                continue
            model_field = model._meta.get_field_by_name(field_name)[0]
            if isinstance(model_field, models.ManyToManyField):
                m2m_fields[field_name] = triple
            else:
                plain_fields[field_name] = triple
        return plain_fields, meta, m2m_fields
    def current_model_from_key(self, key):
        """Resolve an "app_label.ModelName" key to the live model class."""
        app_label, model_name = key.split(".")
        return models.get_model(app_label, model_name)
    def current_field_from_key(self, key, fieldname):
        """Resolve ``fieldname`` on the live model named by ``key``."""
        app_label, model_name = key.split(".")
        # Special-case the magical hidden field Django adds for
        # order_with_respect_to; it never appears on the model itself.
        if fieldname == "_order":
            order_field = models.IntegerField()
            order_field.name = "_order"
            order_field.attname = "_order"
            order_field.column = "_order"
            order_field.default = 0
            return order_field
        # Otherwise look the field up normally.
        return models.get_model(app_label, model_name)._meta.get_field_by_name(fieldname)[0]
class AutoChanges(BaseChanges):
    """
    Detects changes by 'diffing' two sets of frozen model definitions.

    ``old_defs``/``new_defs`` are frozen definitions (dicts of triples);
    ``old_orm`` is a fake ORM that resolves "app.Model" and
    "app.Model:field" keys for the old state.
    """
    # Field types we don't generate add/remove field changes for.
    IGNORED_FIELD_TYPES = [
        GenericRelation,
    ]
    def __init__(self, migrations, old_defs, old_orm, new_defs):
        # migrations: the migrations collection this change set belongs to.
        self.migrations = migrations
        self.old_defs = old_defs
        self.old_orm = old_orm
        self.new_defs = new_defs
    def suggest_name(self):
        """Build a migration name from every detected action, capped at 70 chars."""
        parts = ["auto"]
        for change_name, params in self.get_changes():
            if change_name == "AddModel":
                parts.append("add_%s" % params['model']._meta.object_name.lower())
            elif change_name == "DeleteModel":
                parts.append("del_%s" % params['model']._meta.object_name.lower())
            elif change_name == "AddField":
                parts.append("add_field_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    params['field'].name,
                ))
            elif change_name == "DeleteField":
                parts.append("del_field_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    params['field'].name,
                ))
            elif change_name == "ChangeField":
                parts.append("chg_field_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    params['new_field'].name,
                ))
            elif change_name == "AddUnique":
                parts.append("add_unique_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    "_".join([x.name for x in params['fields']]),
                ))
            elif change_name == "DeleteUnique":
                parts.append("del_unique_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    "_".join([x.name for x in params['fields']]),
                ))
            elif change_name == "AddIndex":
                parts.append("add_index_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    "_".join([x.name for x in params['fields']]),
                ))
            elif change_name == "DeleteIndex":
                parts.append("del_index_%s_%s" % (
                    params['model']._meta.object_name.lower(),
                    "_".join([x.name for x in params['fields']]),
                ))
        return ("__".join(parts))[:70]
    def get_changes(self):
        """
        Generator yielding (action_name, params) pairs that describe the
        difference between the old and new sets of frozen models:
        added/deleted models (plus their M2M tables and unique/index
        constraints), added/deleted/changed fields, M2M changes, and
        unique_together / index_together changes.
        """
        deleted_models = set()
        # See if anything's vanished
        for key in self.old_defs:
            if key not in self.new_defs:
                # We shouldn't delete it if it was managed=False
                old_fields, old_meta, old_m2ms = self.split_model_def(self.old_orm[key], self.old_defs[key])
                if old_meta.get("managed", "True") != "False":
                    # Alright, delete it.
                    yield ("DeleteModel", {
                        "model": self.old_orm[key],
                        "model_def": old_fields,
                    })
                    # Also make sure we delete any M2Ms it had.
                    for fieldname in old_m2ms:
                        # Only delete its stuff if it wasn't a through=.
                        field = self.old_orm[key + ":" + fieldname]
                        if auto_through(field):
                            yield ("DeleteM2M", {"model": self.old_orm[key], "field": field})
                    # And any index/uniqueness constraints it had
                    for attr, operation in (("unique_together", "DeleteUnique"), ("index_together", "DeleteIndex")):
                        # Frozen Meta values are stored as strings, so
                        # eval() recovers the original literal.
                        together = eval(old_meta.get(attr, "[]"))
                        if together:
                            # If it's only a single tuple, make it into the longer one
                            if isinstance(together[0], string_types):
                                together = [together]
                            # For each combination, make an action for it
                            for fields in together:
                                yield (operation, {
                                    "model": self.old_orm[key],
                                    "fields": [self.old_orm[key]._meta.get_field_by_name(x)[0] for x in fields],
                                })
                # We always add it in here so we ignore it later
                deleted_models.add(key)
        # Or appeared
        for key in self.new_defs:
            if key not in self.old_defs:
                # We shouldn't add it if it's managed=False
                new_fields, new_meta, new_m2ms = self.split_model_def(self.current_model_from_key(key), self.new_defs[key])
                if new_meta.get("managed", "True") != "False":
                    yield ("AddModel", {
                        "model": self.current_model_from_key(key),
                        "model_def": new_fields,
                    })
                    # Also make sure we add any M2Ms it has.
                    for fieldname in new_m2ms:
                        # Only create its stuff if it wasn't a through=.
                        field = self.current_field_from_key(key, fieldname)
                        if auto_through(field):
                            yield ("AddM2M", {"model": self.current_model_from_key(key), "field": field})
                    # And any index/uniqueness constraints it has
                    for attr, operation in (("unique_together", "AddUnique"), ("index_together", "AddIndex")):
                        together = eval(new_meta.get(attr, "[]"))
                        if together:
                            # If it's only a single tuple, make it into the longer one
                            if isinstance(together[0], string_types):
                                together = [together]
                            # For each combination, make an action for it
                            for fields in together:
                                yield (operation, {
                                    "model": self.current_model_from_key(key),
                                    "fields": [self.current_model_from_key(key)._meta.get_field_by_name(x)[0] for x in fields],
                                })
        # Now, for every model that's stayed the same, check its fields.
        for key in self.old_defs:
            if key not in deleted_models:
                old_fields, old_meta, old_m2ms = self.split_model_def(self.old_orm[key], self.old_defs[key])
                new_fields, new_meta, new_m2ms = self.split_model_def(self.current_model_from_key(key), self.new_defs[key])
                # Do nothing for models which are now not managed.
                if new_meta.get("managed", "True") == "False":
                    continue
                # Find fields that have vanished.
                for fieldname in old_fields:
                    if fieldname not in new_fields:
                        # Don't do it for any fields we're ignoring
                        field = self.old_orm[key + ":" + fieldname]
                        field_allowed = True
                        for field_type in self.IGNORED_FIELD_TYPES:
                            if isinstance(field, field_type):
                                field_allowed = False
                        if field_allowed:
                            # Looks alright.
                            yield ("DeleteField", {
                                "model": self.old_orm[key],
                                "field": field,
                                "field_def": old_fields[fieldname],
                            })
                # And ones that have appeared
                for fieldname in new_fields:
                    if fieldname not in old_fields:
                        # Don't do it for any fields we're ignoring
                        field = self.current_field_from_key(key, fieldname)
                        field_allowed = True
                        for field_type in self.IGNORED_FIELD_TYPES:
                            if isinstance(field, field_type):
                                field_allowed = False
                        if field_allowed:
                            # Looks alright.
                            yield ("AddField", {
                                "model": self.current_model_from_key(key),
                                "field": field,
                                "field_def": new_fields[fieldname],
                            })
                # Find M2Ms that have vanished
                for fieldname in old_m2ms:
                    if fieldname not in new_m2ms:
                        # Only delete its stuff if it wasn't a through=.
                        field = self.old_orm[key + ":" + fieldname]
                        if auto_through(field):
                            yield ("DeleteM2M", {"model": self.old_orm[key], "field": field})
                # Find M2Ms that have appeared
                for fieldname in new_m2ms:
                    if fieldname not in old_m2ms:
                        # Only create its stuff if it wasn't a through=.
                        field = self.current_field_from_key(key, fieldname)
                        if auto_through(field):
                            yield ("AddM2M", {"model": self.current_model_from_key(key), "field": field})
                # For the ones that exist in both models, see if they were changed
                for fieldname in set(old_fields).intersection(set(new_fields)):
                    # Non-index changes
                    if self.different_attributes(
                     remove_useless_attributes(old_fields[fieldname], True, True),
                     remove_useless_attributes(new_fields[fieldname], True, True)):
                        yield ("ChangeField", {
                            "model": self.current_model_from_key(key),
                            "old_field": self.old_orm[key + ":" + fieldname],
                            "new_field": self.current_field_from_key(key, fieldname),
                            "old_def": old_fields[fieldname],
                            "new_def": new_fields[fieldname],
                        })
                    # Index changes
                    old_field = self.old_orm[key + ":" + fieldname]
                    new_field = self.current_field_from_key(key, fieldname)
                    if not old_field.db_index and new_field.db_index:
                        # They've added an index.
                        yield ("AddIndex", {
                            "model": self.current_model_from_key(key),
                            "fields": [new_field],
                        })
                    if old_field.db_index and not new_field.db_index:
                        # They've removed an index.
                        yield ("DeleteIndex", {
                            "model": self.old_orm[key],
                            "fields": [old_field],
                        })
                    # See if their uniques have changed
                    if old_field.unique != new_field.unique:
                        # Make sure we look at the one explicitly given to see what happened
                        if new_field.unique:
                            yield ("AddUnique", {
                                "model": self.current_model_from_key(key),
                                "fields": [new_field],
                            })
                        else:
                            yield ("DeleteUnique", {
                                "model": self.old_orm[key],
                                "fields": [old_field],
                            })
                # See if there's any M2Ms that have changed.
                for fieldname in set(old_m2ms).intersection(set(new_m2ms)):
                    old_field = self.old_orm[key + ":" + fieldname]
                    new_field = self.current_field_from_key(key, fieldname)
                    # Have they _added_ a through= ?
                    if auto_through(old_field) and not auto_through(new_field):
                        yield ("DeleteM2M", {"model": self.old_orm[key], "field": old_field})
                    # Have they _removed_ a through= ?
                    if not auto_through(old_field) and auto_through(new_field):
                        yield ("AddM2M", {"model": self.current_model_from_key(key), "field": new_field})
                ## See if the {index,unique}_togethers have changed
                for attr, add_operation, del_operation in (("unique_together", "AddUnique", "DeleteUnique"), ("index_together", "AddIndex", "DeleteIndex")):
                    # First, normalise them into lists of sets.
                    old_together = eval(old_meta.get(attr, "[]"))
                    new_together = eval(new_meta.get(attr, "[]"))
                    if old_together and isinstance(old_together[0], string_types):
                        old_together = [old_together]
                    if new_together and isinstance(new_together[0], string_types):
                        new_together = [new_together]
                    # Sets make the comparison below order-insensitive.
                    old_together = list(map(set, old_together))
                    new_together = list(map(set, new_together))
                    # See if any appeared or disappeared
                    for item in old_together:
                        if item not in new_together:
                            yield (del_operation, {
                                "model": self.old_orm[key],
                                "fields": [self.old_orm[key + ":" + x] for x in item],
                            })
                    for item in new_together:
                        if item not in old_together:
                            yield (add_operation, {
                                "model": self.current_model_from_key(key),
                                "fields": [self.current_field_from_key(key, x) for x in item],
                            })
    @classmethod
    def is_triple(cls, triple):
        "Returns whether the argument is a triple."
        return isinstance(triple, (list, tuple)) and len(triple) == 3 and \
            isinstance(triple[0], string_types) and \
            isinstance(triple[1], (list, tuple)) and \
            isinstance(triple[2], dict)
    @classmethod
    def different_attributes(cls, old, new):
        """
        Backwards-compat comparison that ignores orm. on the RHS and not the left
        and which knows django.db.models.fields.CharField = models.CharField.
        Has a whole load of tests in tests/autodetection.py.
        """
        # If they're not triples, just do normal comparison
        if not cls.is_triple(old) or not cls.is_triple(new):
            return old != new
        # Expand them out into parts
        old_field, old_pos, old_kwd = old
        new_field, new_pos, new_kwd = new
        # Copy the positional and keyword arguments so we can compare them and pop off things
        old_pos, new_pos = old_pos[:], new_pos[:]
        old_kwd = dict(old_kwd.items())
        new_kwd = dict(new_kwd.items())
        # Remove comparison of the existence of 'unique', that's done elsewhere.
        # TODO: Make this work for custom fields where unique= means something else?
        if "unique" in old_kwd:
            del old_kwd['unique']
        if "unique" in new_kwd:
            del new_kwd['unique']
        # If the first bit is different, check it's not by dj.db.models...
        if old_field != new_field:
            if old_field.startswith("models.") and (new_field.startswith("django.db.models") \
             or new_field.startswith("django.contrib.gis")):
                if old_field.split(".")[-1] != new_field.split(".")[-1]:
                    return True
                else:
                    # Remove those fields from the final comparison
                    old_field = new_field = ""
        # If there's a positional argument in the first, and a 'to' in the second,
        # see if they're actually comparable.
        if (old_pos and "to" in new_kwd) and ("orm" in new_kwd['to'] and "orm" not in old_pos[0]):
            # Do special comparison to fix #153
            try:
                if old_pos[0] != new_kwd['to'].split("'")[1].split(".")[1]:
                    return True
            except IndexError:
                pass # Fall back to next comparison
            # Remove those attrs from the final comparison
            old_pos = old_pos[1:]
            del new_kwd['to']
        return old_field != new_field or old_pos != new_pos or old_kwd != new_kwd
class ManualChanges(BaseChanges):
    """
    Detects changes by reading the command line.

    The lists of model/field/index names are taken verbatim from the
    user's command-line arguments rather than being autodetected.
    """
    def __init__(self, migrations, added_models, added_fields, added_indexes):
        self.migrations = migrations
        self.added_models = added_models
        self.added_fields = added_fields
        self.added_indexes = added_indexes
    def suggest_name(self):
        """Build a migration name out of everything that was requested."""
        bits = []
        for model_name in self.added_models:
            bits.append('add_model_%s' % model_name)
        for field_name in self.added_fields:
            bits.append('add_field_%s' % field_name)
        for index_name in self.added_indexes:
            bits.append('add_index_%s' % index_name)
        # Dots ("Model.field") are not valid in migration names.
        return '_'.join(bits).replace('.', '_')
    def get_changes(self):
        """
        Yield (action_name, params) pairs for the requested additions.

        Raises ValueError when a field/index description is not of the
        form "ModelName.field_name".
        """
        # Get the model defs so we can use them for the yield later
        model_defs = freeze_apps([self.migrations.app_label()])
        # Make the model changes
        for model_name in self.added_models:
            model = models.get_model(self.migrations.app_label(), model_name)
            real_fields, meta, m2m_fields = self.split_model_def(model, model_defs[model_key(model)])
            yield ("AddModel", {
                "model": model,
                "model_def": real_fields,
            })
        # And the field changes
        for field_desc in self.added_fields:
            try:
                model_name, field_name = field_desc.split(".")
            except (TypeError, ValueError):
                raise ValueError("%r is not a valid field description." % field_desc)
            model = models.get_model(self.migrations.app_label(), model_name)
            real_fields, meta, m2m_fields = self.split_model_def(model, model_defs[model_key(model)])
            yield ("AddField", {
                "model": model,
                "field": model._meta.get_field_by_name(field_name)[0],
                "field_def": real_fields[field_name],
            })
        # And the indexes
        for field_desc in self.added_indexes:
            try:
                model_name, field_name = field_desc.split(".")
            except (TypeError, ValueError):
                # Bug fix: this branch used to print a warning and fall
                # through, crashing on the unbound model_name below with a
                # NameError; raise the same error the field loop does.
                raise ValueError("%r is not a valid field description." % field_desc)
            model = models.get_model(self.migrations.app_label(), model_name)
            yield ("AddIndex", {
                "model": model,
                "fields": [model._meta.get_field_by_name(field_name)[0]],
            })
class InitialChanges(BaseChanges):
    """
    Creates all models; handles --initial.
    """
    def suggest_name(self):
        """The first migration of an app is always called 'initial'."""
        return 'initial'
    def __init__(self, migrations):
        self.migrations = migrations
    def get_changes(self):
        """Yield (action_name, params) pairs creating every model of the app."""
        # Freeze every model of this app so we can yield their definitions.
        frozen_defs = freeze_apps([self.migrations.app_label()])
        for model in models.get_models(models.get_app(self.migrations.app_label())):
            opts = model._meta
            # Skip abstract, proxy and unmanaged models entirely.
            if opts.abstract or getattr(opts, "proxy", False) or not getattr(opts, "managed", True):
                continue
            plain_fields, meta, m2m_fields = self.split_model_def(model, frozen_defs[model_key(model)])
            # The main table and its ordinary columns come first.
            yield ("AddModel", {
                "model": model,
                "model_def": plain_fields,
            })
            # Then any unique_together / index_together constraints.
            if meta:
                for attr, operation in (("unique_together", "AddUnique"), ("index_together", "AddIndex")):
                    # Frozen Meta values are strings; eval() recovers the literal.
                    together = eval(meta.get(attr, "[]"))
                    if not together:
                        continue
                    # A single tuple of names means one constraint, not several.
                    if isinstance(together[0], string_types):
                        together = [together]
                    for fields in together:
                        yield (operation, {
                            "model": model,
                            "fields": [opts.get_field_by_name(x)[0] for x in fields],
                        })
            # Finally the M2M tables -- but only auto-created ones (#120).
            for m2m_name in m2m_fields:
                field = opts.get_field_by_name(m2m_name)[0]
                if not field.rel.through:
                    # Plain M2M: its join table is always auto-created.
                    yield ("AddM2M", {
                        "model": model,
                        "field": field,
                    })
                else:
                    try:
                        # Django 1.1 and below
                        through_model = field.rel.through_model
                    except AttributeError:
                        # Django 1.2
                        through_model = field.rel.through
                    if getattr(through_model._meta, "auto_created", False):
                        yield ("AddM2M", {
                            "model": model,
                            "field": field,
                        })
|
Chilledheart/gyp | refs/heads/master | test/win/gyptest-link-shard.py | 165 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_shard works correctly.
"""
import TestGyp
import sys
# msvs_shard splits a library target into several shards; Windows-only test.
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
  CHDIR = 'shard'
  test.run_gyp('shard.gyp', chdir=CHDIR)
  test.build('shard.gyp', test.ALL, chdir=CHDIR)
  # Four shard libraries must come out of the build (shard.gyp presumably
  # sets msvs_shard=4 -- the .gyp file itself is not visible here).
  test.built_file_must_exist('shard_0.lib', chdir=CHDIR)
  test.built_file_must_exist('shard_1.lib', chdir=CHDIR)
  test.built_file_must_exist('shard_2.lib', chdir=CHDIR)
  test.built_file_must_exist('shard_3.lib', chdir=CHDIR)
  # A project referencing the sharded target must still build and link.
  test.run_gyp('shard_ref.gyp', chdir=CHDIR)
  test.build('shard_ref.gyp', test.ALL, chdir=CHDIR)
  test.pass_test()
|
citrix-openstack/build-trove | refs/heads/ctx-nova-network-smoke-latest | trove/openstack/common/crypto/utils.py | 7 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Hash import HMAC
from Crypto import Random
from trove.openstack.common.gettextutils import _ # noqa
from trove.openstack.common import importutils
class CryptoutilsException(Exception):
    """Generic Exception for Crypto utilities."""
    # Default message; subclasses build a specific one in their __init__.
    message = _("An unknown error occurred in crypto utils.")
class CipherBlockLengthTooBig(CryptoutilsException):
    """The block size is too big.

    :param requested: the cipher block size that was asked for
    :param permitted: the maximum block size allowed
    """
    def __init__(self, requested, permitted):
        msg = _("Block size of %(given)d is too big, max = %(maximum)d")
        message = msg % {'given': requested, 'maximum': permitted}
        # Bug fix: super() must name this class, not the base class --
        # naming CryptoutilsException skipped it in the MRO entirely.
        super(CipherBlockLengthTooBig, self).__init__(message)
class HKDFOutputLengthTooLong(CryptoutilsException):
    """The amount of Key Material asked is too much.

    :param requested: the number of output bytes that was asked for
    :param permitted: the maximum output length allowed (255 * HashLen)
    """
    def __init__(self, requested, permitted):
        msg = _("Length of %(given)d is too long, max = %(maximum)d")
        message = msg % {'given': requested, 'maximum': permitted}
        # Bug fix: super() must name this class, not the base class --
        # naming CryptoutilsException skipped it in the MRO entirely.
        super(HKDFOutputLengthTooLong, self).__init__(message)
class HKDF(object):
    """An HMAC-based Key Derivation Function implementation (RFC5869)

    This class creates an object that allows to use HKDF to derive keys.

    :param hashtype: name of a ``Crypto.Hash`` module to use (default SHA256)
    """
    def __init__(self, hashtype='SHA256'):
        self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
        # RFC5869 limits the output to 255 blocks of HashLen octets.
        self.max_okm_length = 255 * self.hashfn.digest_size
    def extract(self, ikm, salt=None):
        """An extract function that can be used to derive a robust key given
        weak Input Key Material (IKM) which could be a password.
        Returns a pseudorandom key (of HashLen octets)

        :param ikm: input keying material (ex a password)
        :param salt: optional salt value (a non-secret random value)
        """
        if salt is None:
            # RFC5869: a missing salt defaults to HashLen zero octets.
            salt = '\x00' * self.hashfn.digest_size
        return HMAC.new(salt, ikm, self.hashfn).digest()
    def expand(self, prk, info, length):
        """An expand function that will return arbitrary length output that can
        be used as keys.
        Returns a buffer usable as key material.

        :param prk: a pseudorandom key of at least HashLen octets
        :param info: optional string (can be a zero-length string)
        :param length: length of output keying material (<= 255 * HashLen)
        :raises HKDFOutputLengthTooLong: if ``length`` exceeds the RFC limit
        """
        if length > self.max_okm_length:
            raise HKDFOutputLengthTooLong(length, self.max_okm_length)
        # Bug fix: use floor division. With true division (Python 3 or a
        # __future__ import) '/' yields a float, and range(1, N + 1) below
        # would raise TypeError. N = ceil(length / HashLen) per RFC5869.
        N = (length + self.hashfn.digest_size - 1) // self.hashfn.digest_size
        okm = ""
        tmp = ""
        # T(i) = HMAC(PRK, T(i-1) | info | i); OKM = T(1) | T(2) | ...
        for block in range(1, N + 1):
            tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest()
            okm += tmp
        return okm[:length]
# Upper bound on the cipher block size accepted by SymmetricCrypto.encrypt:
# the single pad-length byte appended there can only encode padding < 256.
MAX_CB_SIZE = 256
class SymmetricCrypto(object):
    """Symmetric Key Crypto object.

    This class creates a Symmetric Key Crypto object that can be used
    to encrypt, decrypt, or sign arbitrary data.

    :param enctype: Encryption Cipher name (default: AES)
    :param hashtype: Hash/HMAC type name (default: SHA256)
    """
    def __init__(self, enctype='AES', hashtype='SHA256'):
        # Lazily resolve the PyCrypto cipher/hash modules by name.
        self.cipher = importutils.import_module('Crypto.Cipher.' + enctype)
        self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
    def new_key(self, size):
        # Cryptographically-strong random key of ``size`` bytes.
        return Random.new().read(size)
    def encrypt(self, key, msg, b64encode=True):
        """Encrypt the provided msg and returns the cyphertext optionally
        base64 encoded.

        Uses AES-128-CBC with a Random IV by default.

        The plaintext is padded to reach blocksize length.
        The last byte of the block is the length of the padding.
        The length of the padding does not include the length byte itself.

        :param key: The Encryption key.
        :param msg: the plain text.
        :returns encblock: a block of encrypted data (IV prepended).
        """
        iv = Random.new().read(self.cipher.block_size)
        cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
        # CBC mode requires a fixed block size. Append padding and length of
        # padding.
        if self.cipher.block_size > MAX_CB_SIZE:
            # The pad-length byte below cannot represent larger blocks.
            raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
        r = len(msg) % self.cipher.block_size
        # Reserve one byte of the final block for the pad-length marker.
        padlen = self.cipher.block_size - r - 1
        msg += '\x00' * padlen
        msg += chr(padlen)
        # Ship the IV in clear as the first block of the ciphertext.
        enc = iv + cipher.encrypt(msg)
        if b64encode:
            enc = base64.b64encode(enc)
        return enc
    def decrypt(self, key, msg, b64decode=True):
        """Decrypts the provided ciphertext, optionally base 64 encoded, and
        returns the plaintext message, after padding is removed.

        Uses AES-128-CBC with an IV by default.

        :param key: The Encryption key.
        :param msg: the ciphetext, the first block is the IV
        """
        if b64decode:
            msg = base64.b64decode(msg)
        iv = msg[:self.cipher.block_size]
        cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
        padded = cipher.decrypt(msg[self.cipher.block_size:])
        # Last byte holds the pad length; strip it plus the padding itself.
        l = ord(padded[-1]) + 1
        plain = padded[:-l]
        return plain
    def sign(self, key, msg, b64encode=True):
        """Signs a message string and returns a base64 encoded signature.

        Uses HMAC-SHA-256 by default.

        :param key: The Signing key.
        :param msg: the message to sign.
        """
        h = HMAC.new(key, msg, self.hashfn)
        out = h.digest()
        if b64encode:
            out = base64.b64encode(out)
        return out
|
desmovalvo/virtualsib-part2 | refs/heads/master | manager/lib/treplies.py | 4 | #!/usr/bin/python
# requirements
from SSAPLib import *
from termcolor import *
from lib.Subreq import *
from smart_m3.m3_kp import *
from xml.sax import make_parser
##############################################################
#
# REQUESTS
#
##############################################################
# REGISTER REQUEST
def handle_register_request(logger, conn, info):
    """Forge and send the confirm for a publisher's REGISTER REQUEST.

    Returns True when the confirm was delivered, False on a socket error.
    """
    # debug info
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_register_request")
    logger.info("REGISTER REQUEST handled by handle_register_request")
    # forge the success confirm for the publisher
    success_status = '<parameter name="status">m3:Success</parameter>'
    confirm = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                               info["space_id"],
                                               "REGISTER",
                                               info["transaction_id"],
                                               success_status)
    # deliver it and report the outcome
    try:
        conn.send(confirm)
        return True
    except socket.error:
        logger.error("REGISTER CONFIRM not sent!")
        return False
# JOIN REQUEST
def handle_join_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's JOIN REQUEST to every real SIB publisher.

    On a socket failure an m3:Error JOIN confirm is sent back to the KP
    and the KP is dropped from kp_list.
    """
    # debug message
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_join_request")
    logger.info("JOIN REQUEST handled by handle_join_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "JOIN",
                                                       info["transaction_id"],
                                                       error_status)
            # send a notification error to the KP and forget about it
            kp_list[info["node_id"]].send(err_msg)
            del kp_list[info["node_id"]]
            logger.error("JOIN REQUEST forwarding failed")
# LEAVE REQUEST
def handle_leave_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's LEAVE REQUEST to every real SIB publisher.

    On a socket failure an m3:Error LEAVE confirm is sent back to the KP.
    """
    # debug message
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_leave_request")
    logger.info("LEAVE REQUEST handled by handle_leave_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "LEAVE",
                                                       info["transaction_id"],
                                                       error_status)
            kp_list[info["node_id"]].send(err_msg)
            logger.error("LEAVE REQUEST forwarding failed")
# INSERT REQUEST
def handle_insert_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's INSERT REQUEST to every real SIB publisher.

    On a socket failure an m3:Error INSERT confirm is sent back to the KP.
    """
    # debug info
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_insert_request")
    logger.info("INSERT REQUEST handled by handle_insert_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "INSERT",
                                                       info["transaction_id"],
                                                       error_status)
            kp_list[info["node_id"]].send(err_msg)
            logger.error("INSERT REQUEST forwarding failed")
# REMOVE REQUEST
def handle_remove_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's REMOVE REQUEST to every real SIB publisher.

    On a socket failure an m3:Error REMOVE confirm is sent back to the KP.
    """
    # debug info
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_remove_request")
    logger.info("REMOVE REQUEST handled by handle_remove_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "REMOVE",
                                                       info["transaction_id"],
                                                       error_status)
            kp_list[info["node_id"]].send(err_msg)
            logger.error("REMOVE REQUEST forwarding failed")
# SPARQL QUERY REQUEST
def handle_sparql_query_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's SPARQL QUERY REQUEST to every real SIB publisher.

    On a socket failure an m3:Error QUERY confirm is sent back to the KP.
    """
    # debug info
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_sparql_query_request")
    logger.info("SPARQL QUERY REQUEST handled by handle_sparql_query_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "QUERY",
                                                       info["transaction_id"],
                                                       error_status)
            kp_list[info["node_id"]].send(err_msg)
            logger.error("SPARQL QUERY REQUEST forwarding failed")
# RDF QUERY REQUEST
def handle_rdf_query_request(logger, info, ssap_msg, sib_list, kp_list):
    """Forward a KP's RDF QUERY REQUEST to every real SIB publisher.

    On a socket failure an m3:Error QUERY confirm is sent back to the KP.
    """
    # debug info
    print(colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_query_request")
    logger.info("RDF QUERY REQUEST handled by handle_rdf_query_request")
    error_status = '<parameter name="status">m3:Error</parameter>'
    # relay the request to each publisher socket
    for publisher in sib_list:
        try:
            publisher.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE % (info["node_id"],
                                                       info["space_id"],
                                                       "QUERY",
                                                       info["transaction_id"],
                                                       error_status)
            kp_list[info["node_id"]].send(err_msg)
            logger.error("RDF QUERY REQUEST forwarding failed")
# RDF SUBSCRIBE REQUEST
def handle_rdf_subscribe_request(logger, info, ssap_msg, sib_list, kp_list, clientsock, val_subscriptions):
    """Register a virtual subscription for the KP and forward a rewritten
    SUBSCRIBE REQUEST (tagged with the virtual subscription id) to every
    real SIB publisher. On a socket failure an m3:Error SUBSCRIBE confirm
    is sent back to the KP over the subscription's connection."""
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_subscribe_request"
    logger.info("RDF SUBSCRIBE REQUEST handled by handle_rdf_subscribe_request")
    # generating a Subreq instance to track this virtual subscription
    newsub = Subreq(clientsock, info["node_id"], info["transaction_id"])
    val_subscriptions.append(newsub)
    # convert ssap_msg to dict (SAX-parse the SSAP XML into key/value pairs)
    ssap_msg_dict = {}
    parser = make_parser()
    ssap_mh = SSAPMsgHandler(ssap_msg_dict)
    parser.setContentHandler(ssap_mh)
    parser.parse(StringIO(ssap_msg))
    # forwarding message to the publishers
    # NOTE(review): ssap_msg is rebuilt (and rebound) inside the loop, so
    # each publisher receives the rewritten request, not the original one.
    for sock in sib_list:
        try:
            pars = '<parameter name = "type">RDF-M3</parameter><parameter name = "query">' + ssap_msg_dict["query"] + '</parameter><virtual_subscription_id>' + str(newsub.virtual_subscription_id) + '</virtual_subscription_id>'
            ssap_msg = SSAP_MESSAGE_REQUEST_TEMPLATE%(info["node_id"],
                                                      info["space_id"],
                                                      "SUBSCRIBE",
                                                      info["transaction_id"],
                                                      pars
                                                      )
            sock.send(ssap_msg)
        except socket.error:
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "SUBSCRIBE",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            newsub.conn.send(err_msg)
            #TODO delete class!
            logger.error("RDF SUBSCRIBE REQUEST forwarding failed")
# RDF UNSUBSCRIBE REQUEST
def handle_rdf_unsubscribe_request(logger, info, ssap_msg, sib_list, kp_list, clientsock, val_subscriptions):
    """Forward an UNSUBSCRIBE REQUEST to every real SIB publisher, after
    matching the request against a tracked virtual subscription. On a
    socket failure an m3:Error UNSUBSCRIBE confirm is sent back over the
    subscription's own connection."""
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_unsubscribe_request"
    logger.info("RDF UNSUBSCRIBE REQUEST handled by handle_rdf_unsubscribe_request")
    # find the Subreq instance matching the id carried by the request
    for s in val_subscriptions:
        if str(s.virtual_subscription_id) == str(info["parameter_subscription_id"]):
            # forwarding message to the publishers
            for sock in sib_list:
                try:
                    # send the message
                    sock.send(ssap_msg)
                except socket.error:
                    err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                             info["space_id"],
                                                             "UNSUBSCRIBE",
                                                             info["transaction_id"],
                                                             '<parameter name="status">m3:Error</parameter>')
                    s.conn.send(err_msg)
                    logger.error("RDF UNSUBSCRIBE REQUEST forwarding failed")
            # only one subscription can match; stop searching
            break
##############################################################
#
# confirms
#
##############################################################
# JOIN CONFIRM
def handle_join_confirm(logger, conn, info, ssap_msg, confirms, kp_list):
    """Collect JOIN CONFIRMs from the real SIBs and forward the final one
    to the KP.

    ``confirms[node_id]`` counts the publishers still expected to answer;
    None marks a transaction already failed. The confirm is relayed only
    when every publisher has answered successfully; any failure triggers
    an m3:Error confirm and removes the KP from kp_list.
    (``conn`` is unused here -- presumably kept for handler signature
    uniformity; verify against the dispatcher.)
    """
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_join_confirm"
    logger.info("JOIN CONFIRM handled by handle_join_confirm")
    if not confirms[info["node_id"]] == None:
        if info["parameter_status"] == "m3:Success":
            # insert successful
            confirms[info["node_id"]] -= 1
            # last pending confirm: forward it to the KP and close
            if confirms[info["node_id"]] == 0:
                kp_list[info["node_id"]].send(ssap_msg)
                kp_list[info["node_id"]].close()
        else:
            # insert failed: mark the transaction as failed for later confirms
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "JOIN",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            del kp_list[info["node_id"]]
            logger.error("JOIN CONFIRM forwarding failed")
# LEAVE CONFIRM
def handle_leave_confirm(logger, info, ssap_msg, confirms, kp_list):
    """This method is used to decide what to do once a LEAVE CONFIRM
    is received. We can send the confirm back to the KP (if all the
    sibs sent a confirm), decrement a counter (if we are waiting for
    other sibs to reply) or send an error message (if the current
    message or one of the previous replies it's a failure).

    ``confirms[node_id]`` counts pending publisher replies; None marks a
    transaction that already failed. On completion or failure the KP
    socket is closed and removed from kp_list.
    """
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_leave_confirm"
    logger.info("LEAVE CONFIRM handled by handle_leave_confirm")
    # check if we already received a failure
    if not confirms[info["node_id"]] == None:
        # check if the current message represent a successful insertion
        if info["parameter_status"] == "m3:Success":
            confirms[info["node_id"]] -= 1
            # last pending confirm: forward it and drop the KP entry
            if confirms[info["node_id"]] == 0:
                kp_list[info["node_id"]].send(ssap_msg)
                kp_list[info["node_id"]].close()
                del kp_list[info["node_id"]]
        # if the current message represent a failure...
        else:
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "LEAVE",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            logger.error("LEAVE CONFIRM forwarding failed")
# INSERT CONFIRM
def handle_insert_confirm(logger, info, ssap_msg, confirms, kp_list):
    """This method is used to decide what to do once an INSERT CONFIRM
    is received. We can send the confirm back to the KP (if all the
    sibs sent a confirm), decrement a counter (if we are waiting for
    other sibs to reply) or send an error message (if the current
    message or one of the previous replies it's a failure)

    See handle_leave_confirm for the meaning of the parameters.
    """
    # debug message
    print colored("treplies>", "green", attrs=["bold"]) + " handle_insert_confirm"
    logger.info("INSERT CONFIRM handled by handle_insert_confirm")
    # check if we already received a failure
    if not confirms[info["node_id"]] == None:
        # check if the current message represent a successful insertion
        if info["parameter_status"] == "m3:Success":
            confirms[info["node_id"]] -= 1
            # every SIB confirmed: forward the confirm to the KP
            if confirms[info["node_id"]] == 0:
                kp_list[info["node_id"]].send(ssap_msg)
                kp_list[info["node_id"]].close()
                # NOTE(review): unlike JOIN/LEAVE, the socket entry is not
                # deleted from kp_list here -- confirm whether intended
        # if the current message represent a failure...
        else:
            # remember the failure so later confirms are ignored
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "INSERT",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            logger.error("INSERT CONFIRM forwarding failed")
# REMOVE CONFIRM
def handle_remove_confirm(logger, info, ssap_msg, confirms, kp_list):
    """This method is used to decide what to do once an REMOVE CONFIRM
    is received. We can send the confirm back to the KP (if all the
    sibs sent a confirm), decrement a counter (if we are waiting for
    other sibs to reply) or send an error message (if the current
    message or one of the previous replies it's a failure)

    See handle_leave_confirm for the meaning of the parameters.
    """
    # debug message
    print colored("treplies>", "green", attrs=["bold"]) + " handle_remove_confirm"
    logger.info("REMOVE CONFIRM handled by handle_remove_confirm")
    # check if we already received a failure
    if not confirms[info["node_id"]] == None:
        # check if the current message represent a successful insertion
        if info["parameter_status"] == "m3:Success":
            confirms[info["node_id"]] -= 1
            # every SIB confirmed: forward the confirm to the KP
            if confirms[info["node_id"]] == 0:
                kp_list[info["node_id"]].send(ssap_msg)
                kp_list[info["node_id"]].close()
                # NOTE(review): socket entry is not deleted from kp_list
                # here (JOIN/LEAVE do delete it) -- confirm whether intended
        # if the current message represent a failure...
        else:
            # remember the failure so later confirms are ignored
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "REMOVE",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            logger.error("REMOVE CONFIRM forwarding failed")
# SPARQL QUERY CONFIRM
def handle_sparql_query_confirm(logger, info, ssap_msg, confirms, kp_list, query_results):
    """This method is used to manage sparql QUERY CONFIRM received.

    Decrements the per-node confirm counter, merges the triples carried
    by this reply into query_results (dropping duplicates while keeping
    first-seen order) and, once every SIB has replied, sends the
    aggregated result back to the KP. A failure from any SIB aborts the
    transaction with an SSAP error message."""
    # debug message
    print colored("treplies>", "green", attrs=["bold"]) + " handle_sparql_query_confirm"
    logger.info("SPARQL QUERY CONFIRM handled by handle_sparql_query_confirm")
    # check if we already received a failure
    if not confirms[info["node_id"]] == None:
        # check if the current message represent a successful insertion
        if info["parameter_status"] == "m3:Success":
            confirms[info["node_id"]] -= 1
            # convert ssap_msg to dict (SAX parse of the confirm payload)
            ssap_msg_dict = {}
            parser = make_parser()
            ssap_mh = SSAPMsgHandler(ssap_msg_dict)
            parser.setContentHandler(ssap_mh)
            parser.parse(StringIO(ssap_msg))
            # extract triples from ssap reply
            triple_list = parse_sparql(ssap_msg_dict["results"])
            for triple in triple_list:
                query_results[info["node_id"]].append(triple)
            # remove duplicates, preserving first-seen order
            result = []
            for triple in query_results[info["node_id"]]:
                if not triple in result:
                    result.append(triple)
            query_results[info["node_id"]] = result
            if confirms[info["node_id"]] == 0:
                # build ssap reply from the aggregated, de-duplicated rows
                ssap_reply = reply_to_sparql_query(ssap_msg_dict["node_id"],
                                                   ssap_msg_dict["space_id"],
                                                   ssap_msg_dict["transaction_id"],
                                                   result)
                kp_list[info["node_id"]].send(ssap_reply)
                kp_list[info["node_id"]].close()
        # if the current message represent a failure...
        else:
            # remember the failure so later confirms are ignored
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "QUERY",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            logger.error("SPARQL CONFIRM forwarding failed")
# RDF QUERY CONFIRM
def handle_rdf_query_confirm(logger, info, ssap_msg, confirms, kp_list, query_results):
    """This method is used to manage rdf QUERY CONFIRM received.

    Same aggregation scheme as handle_sparql_query_confirm, but the
    payload is parsed as an M3 RDF triple list and the final reply is
    built with reply_to_rdf_query."""
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_query_confirm"
    logger.info("RDF QUERY CONFIRM handled by handle_rdf_query_confirm")
    # check if we already received a failure
    if not confirms[info["node_id"]] == None:
        # check if the current message represent a successful insertion
        if info["parameter_status"] == "m3:Success":
            confirms[info["node_id"]] -= 1
            # convert ssap_msg to dict (SAX parse of the confirm payload)
            ssap_msg_dict = {}
            parser = make_parser()
            ssap_mh = SSAPMsgHandler(ssap_msg_dict)
            parser.setContentHandler(ssap_mh)
            parser.parse(StringIO(ssap_msg))
            # extract triples from ssap reply
            triple_list = parse_M3RDF(ssap_msg_dict["results"])
            for triple in triple_list:
                query_results[info["node_id"]].append(triple)
            # remove duplicates, preserving first-seen order
            result = []
            for triple in query_results[info["node_id"]]:
                if not triple in result:
                    result.append(triple)
            query_results[info["node_id"]] = result
            if confirms[info["node_id"]] == 0:
                # build ssap reply from the aggregated, de-duplicated triples
                ssap_reply = reply_to_rdf_query(ssap_msg_dict["node_id"],
                                                ssap_msg_dict["space_id"],
                                                ssap_msg_dict["transaction_id"],
                                                result)
                kp_list[info["node_id"]].send(ssap_reply)
                kp_list[info["node_id"]].close()
        # if the current message represent a failure...
        else:
            # remember the failure so later confirms are ignored
            confirms[info["node_id"]] = None
            # send SSAP ERROR MESSAGE
            err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
                                                     info["space_id"],
                                                     "QUERY",
                                                     info["transaction_id"],
                                                     '<parameter name="status">m3:Error</parameter>')
            kp_list[info["node_id"]].send(err_msg)
            kp_list[info["node_id"]].close()
            logger.error("RDF QUERY CONFIRM forwarding failed")
# RDF SUBSCRIBE CONFIRM
def handle_rdf_subscribe_confirm(logger, info, ssap_msg, confirms, kp_list, initial_results, active_subscriptions, clientsock, val_subscriptions):
"""This method is used to manage rdf SUBSCRIBE CONFIRM received. """
# debug info
print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_subscribe_confirm"
logger.info("RDF SUBSCRIBE CONFIRM handled by handle_rdf_subscribe_confirm")
# check if we already received a failure
if not confirms[info["node_id"]] == None:
# check if the current message represent a successful insertion
if info["parameter_status"] == "m3:Success":
confirms[info["node_id"]] -= 1
# store the corrispondence between the real sib and the real_subscription_id
for s in val_subscriptions:
if s.node_id == info["node_id"] and s.request_transaction_id == info["transaction_id"]:
# s.received_confirm(clientsock, info["parameter_subscription_id"])
subreq_instance = s
# convert ssap_msg to dict
ssap_msg_dict = {}
parser = make_parser()
ssap_mh = SSAPMsgHandler(ssap_msg_dict)
parser.setContentHandler(ssap_mh)
parser.parse(StringIO(ssap_msg))
# extract triples from ssap reply
triple_list = parse_M3RDF(ssap_msg_dict["results"])
for triple in triple_list:
initial_results[info["node_id"]].append(triple)
# remove duplicates
for triple in initial_results[info["node_id"]]:
if not triple in s.result:
s.result.append(triple)
initial_results[info["node_id"]] = s.result
if confirms[info["node_id"]] == 0:
# build ssap reply
ssap_reply = reply_to_rdf_subscribe(ssap_msg_dict["node_id"],
ssap_msg_dict["space_id"],
ssap_msg_dict["transaction_id"],
s.result,
subreq_instance.virtual_subscription_id)
subreq_instance.conn.send(ssap_reply)
# if the current message represent a failure...
else:
confirms[info["node_id"]] = None
# send SSAP ERROR MESSAGE
err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
info["space_id"],
"SUBSCRIBE",
info["transaction_id"],
'<parameter name="status">m3:Error</parameter>')
for s in val_subscriptions:
if s.node_id == info["node_id"] and s.request_transaction_id == ["transaction_id"]:
s.conn.send(err_msg)
logger.error("SUBSCRIBE CONFIRM forwarding failed")
# RDF UNSUBSCRIBE CONFIRM
def handle_rdf_unsubscribe_confirm(logger, info, ssap_msg, confirms, kp_list, initial_results, active_subscriptions, clientsock, val_subscriptions):
"""This method is used to manage UNSUBSCRIBE CONFIRM received. """
# debug info
print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_unsubscribe_confirm"
logger.info("RDF UNSUBSCRIBE CONFIRM handled by handle_rdf_unsubscribe_confirm")
for s in val_subscriptions:
if str(s.virtual_subscription_id) == str(info["parameter_subscription_id"]):
# check if we already received a failure
if not confirms[info["node_id"]] == None:
# check if the current message represent a successful insertion
if info["parameter_status"] == "m3:Success":
confirms[info["node_id"]] -= 1
if confirms[info["node_id"]] == 0:
s.conn.send(ssap_msg)
# if the current message represent a failure...
else:
confirms[info["node_id"]] = None
# send SSAP ERROR MESSAGE
err_msg = SSAP_MESSAGE_CONFIRM_TEMPLATE%(info["node_id"],
info["space_id"],
"UNSUBSCRIBE",
info["transaction_id"],
'<parameter name="status">m3:Error</parameter><parameter name="subscription_id">virtual_sub_id</parameter>')
s.conn.send(ssap_reply)
logger.error("SUBSCRIBE CONFIRM forwarding failed")
# destroy the class instance
del s
##############################################################
#
# INDICATIONS
#
##############################################################
def handle_subscribe_indication(logger, ssap_msg, info, fromsocket, val_subscriptions):
    """Forward a SUBSCRIBE INDICATION coming from a real SIB to the KP
    that owns the matching virtual subscription."""
    # debug info
    print colored("treplies>", "green", attrs=["bold"]) + " handle_rdf_subscribe_indication"
    logger.info("SUBSCRIBE INDICATION handled by handle_subscribe_indication")
    for s in val_subscriptions:
        if str(s.virtual_subscription_id) == str(info["parameter_subscription_id"]):
            # send the message to the kp
            print "Inoltro la indication"
            try:
                s.conn.send(ssap_msg)
            except socket.error:
                # NOTE(review): delivery failure is only printed and the
                # subscription is kept alive -- confirm this is intended
                print "inoltro indication fallito"
            break
##############################################################
#
# UTILITIES
#
##############################################################
def reply_to_sparql_query(node_id, space_id, transaction_id, results):
    """Build the SSAP QUERY confirm message for a SPARQL query.

    `results` is a list of binding rows; each row is a sequence of
    (variable_name, _, value) entries. Returns the complete confirm
    message as a string."""
    # HEAD section: one <variable> entry per distinct variable name,
    # in first-appearance order.
    seen_vars = []
    for binding_row in results:
        for binding in binding_row:
            var_xml = SSAP_VARIABLE_TEMPLATE%(str(binding[0]))
            if var_xml not in seen_vars:
                seen_vars.append(var_xml)
    head = SSAP_HEAD_TEMPLATE%(''.join(seen_vars))
    # RESULTS section: one <result> per row, one <binding> per variable.
    rows = []
    for binding_row in results:
        bindings = ''.join(SSAP_BINDING_TEMPLATE%(binding[0], binding[2])
                           for binding in binding_row)
        rows.append(SSAP_RESULT_TEMPLATE%(bindings))
    results_string = SSAP_RESULTS_TEMPLATE%(''.join(rows))
    body = SSAP_RESULTS_SPARQL_PARAM_TEMPLATE%(head + results_string)
    # wrap everything in the standard confirm envelope
    return SSAP_MESSAGE_CONFIRM_TEMPLATE%(node_id,
                                          space_id,
                                          "QUERY",
                                          transaction_id,
                                          body)
def reply_to_rdf_query(node_id, space_id, transaction_id, results):
    """Build the SSAP QUERY confirm message for an RDF (template) query.

    `results` is a list of (subject, predicate, object) triples."""
    # serialize every triple, then wrap them in a triple-list element
    triples_xml = ''.join(SSAP_TRIPLE_TEMPLATE%(t[0], t[1], t[2])
                          for t in results)
    body = SSAP_RESULTS_RDF_PARAM_TEMPLATE%(SSAP_TRIPLE_LIST_TEMPLATE%(triples_xml))
    # finalize inside the standard confirm envelope
    return SSAP_MESSAGE_CONFIRM_TEMPLATE%(node_id,
                                          space_id,
                                          "QUERY",
                                          transaction_id,
                                          body)
def reply_to_rdf_subscribe(node_id, space_id, transaction_id, results, subscription_id):
    """Build the SSAP SUBSCRIBE confirm message for an RDF subscription.

    `results` is the list of initial (subject, predicate, object)
    triples; `subscription_id` is the virtual subscription id returned
    to the KP."""
    # serialize the initial triples and pair them with the subscription id
    triples_xml = ''.join(SSAP_TRIPLE_TEMPLATE%(t[0], t[1], t[2])
                          for t in results)
    body = SSAP_RESULTS_SUB_RDF_PARAM_TEMPLATE%(subscription_id,
                                                SSAP_TRIPLE_LIST_TEMPLATE%(triples_xml))
    # finalize inside the standard confirm envelope
    return SSAP_MESSAGE_CONFIRM_TEMPLATE%(node_id,
                                          space_id,
                                          "SUBSCRIBE",
                                          transaction_id,
                                          body)
|
bw4sz/DeepMeerkat | refs/heads/master | training/Detection/object_detection/core/box_list.py | 48 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow as tf
class BoxList(object):
    """Box collection.

    Boxes are stored as a single [N, 4] float32 tensor (rows are
    [y_min, x_min, y_max, x_max]) under the 'boxes' key of ``self.data``;
    optional per-box fields (weights, labels, ...) live under other keys
    and are assumed to align with the boxes along dimension 0.
    """

    def __init__(self, boxes):
        """Constructs box collection.

        Args:
          boxes: a tensor of shape [N, 4] representing box corners

        Raises:
          ValueError: if invalid dimensions for bbox data or if bbox data is not in
            float32 format.
        """
        # Static-shape validation only: rank must be 2, last dim exactly 4.
        if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
            raise ValueError('Invalid dimensions for box data.')
        if boxes.dtype != tf.float32:
            raise ValueError('Invalid tensor type: should be tf.float32')
        self.data = {'boxes': boxes}

    def num_boxes(self):
        """Returns number of boxes held in collection.

        Returns:
          a tensor representing the number of boxes held in the collection.
        """
        return tf.shape(self.data['boxes'])[0]

    def num_boxes_static(self):
        """Returns number of boxes held in collection.

        This number is inferred at graph construction time rather than run-time.

        Returns:
          Number of boxes held in collection (integer) or None if this is not
          inferrable at graph construction time.
        """
        # TF1 Dimension API: ``.value`` is None when N is dynamic.
        return self.data['boxes'].get_shape()[0].value

    def get_all_fields(self):
        """Returns all fields (including 'boxes')."""
        # NOTE: under Python 3 this is a live dict view, not a list.
        return self.data.keys()

    def get_extra_fields(self):
        """Returns all non-box fields (i.e., everything not named 'boxes')."""
        return [k for k in self.data.keys() if k != 'boxes']

    def add_field(self, field, field_data):
        """Add field to box list.

        This method can be used to add related box data such as
        weights/labels, etc.

        Args:
          field: a string key to access the data via `get`
          field_data: a tensor containing the data to store in the BoxList
        """
        # NOTE(review): silently overwrites an existing field of the
        # same name -- confirm callers rely on that.
        self.data[field] = field_data

    def has_field(self, field):
        # True when `field` (including 'boxes') is present in the collection.
        return field in self.data

    def get(self):
        """Convenience function for accessing box coordinates.

        Returns:
          a tensor with shape [N, 4] representing box coordinates.
        """
        return self.get_field('boxes')

    def set(self, boxes):
        """Convenience function for setting box coordinates.

        Args:
          boxes: a tensor of shape [N, 4] representing box corners

        Raises:
          ValueError: if invalid dimensions for bbox data
        """
        # Unlike __init__, the float32 dtype is not re-validated here.
        if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
            raise ValueError('Invalid dimensions for box data.')
        self.data['boxes'] = boxes

    def get_field(self, field):
        """Accesses a box collection and associated fields.

        This function returns specified field with object; if no field is specified,
        it returns the box coordinates.

        Args:
          field: this optional string parameter can be used to specify
            a related field to be accessed.

        Returns:
          a tensor representing the box collection or an associated field.

        Raises:
          ValueError: if invalid field
        """
        if not self.has_field(field):
            raise ValueError('field ' + str(field) + ' does not exist')
        return self.data[field]

    def set_field(self, field, value):
        """Sets the value of a field.

        Updates the field of a box_list with a given value.

        Args:
          field: (string) name of the field to set value.
          value: the value to assign to the field.

        Raises:
          ValueError: if the box_list does not have specified field.
        """
        # Unlike add_field, this refuses to create a new field.
        if not self.has_field(field):
            raise ValueError('field %s does not exist' % field)
        self.data[field] = value

    def get_center_coordinates_and_sizes(self, scope=None):
        """Computes the center coordinates, height and width of the boxes.

        Args:
          scope: name scope of the function.

        Returns:
          a list of 4 1-D tensors [ycenter, xcenter, height, width].
        """
        with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
            box_corners = self.get()
            # transpose -> unstack yields four [N] coordinate vectors
            ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
            width = xmax - xmin
            height = ymax - ymin
            ycenter = ymin + height / 2.
            xcenter = xmin + width / 2.
            return [ycenter, xcenter, height, width]

    def transpose_coordinates(self, scope=None):
        """Transpose the coordinate representation in a boxlist.

        Swaps to [x_min, y_min, x_max, y_max] ordering in place.

        Args:
          scope: name scope of the function.
        """
        with tf.name_scope(scope, 'transpose_coordinates'):
            y_min, x_min, y_max, x_max = tf.split(
                value=self.get(), num_or_size_splits=4, axis=1)
            self.set(tf.concat([x_min, y_min, x_max, y_max], 1))

    def as_tensor_dict(self, fields=None):
        """Retrieves specified fields as a dictionary of tensors.

        Args:
          fields: (optional) list of fields to return in the dictionary.
            If None (default), all fields are returned.

        Returns:
          tensor_dict: A dictionary of tensors specified by fields.

        Raises:
          ValueError: if specified field is not contained in boxlist.
        """
        tensor_dict = {}
        if fields is None:
            fields = self.get_all_fields()
        for field in fields:
            if not self.has_field(field):
                raise ValueError('boxlist must contain all specified fields')
            tensor_dict[field] = self.get_field(field)
        return tensor_dict
|
40223139/2015cdaa5-12 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/xml/dom/__init__.py | 873 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    __slots__ = ()

    # DOM implementations may use this as a base class for their own
    # Node implementations.  If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation.  Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.

    # nodeType values, matching the DOM Level 1 IDL constants.
    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
#ExceptionCode
# Numeric codes carried by the DOMException subclasses below, matching
# the W3C DOM ExceptionCode definitions.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Exceptions with specific codes are specializations of this class."""

    def __init__(self, *args, **kw):
        # Refuse direct instantiation: only concrete subclasses carry a
        # meaningful `code` attribute.
        if type(self) is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        super(DOMException, self).__init__(*args, **kw)

    def _get_code(self):
        # Accessor used by the DOM property machinery.
        return self.code
# Concrete DOMException specializations -- one class per ExceptionCode
# value defined above.
class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR

class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR

class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR

class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR

class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR

class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR

class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR

class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR

class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR

class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR

class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR

class SyntaxErr(DOMException):
    code = SYNTAX_ERR

class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR

class NamespaceErr(DOMException):
    code = NAMESPACE_ERR

class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR

class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002)
    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs, plus the "no namespace"/"no prefix"
# sentinels used throughout the DOM implementation.
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
|
duramato/SickRage | refs/heads/master | autoProcessTV/lib/requests/packages/chardet/latin1prober.py | 1777 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories tracked by Latin1Prober (index 0 is the
# "illegal transition" bucket and is never counted).
FREQ_CAT_NUM = 4

# Character classes for Latin-1 bytes.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each of the 256 possible byte values to one of the character
# classes above (8 entries per row, one row per 8 byte values).
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Likelihood of a (previous class, current class) transition in genuine
# Latin-1 text, indexed as [prev * CLASS_NUM + cur].
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Prober that scores how well a byte stream matches windows-1252
    (Latin-1 superset) text, using the class-transition tables above."""

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from the neutral "other" class with an empty histogram.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0 for _ in range(FREQ_CAT_NUM)]
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        filtered = self.filter_with_english_letters(aBuf)
        for raw_byte in filtered:
            cur_class = Latin1_CharToClass[wrap_ord(raw_byte)]
            likelihood = Latin1ClassModel[self._mLastCharClass * CLASS_NUM
                                          + cur_class]
            if likelihood == 0:
                # Illegal class transition: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[likelihood] += 1
            self._mLastCharClass = cur_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            # "very likely" transitions raise confidence, "very unlikely"
            # ones penalize it heavily; clamp at zero.
            confidence = ((self._mFreqCounter[3]
                           - self._mFreqCounter[1] * 20.0) / total)
            if confidence < 0.0:
                confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
|
arju88nair/projectCulminate | refs/heads/master | venv/lib/python3.5/site-packages/nltk/tokenize/api.py | 2 | # Natural Language Toolkit: Tokenizer Interface
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Tokenizer Interface
"""
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from nltk.internals import overridden
from nltk.tokenize.util import string_span_tokenize
@add_metaclass(ABCMeta)
class TokenizerI(object):
    """
    A processing interface for tokenizing a string.
    Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both),
    since each default implementation delegates to the other.
    """
    @abstractmethod
    def tokenize(self, s):
        """
        Return a tokenized copy of *s*.

        :rtype: list of str
        """
        # Fall back on a subclass-provided tokenize_sents(); overridden()
        # checks whether that method was actually redefined.
        if overridden(self.tokenize_sents):
            return self.tokenize_sents([s])[0]

    def span_tokenize(self, s):
        """
        Identify the tokens using integer offsets ``(start_i, end_i)``,
        where ``s[start_i:end_i]`` is the corresponding token.

        :rtype: iter(tuple(int, int))
        """
        raise NotImplementedError()

    def tokenize_sents(self, strings):
        """
        Apply ``self.tokenize()`` to each element of ``strings``.  I.e.:

            return [self.tokenize(s) for s in strings]

        :rtype: list(list(str))
        """
        return [self.tokenize(s) for s in strings]

    def span_tokenize_sents(self, strings):
        """
        Apply ``self.span_tokenize()`` to each element of ``strings``.  I.e.:

            return [self.span_tokenize(s) for s in strings]

        :rtype: iter(list(tuple(int, int)))
        """
        for s in strings:
            yield list(self.span_tokenize(s))
class StringTokenizer(TokenizerI):
    """A tokenizer that divides a string into substrings by splitting
    on the specified string (defined in subclasses).
    """

    def tokenize(self, s):
        # `_string` is the separator; subclasses are expected to set it.
        return s.split(self._string)

    def span_tokenize(self, s):
        # Yields (start, end) offsets of each token within `s`.
        for span in string_span_tokenize(s, self._string):
            yield span
|
saneyuki/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/test/mux_client_for_testing.py | 8 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing mux extension.
This code should be independent from mod_pywebsocket. See the comment of
client_for_testing.py.
NOTE: This code is far from robust like client_for_testing.py.
"""
import Queue
import base64
import collections
import email
import email.parser
import logging
import math
import os
import random
import socket
import struct
import threading
from mod_pywebsocket import util
from test import client_for_testing
# Channel 0 is reserved for mux control blocks; data channels start at 1.
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1

# Control-block opcodes of the WebSocket multiplexing extension draft.
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
class _ControlBlock:
    """Record for one parsed mux control block.

    Only `opcode` is set here; _parse_control_blocks attaches further
    attributes (channel_id, send_quota, ...) depending on the opcode."""
    def __init__(self, opcode):
        self.opcode = opcode
def _parse_handshake_response(response):
status_line, header_lines = response.split('\r\n', 1)
words = status_line.split(' ')
if len(words) < 3:
raise ValueError('Bad Status-Line syntax %r' % status_line)
[version, response_code] = words[:2]
if version != 'HTTP/1.1':
raise ValueError('Bad response version %r' % version)
if response_code != '101':
raise ValueError('Bad response code %r ' % response_code)
headers = email.parser.Parser().parsestr(header_lines)
return headers
def _parse_channel_id(data, offset=0):
length = len(data)
remaining = length - offset
if remaining <= 0:
raise Exception('No channel id found')
channel_id = ord(data[offset])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining < 4:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!L',
data[offset:offset+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining < 3:
raise Exception('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', data[offset+1:offset+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining < 2:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
channel_id_length = 2
return channel_id, channel_id_length
def _parse_number(data, offset=0):
first_byte = ord(data[offset])
if (first_byte & 0x80) != 0:
raise Exception('The MSB of number field must be unset')
first_byte = first_byte & 0x7f
if first_byte == 127:
if offset + 9 > len(data):
raise Exception('Invalid number')
return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
if first_byte == 126:
if offset + 3 > len(data):
raise Exception('Invalid number')
return struct.unpack('!H', data[offset+1:offset+3])[0], 3
return first_byte, 1
def _parse_size_and_contents(data, offset=0):
    """Read a length-prefixed field at *offset*.

    Returns (contents, total_bytes_consumed) where the total includes the
    length prefix itself.  Raises Exception when *data* is shorter than the
    declared size.
    """
    size, consumed = _parse_number(data, offset)
    begin = offset + consumed
    end = begin + size
    if len(data) < end:
        raise Exception('Invalid size of control block (%d < %d)' % (
            len(data), end))
    return data[begin:end], size + consumed
def _parse_control_blocks(data):
    """Parse the payload of a control-channel frame into _ControlBlock objects.

    Only AddChannelResponse, DropChannel, FlowControl and NewChannelSlot are
    supported; any other opcode raises Exception.
    """
    blocks = []
    length = len(data)
    pos = 0
    while pos < length:
        # The upper three bits of the first byte select the opcode; the low
        # bits carry per-opcode flags.
        first_byte = ord(data[pos])
        pos += 1
        opcode = (first_byte >> 5) & 0x7
        block = _ControlBlock(opcode)
        # TODO(bashi): Support more opcode
        if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
            block.encode = first_byte & 3
            block.rejected = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            encoded_handshake, advance = _parse_size_and_contents(data, pos)
            block.encoded_handshake = encoded_handshake
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_DROP_CHANNEL:
            block.mux_error = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            reason, advance = _parse_size_and_contents(data, pos)
            # An empty reason is legal; otherwise it must start with a
            # two-byte drop code followed by an optional message.
            if len(reason) == 0:
                block.drop_code = None
                block.drop_message = ''
            elif len(reason) >= 2:
                block.drop_code = struct.unpack('!H', reason[:2])[0]
                block.drop_message = reason[2:]
            else:
                raise Exception('Invalid DropChannel')
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_FLOW_CONTROL:
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            block.send_quota = send_quota
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
            fallback = first_byte & 1
            slots, advance = _parse_number(data, pos)
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            pos += advance
            # A fallback slot must not advertise any slots or quota.
            if fallback == 1 and (slots != 0 or send_quota != 0):
                raise Exception('slots and send_quota must be zero if F bit '
                                'is set')
            block.fallback = fallback
            block.slots = slots
            block.send_quota = send_quota
            blocks.append(block)
        else:
            raise Exception(
                'Unsupported mux opcode %d received' % opcode)
    return blocks
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
def _encode_number(number):
if number <= 125:
return chr(number)
elif number < (1 << 16):
return chr(0x7e) + struct.pack('!H', number)
elif number < (1 << 63):
return chr(0x7f) + struct.pack('!Q', number)
else:
raise Exception('Invalid number')
def _create_add_channel_request(channel_id, encoded_handshake,
                                encoding=0):
    """Serialize an AddChannelRequest control block.

    *encoding* occupies the low bits of the first byte; the handshake is
    emitted as a length-prefixed field.
    """
    lead = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
    size_field = _encode_number(len(encoded_handshake))
    return (chr(lead) + _encode_channel_id(channel_id) +
            size_field + encoded_handshake)
def _create_flow_control(channel_id, replenished_quota):
    """Serialize a FlowControl control block granting *replenished_quota*
    bytes on *channel_id*."""
    lead = _MUX_OPCODE_FLOW_CONTROL << 5
    return (chr(lead) + _encode_channel_id(channel_id) +
            _encode_number(replenished_quota))
class _MuxReaderThread(threading.Thread):
    """Mux reader thread.

    Reads frames and passes them to the mux client. This thread accesses
    private functions/variables of the mux client.
    """

    def __init__(self, mux):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self._mux = mux
        self._stop_requested = False

    def _receive_message(self):
        """Read one complete (possibly fragmented) physical message.

        Returns (first_opcode, message), or (None, None) if a stop was
        requested while reading.  Raises Exception on protocol violations
        (text frames, bad fragmentation, unknown opcodes).
        """
        first_opcode = None
        pending_payload = []
        while not self._stop_requested:
            fin, rsv1, rsv2, rsv3, opcode, payload_length = (
                client_for_testing.read_frame_header(self._mux._socket))
            if not first_opcode:
                if opcode == client_for_testing.OPCODE_TEXT:
                    raise Exception('Received a text message on physical '
                                    'connection')
                if opcode == client_for_testing.OPCODE_CONTINUATION:
                    raise Exception('Received an intermediate frame but '
                                    'fragmentation was not started')
                # Binary frames carry multiplexed data; Ping/Pong/Close are
                # physical-connection control frames.
                # Bug fix: the original tested OPCODE_PONG twice;
                # OPCODE_PING was clearly intended for one of the tests.
                if (opcode == client_for_testing.OPCODE_BINARY or
                    opcode == client_for_testing.OPCODE_PING or
                    opcode == client_for_testing.OPCODE_PONG or
                    opcode == client_for_testing.OPCODE_CLOSE):
                    first_opcode = opcode
                else:
                    raise Exception('Received an undefined opcode frame: %d' %
                                    opcode)
            elif opcode != client_for_testing.OPCODE_CONTINUATION:
                raise Exception('Received a new opcode before '
                                'terminating fragmentation')
            payload = client_for_testing.receive_bytes(
                self._mux._socket, payload_length)
            if self._mux._incoming_frame_filter is not None:
                payload = self._mux._incoming_frame_filter.filter(payload)
            pending_payload.append(payload)
            if fin:
                break
        if self._stop_requested:
            return None, None
        message = ''.join(pending_payload)
        return first_opcode, message

    def request_stop(self):
        """Ask the thread to exit at the next frame boundary."""
        self._stop_requested = True

    def run(self):
        try:
            while not self._stop_requested:
                # opcode is OPCODE_BINARY or a control opcode when a message
                # is successfully received.
                opcode, message = self._receive_message()
                if not opcode:
                    return
                if opcode == client_for_testing.OPCODE_BINARY:
                    channel_id, advance = _parse_channel_id(message)
                    self._mux._dispatch_frame(channel_id, message[advance:])
                else:
                    self._mux._process_control_message(opcode, message)
        finally:
            # Always let the client tear down the socket, even on error.
            self._mux._notify_reader_done()
class _InnerFrame(object):
def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
class _LogicalChannelData(object):
    """Per-logical-channel client state.

    queue holds received _InnerFrame objects; send_quota / receive_quota
    track the mux flow-control budget for the channel.
    """
    def __init__(self):
        self.queue = Queue.Queue()
        self.send_quota = 0
        self.receive_quota = 0
class MuxClient(object):
    """WebSocket mux client.

    Note that this class is NOT thread-safe. Do not access an instance of this
    class from multiple threads at a same time.
    """

    def __init__(self, options):
        self._logger = util.get_class_logger(self)
        self._options = options
        self._options.enable_mux()
        self._stream = None
        self._socket = None
        self._handshake = client_for_testing.WebSocketHandshake(self._options)
        self._incoming_frame_filter = None
        self._outgoing_frame_filter = None

        self._is_active = False
        self._read_thread = None
        self._control_blocks_condition = threading.Condition()
        self._control_blocks = []
        self._channel_slots = collections.deque()
        self._logical_channels_condition = threading.Condition()
        self._logical_channels = {}
        self._timeout = 2
        self._physical_connection_close_event = None
        self._physical_connection_close_message = None

    def _parse_inner_frame(self, data):
        """Decode one encapsulated (logical) frame.

        Returns an _InnerFrame.  Raises Exception on an empty frame.
        """
        if len(data) == 0:
            raise Exception('Invalid encapsulated frame received')
        first_byte = ord(data[0])
        # Bug fix: flag bits must be extracted with right shifts; the
        # original left-shifted (e.g. (first_byte << 7) & 1), which always
        # evaluated to 0 for every flag.
        fin = (first_byte >> 7) & 1
        rsv1 = (first_byte >> 6) & 1
        rsv2 = (first_byte >> 5) & 1
        rsv3 = (first_byte >> 4) & 1
        opcode = first_byte & 0xf

        # NOTE(review): this applies the *outgoing* frame filter to received
        # data; confirm whether _incoming_frame_filter was intended here.
        if self._outgoing_frame_filter:
            payload = self._outgoing_frame_filter.filter(
                data[1:])
        else:
            payload = data[1:]

        return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)

    def _process_mux_control_blocks(self):
        """Apply queued control blocks (flow control and new channel slots).

        AddChannelResponse blocks are intentionally left queued; they are
        consumed by add_channel().  Caller holds _control_blocks_condition.
        """
        for block in self._control_blocks:
            if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                # AddChannelResponse will be handled in add_channel().
                continue
            elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
                try:
                    self._logical_channels_condition.acquire()
                    if not block.channel_id in self._logical_channels:
                        raise Exception('Invalid flow control received for '
                                        'channel id %d' % block.channel_id)
                    self._logical_channels[block.channel_id].send_quota += (
                        block.send_quota)
                    self._logical_channels_condition.notify()
                finally:
                    self._logical_channels_condition.release()
            elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
                self._channel_slots.extend([block.send_quota] * block.slots)

    def _dispatch_frame(self, channel_id, payload):
        """Route a physical-frame payload to the control machinery or to the
        destination logical channel's receive queue."""
        if channel_id == _CONTROL_CHANNEL_ID:
            try:
                self._control_blocks_condition.acquire()
                self._control_blocks += _parse_control_blocks(payload)
                self._process_mux_control_blocks()
                self._control_blocks_condition.notify()
            finally:
                self._control_blocks_condition.release()
        else:
            try:
                self._logical_channels_condition.acquire()
                if not channel_id in self._logical_channels:
                    raise Exception('Received logical frame on channel id '
                                    '%d, which is not established' %
                                    channel_id)

                inner_frame = self._parse_inner_frame(payload)
                self._logical_channels[channel_id].receive_quota -= (
                    len(inner_frame.payload))
                if self._logical_channels[channel_id].receive_quota < 0:
                    raise Exception('The server violates quota on '
                                    'channel id %d' % channel_id)
            finally:
                self._logical_channels_condition.release()
            self._logical_channels[channel_id].queue.put(inner_frame)

    def _process_control_message(self, opcode, message):
        """Handle a control frame received on the physical connection."""
        # Ping/Pong are not supported.
        if opcode == client_for_testing.OPCODE_CLOSE:
            self._physical_connection_close_message = message
            if self._is_active:
                # Echo the closing handshake, then stop the reader.
                self._stream.send_close(
                    code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
                self._read_thread.request_stop()

            if self._physical_connection_close_event:
                self._physical_connection_close_event.set()

    def _notify_reader_done(self):
        """Called by the reader thread on exit; closes the socket."""
        self._logger.debug('Read thread terminated.')
        self.close_socket()

    def _assert_channel_slot_available(self):
        """Wait (once, up to self._timeout) for a NewChannelSlot; raise if
        none arrives."""
        try:
            self._control_blocks_condition.acquire()
            if len(self._channel_slots) == 0:
                # Wait once
                self._control_blocks_condition.wait(timeout=self._timeout)
        finally:
            self._control_blocks_condition.release()

        if len(self._channel_slots) == 0:
            raise Exception('Failed to receive NewChannelSlot')

    def _assert_send_quota_available(self, channel_id):
        """Wait (once, up to self._timeout) for send quota on *channel_id*;
        raise if none is granted."""
        try:
            self._logical_channels_condition.acquire()
            if self._logical_channels[channel_id].send_quota == 0:
                # Wait once
                self._logical_channels_condition.wait(timeout=self._timeout)
        finally:
            self._logical_channels_condition.release()

        if self._logical_channels[channel_id].send_quota == 0:
            raise Exception('Failed to receive FlowControl for channel id %d' %
                            channel_id)

    def connect(self):
        """Open the physical connection, perform the handshake, start the
        reader thread, and wait for the initial slot/quota grants."""
        self._socket = client_for_testing.connect_socket_with_retry(
            self._options.server_host,
            self._options.server_port,
            self._options.socket_timeout,
            self._options.use_tls)

        self._handshake.handshake(self._socket)
        self._stream = client_for_testing.WebSocketStream(
            self._socket, self._handshake)

        self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()

        self._read_thread = _MuxReaderThread(self)
        self._read_thread.start()

        self._assert_channel_slot_available()
        self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)

        self._is_active = True
        self._logger.info('Connection established')

    def add_channel(self, channel_id, options):
        """Open logical channel *channel_id*, sending an AddChannelRequest
        built from *options* and waiting for the matching response."""
        if not self._is_active:
            raise Exception('Mux client is not active')
        if channel_id in self._logical_channels:
            raise Exception('Channel id %d already exists' % channel_id)
        try:
            send_quota = self._channel_slots.popleft()
        except IndexError as e:
            raise Exception('No channel slots: %r' % e)

        # Create AddChannel request
        request_line = 'GET %s HTTP/1.1\r\n' % options.resource
        fields = []
        if options.server_port == client_for_testing.DEFAULT_PORT:
            fields.append('Host: %s\r\n' % options.server_host.lower())
        else:
            fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
                                               options.server_port))
        fields.append('Origin: %s\r\n' % options.origin.lower())
        fields.append('Connection: Upgrade\r\n')

        if len(options.extensions) > 0:
            fields.append('Sec-WebSocket-Extensions: %s\r\n' %
                          ', '.join(options.extensions))

        handshake = request_line + ''.join(fields) + '\r\n'
        add_channel_request = _create_add_channel_request(
            channel_id, handshake)
        payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
        self._stream.send_binary(payload)

        # Wait AddChannelResponse
        self._logger.debug('Waiting AddChannelResponse for the request...')
        response = None
        try:
            self._control_blocks_condition.acquire()
            while True:
                for block in self._control_blocks:
                    if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
                        continue
                    if block.channel_id == channel_id:
                        response = block
                        self._control_blocks.remove(response)
                        break
                if response:
                    break
                self._control_blocks_condition.wait(self._timeout)
                if not self._is_active:
                    raise Exception('AddChannelRequest timed out')
        finally:
            self._control_blocks_condition.release()

        # Validate AddChannelResponse
        if response.rejected:
            raise Exception('The server rejected AddChannelRequest')
        fields = _parse_handshake_response(response.encoded_handshake)
        # Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
        # headers exist?

        self._logical_channels_condition.acquire()
        self._logical_channels[channel_id] = _LogicalChannelData()
        self._logical_channels[channel_id].send_quota = send_quota
        self._logical_channels_condition.release()

        self._logger.debug('Logical channel %d established' % channel_id)

    def _check_logical_channel_is_opened(self, channel_id):
        """Raise unless the client is active and *channel_id* is open."""
        if not self._is_active:
            raise Exception('Mux client is not active')
        if not channel_id in self._logical_channels:
            # Bug fix: the original omitted the % format argument, so the
            # message contained a literal '%d'.
            raise Exception('Logical channel %d is not established.' %
                            channel_id)

    def drop_channel(self, channel_id):
        # TODO(bashi): Implement
        pass

    def send_flow_control(self, channel_id, replenished_quota):
        """Grant the server *replenished_quota* more bytes on *channel_id*."""
        self._check_logical_channel_is_opened(channel_id)
        flow_control = _create_flow_control(channel_id, replenished_quota)
        payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
        # Replenish receive quota
        try:
            self._logical_channels_condition.acquire()
            self._logical_channels[channel_id].receive_quota += (
                replenished_quota)
        finally:
            self._logical_channels_condition.release()
        self._stream.send_binary(payload)

    def send_message(self, channel_id, message, end=True, binary=False):
        """Send a text (default) or binary message on *channel_id*.

        Raises if sending would exceed the channel's current send quota.
        """
        self._check_logical_channel_is_opened(channel_id)

        if binary:
            first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
        else:
            first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
            message = message.encode('utf-8')

        try:
            self._logical_channels_condition.acquire()
            if self._logical_channels[channel_id].send_quota < len(message):
                raise Exception('Send quota violation: %d < %d' % (
                    self._logical_channels[channel_id].send_quota,
                    len(message)))

            self._logical_channels[channel_id].send_quota -= len(message)
        finally:
            self._logical_channels_condition.release()
        payload = _encode_channel_id(channel_id) + chr(first_byte) + message
        self._stream.send_binary(payload)

    def assert_receive(self, channel_id, payload, binary=False):
        """Receive one message on *channel_id* and assert its opcode and
        payload match the expectation."""
        self._check_logical_channel_is_opened(channel_id)

        try:
            inner_frame = self._logical_channels[channel_id].queue.get(
                timeout=self._timeout)
        except Queue.Empty as e:
            raise Exception('Cannot receive message from channel id %d' %
                            channel_id)

        if binary:
            opcode = client_for_testing.OPCODE_BINARY
        else:
            opcode = client_for_testing.OPCODE_TEXT

        if inner_frame.opcode != opcode:
            # Bug fix: the original referenced the undefined name
            # 'expected_opcode' here, raising NameError instead of the
            # intended message.
            raise Exception('Unexpected opcode received (%r != %r)' %
                            (opcode, inner_frame.opcode))

        if inner_frame.payload != payload:
            raise Exception('Unexpected payload received')

    def send_close(self, channel_id, code=None, reason=''):
        """Send a Close frame on *channel_id* with optional status code and
        reason."""
        self._check_logical_channel_is_opened(channel_id)

        if code is not None:
            body = struct.pack('!H', code) + reason.encode('utf-8')
        else:
            body = ''

        first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
        payload = _encode_channel_id(channel_id) + chr(first_byte) + body
        self._stream.send_binary(payload)

    def assert_receive_close(self, channel_id):
        """Receive one message on *channel_id* and assert it is a Close."""
        self._check_logical_channel_is_opened(channel_id)

        try:
            inner_frame = self._logical_channels[channel_id].queue.get(
                timeout=self._timeout)
        except Queue.Empty as e:
            raise Exception('Cannot receive message from channel id %d' %
                            channel_id)
        if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
            raise Exception('Didn\'t receive close frame')

    def send_physical_connection_close(self, code=None, reason=''):
        """Start the closing handshake on the physical connection."""
        self._physical_connection_close_event = threading.Event()
        self._stream.send_close(code, reason)

    # This method can be used only after calling
    # send_physical_connection_close().
    def assert_physical_connection_receive_close(
        self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
        self._physical_connection_close_event.wait(timeout=self._timeout)
        if (not self._physical_connection_close_event.isSet() or
            not self._physical_connection_close_message):
            raise Exception('Didn\'t receive closing handshake')

    def close_socket(self):
        """Deactivate the client and close the physical socket."""
        self._is_active = False
        self._socket.close()
|
zarboz/Ville-Z_Blackout_edition | refs/heads/showpreals2w | scripts/build-all.py | 1250 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
import os
import os.path
import shutil
import subprocess
import sys
from optparse import OptionParser
version = 'build-all.py, version 0.01'

# Directory (relative to the kernel tree) receiving per-target build output.
build_dir = '../all-kernels'
# Default make targets; may be replaced by --oldconfig/--make-target.
make_command = ["vmlinux", "modules"]
# Build environment: always cross-compile for ARM; KCONFIG_NOTIMESTAMP keeps
# savedefconfig output stable across runs.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true' })
# Parsed command-line options; assigned in main() and read globally.
all_options = {}
def error(msg):
    """Print *msg* as an error to stderr (does not exit)."""
    sys.stderr.write("error: %s\n" % msg)
def fail(msg):
    """Print *msg* as an error and exit with status 1."""
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # Two marker files that exist in every MSM kernel tree.
    if (not os.path.isfile('MAINTAINERS') or
        not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Bug fix: errno was referenced here but never imported, so this
            # handler raised NameError instead of tolerating a race where
            # another process created the directory first.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def update_config(file, str):
    """Append the config option line *str* to the defconfig at *file*."""
    print 'Updating %s with \'%s\'\n' % (file, str)
    # Append-mode open: the option is added after the existing contents.
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree.

    Returns a dict mapping target name (defconfig basename without the
    '_defconfig' suffix) to the defconfig path.
    """
    patterns = ('arch/arm/configs/[fm]sm[0-9-]*_defconfig',
                'arch/arm/configs/qsd*_defconfig',
                'arch/arm/configs/apq*_defconfig')
    names = {}
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing '_defconfig' (10 characters).
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs a make command, teeing its output to a log file.

    In verbose mode output is echoed to stdout; otherwise progress dots
    are printed, one per line of build output.
    """
    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        """Run *args* as a subprocess; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read from the raw fd so partial lines are forwarded promptly.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per output line; wrap the dots every 64 columns.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()

        self.fd.close()
        return result
# Targets that failed to build; only populated when --keep-going is given.
failed_targets = []
def build(target):
    """Configure and build one *target*, logging into the build directory."""
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()

    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)

        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build every target in *targets*; report accumulated failures at the
    end (only reached when --keep-going deferred them)."""
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)

    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse command-line options and dispatch the requested builds."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))

    (options, args) = parser.parse_args()
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    # Parallelism flags are forwarded straight to make.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
|
makkalot/func | refs/heads/master | func/overlord/modules/netapp.py | 8 | ##
## Overlord library to interface with minion-side netapp operations
##
## Most of this is just wrappers to create some cleaner, earier to use
## interfaces. Also allows users to get function signatures and use
## nice things like kwargs client side, for those of us who can't live
## without ipython introspection.
##
## Copyright 2008, Red Hat, Inc
## John Eckersberg <jeckersb@redhat.com>
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
from func.overlord.client import Client
class RemoteError(Exception):
    """Raised when a minion-side call returns a REMOTE_ERROR triple."""
def _(res):
if type(res) == type([]) and res[0] == 'REMOTE_ERROR':
raise RemoteError, res[2]
else:
return res
class Filer(Client):
    """Overlord-side wrapper for minion netapp.* operations on one filer.

    Every method issues the corresponding func call through the admin host,
    extracts that host's result, and unwraps remote errors via _().
    """

    def __init__(self, filer, admin_host):
        Client.__init__(self, admin_host)
        self.filer = filer
        self.admin_host = admin_host

    def create_volume(self, vol, aggr, size):
        """Create volume *vol* of *size* on aggregate *aggr*."""
        return _(self.netapp.vol.create(self.filer, vol, aggr, size)[self.admin_host])

    def destroy_volume(self, vol):
        """Destroy *vol*, best-effort offlining it first."""
        # offline it first
        try:
            self.netapp.vol.offline(self.filer, vol)
        except Exception:
            # Bug fix: was a bare 'except:'; keep the best-effort semantics
            # (the volume may already be offline) without also swallowing
            # KeyboardInterrupt/SystemExit.
            pass
        return _(self.netapp.vol.destroy(self.filer, vol)[self.admin_host])

    def offline_volume(self, vol):
        """Take *vol* offline."""
        return _(self.netapp.vol.offline(self.filer, vol)[self.admin_host])

    def online_volume(self, vol):
        """Bring *vol* online."""
        return _(self.netapp.vol.online(self.filer, vol)[self.admin_host])

    def get_volume_size(self, vol):
        """Return the current size of *vol*."""
        return _(self.netapp.vol.size(self.filer, vol)[self.admin_host])

    def resize_volume(self, vol, delta):
        """Grow/shrink *vol* by *delta* (e.g. '+10g')."""
        return _(self.netapp.vol.size(self.filer, vol, delta)[self.admin_host])

    def create_snapshot(self, vol, snap):
        """Create snapshot *snap* of *vol*."""
        return _(self.netapp.snap.create(self.filer, vol, snap)[self.admin_host])

    def delete_snapshot(self, vol, snap):
        """Delete snapshot *snap* of *vol*."""
        return _(self.netapp.snap.delete(self.filer, vol, snap)[self.admin_host])

    def create_clone_volume(self, vol, parent, snap):
        """Clone *vol* from *parent*'s snapshot *snap*."""
        return _(self.netapp.vol.clone.create(self.filer, vol, parent, snap)[self.admin_host])

    def split_clone_volume(self, vol):
        """Split clone *vol* from its parent."""
        return _(self.netapp.vol.clone.split(self.filer, vol)[self.admin_host])

    def list_volumes(self):
        """Return the names of all volumes on the filer."""
        vols = _(self.netapp.vol.status(self.filer))
        return_list = []
        for vol in vols:
            return_list.append(vol['name'])
        return return_list

    def volume_details(self, vol=None):
        """Return status details for *vol*, or for all volumes if None."""
        if vol:
            return _(self.netapp.vol.status(self.filer, vol)[self.admin_host])
        else:
            return _(self.netapp.vol.status(self.filer)[self.admin_host])
|
hoffmabc/OpenBazaar-Server | refs/heads/master | dht/protocol.py | 4 | """
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import random
from twisted.internet import defer
from zope.interface import implements
import nacl.signing
from dht.node import Node
from dht.routing import RoutingTable
from dht.utils import digest
from log import Logger
from rpcudp import RPCProtocol
from interfaces import MessageProcessor
from protos import objects
from protos.message import PING, STUN, STORE, DELETE, FIND_NODE, FIND_VALUE, HOLE_PUNCH
class KademliaProtocol(RPCProtocol):
implements(MessageProcessor)
def __init__(self, sourceNode, storage, ksize):
self.ksize = ksize
self.router = RoutingTable(self, ksize, sourceNode)
self.storage = storage
self.sourceNode = sourceNode
self.multiplexer = None
self.log = Logger(system=self)
self.handled_commands = [PING, STUN, STORE, DELETE, FIND_NODE, FIND_VALUE, HOLE_PUNCH]
RPCProtocol.__init__(self, sourceNode.getProto(), self.router)
def connect_multiplexer(self, multiplexer):
self.multiplexer = multiplexer
def getRefreshIDs(self):
"""
Get ids to search for to keep old buckets up to date.
"""
ids = []
for bucket in self.router.getLonelyBuckets():
ids.append(random.randint(*bucket.range))
return ids
def rpc_stun(self, sender):
self.addToRouter(sender)
return [sender.ip, str(sender.port)]
def rpc_ping(self, sender):
self.addToRouter(sender)
return [self.sourceNode.getProto().SerializeToString()]
def rpc_store(self, sender, keyword, key, value):
self.addToRouter(sender)
self.log.debug("got a store request from %s, storing value" % str(sender))
if len(keyword) == 20 and len(key) <= 33 and len(value) <= 1800:
self.storage[keyword] = (key, value)
return ["True"]
else:
return ["False"]
def rpc_delete(self, sender, keyword, key, signature):
self.addToRouter(sender)
value = self.storage.getSpecific(keyword, key)
if value is not None:
# Try to delete a message from the dht
if keyword == digest(sender.id):
try:
verify_key = nacl.signing.VerifyKey(sender.signed_pubkey[64:])
verify_key.verify(key, signature)
self.storage.delete(keyword, key)
return ["True"]
except Exception:
return ["False"]
# Or try to delete a pointer
else:
try:
node = objects.Node()
node.ParseFromString(value)
pubkey = node.signedPublicKey[64:]
try:
verify_key = nacl.signing.VerifyKey(pubkey)
verify_key.verify(signature + key)
self.storage.delete(keyword, key)
return ["True"]
except Exception:
return ["False"]
except Exception:
pass
return ["False"]
def rpc_find_node(self, sender, key):
self.log.info("finding neighbors of %s in local table" % key.encode('hex'))
self.addToRouter(sender)
node = Node(key)
nodeList = self.router.findNeighbors(node, exclude=sender)
ret = []
for n in nodeList:
ret.append(n.getProto().SerializeToString())
return ret
def rpc_find_value(self, sender, key):
self.addToRouter(sender)
ret = ["value"]
value = self.storage.get(key, None)
if value is None:
return self.rpc_find_node(sender, key)
ret.extend(value)
return ret
def callFindNode(self, nodeToAsk, nodeToFind):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.find_node(address, nodeToFind.id)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def callFindValue(self, nodeToAsk, nodeToFind):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.find_value(address, nodeToFind.id)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def callPing(self, nodeToAsk):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.ping(address)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def callStore(self, nodeToAsk, keyword, key, value):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.store(address, keyword, key, value)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def callDelete(self, nodeToAsk, keyword, key, signature):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.delete(address, keyword, key, signature)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def transferKeyValues(self, node):
"""
Given a new node, send it all the keys/values it should be storing.
@param node: A new node that just joined (or that we just found out
about).
Process:
For each key in storage, get k closest nodes. If newnode is closer
than the furtherst in that list, and the node for this server
is closer than the closest in that list, then store the key/value
on the new node (per section 2.5 of the paper)
"""
ds = []
for keyword in self.storage.iterkeys():
keynode = Node(keyword)
neighbors = self.router.findNeighbors(keynode, exclude=node)
if len(neighbors) > 0:
newNodeClose = node.distanceTo(keynode) < neighbors[-1].distanceTo(keynode)
thisNodeClosest = self.sourceNode.distanceTo(keynode) < neighbors[0].distanceTo(keynode)
if len(neighbors) == 0 \
or (newNodeClose and thisNodeClosest) \
or (thisNodeClosest and len(neighbors) < self.ksize):
for k, v in self.storage.iteritems(keyword):
ds.append(self.callStore(node, keyword, k, v))
return defer.gatherResults(ds)
def handleCallResponse(self, result, node):
"""
If we get a response, add the node to the routing table. If
we get no response, make sure it's removed from the routing table.
"""
if result[0]:
if self.router.isNewNode(node):
self.transferKeyValues(node)
self.log.info("got response from %s, adding to router" % node)
self.router.addContact(node)
else:
self.log.debug("no response from %s, removing from router" % node)
self.router.removeContact(node)
return result
def addToRouter(self, node):
    """
    Called by rpc_ functions when a node sends them a request.
    We add the node to our router and transfer our stored values
    if they are new and within our neighborhood.
    """
    if not self.router.isNewNode(node):
        self.router.addContact(node)
        return
    self.log.debug("Found a new node, transferring key/values")
    self.transferKeyValues(node)
    self.router.addContact(node)
def __iter__(self):
    """Yield the RPC command names this protocol responds to."""
    for command in self.handled_commands:
        yield command
|
chouseknecht/openshift-restclient-python | refs/heads/master | openshift/test/test_v1_stateful_set.py | 1 | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. 
Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_stateful_set import V1StatefulSet
class TestV1StatefulSet(unittest.TestCase):
    """Unit test stubs for the generated V1StatefulSet model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV1StatefulSet(self):
        """
        Test V1StatefulSet
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = openshift.client.models.v1_stateful_set.V1StatefulSet()
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
|
tomkralidis/QGIS | refs/heads/master | python/plugins/processing/algs/grass7/ext/i_segment.py | 45 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_segment.py
------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from .i import regroupRasters
def processCommand(alg, parameters, context, feedback):
    """Custom command builder for GRASS ``i.segment``.

    Collects the 'input' rasters into a GRASS imagery 'group' parameter
    before delegating to the algorithm's default command processing.
    """
    # Regroup rasters
    regroupRasters(alg, parameters, context, 'input', 'group')
    alg.processCommand(parameters, context, feedback)
|
Kast0rTr0y/ansible | refs/heads/devel | lib/ansible/modules/files/template.py | 20 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible tooling (ansible-doc).
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'core',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates:
C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
C(template_host) contains the node name of the template's machine.
C(template_uid) the numeric user id of the owner.
C(template_path) the path of the template.
C(template_fullpath) is the absolute path of the template.
C(template_run_date) is the date that the template was rendered."
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes:
- Including a string that uses a date in the template will result in the template being marked 'changed' each time
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: 0644
# The same example, but using symbolic modes equivalent to 0644
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: "u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
# Update sshd configuration safely, avoid locking yourself out
- template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
|
mafiya69/sympy | refs/heads/master | sympy/polys/fields.py | 29 | """Sparse rational function fields. """
from __future__ import print_function, division
from operator import add, mul, lt, le, gt, ge
from sympy.core.compatibility import is_sequence, reduce, string_types
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol
from sympy.core.sympify import CantSympify, sympify
from sympy.polys.rings import PolyElement
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import _parallel_dict_from_expr
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.domains.fractionfield import FractionField
from sympy.polys.constructor import construct_domain
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
@public
def field(symbols, domain, order=lex):
    """Create a :class:`FracField` and return it together with its
    generators as ``(field, x1, ..., xn)``. """
    frac_field = FracField(symbols, domain, order)
    return (frac_field,) + frac_field.gens
@public
def xfield(symbols, domain, order=lex):
    """Create a :class:`FracField` and return it with its generators
    grouped in a tuple: ``(field, (x1, ..., xn))``. """
    frac_field = FracField(symbols, domain, order)
    return (frac_field, frac_field.gens)
@public
def vfield(symbols, domain, order=lex):
    """Create a :class:`FracField` and inject its generators into the
    caller's global namespace (interactive convenience). """
    frac_field = FracField(symbols, domain, order)
    names = [sym.name for sym in frac_field.symbols]
    pollute(names, frac_field.gens)
    return frac_field
@public
def sfield(exprs, *symbols, **options):
    """Construct a field deriving generators and domain
    from options and input expressions.
    Parameters
    ----------
    exprs : :class:`Expr` or sequence of :class:`Expr` (sympifiable)
    symbols : sequence of :class:`Symbol`/:class:`Expr`
    options : keyword arguments understood by :class:`Options`
    Examples
    ========
    >>> from sympy.core import symbols
    >>> from sympy.functions import exp, log
    >>> from sympy.polys.fields import sfield
    >>> x = symbols("x")
    >>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2)
    >>> K
    Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order
    >>> f
    (4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5)
    """
    single = False
    if not is_sequence(exprs):
        exprs, single = [exprs], True
    exprs = list(map(sympify, exprs))
    opt = build_options(symbols, options)
    numdens = []
    for expr in exprs:
        # Interleave numerators and denominators: [n0, d0, n1, d1, ...]
        # so one parallel parse covers both parts of every expression.
        numdens.extend(expr.as_numer_denom())
    reps, opt = _parallel_dict_from_expr(numdens, opt)
    if opt.domain is None:
        # NOTE: this is inefficient because construct_domain() automatically
        # performs conversion to the target domain. It shouldn't do this.
        coeffs = sum([list(rep.values()) for rep in reps], [])
        opt.domain, _ = construct_domain(coeffs, opt=opt)
    _field = FracField(opt.gens, opt.domain, opt.order)
    fracs = []
    for i in range(0, len(reps), 2):
        # Each consecutive (numer, denom) pair becomes one field element.
        fracs.append(_field(tuple(reps[i:i+2])))
    if single:
        return (_field, fracs[0])
    else:
        return (_field, fracs)
_field_cache = {}
class FracField(DefaultPrinting):
    """Multivariate distributed rational function field. """
    def __new__(cls, symbols, domain, order=lex):
        from sympy.polys.rings import PolyRing
        # The field shares symbols/domain/order with an underlying
        # polynomial ring; construct it first to normalize the arguments.
        ring = PolyRing(symbols, domain, order)
        symbols = ring.symbols
        ngens = ring.ngens
        domain = ring.domain
        order = ring.order
        _hash = hash((cls.__name__, symbols, ngens, domain, order))
        obj = _field_cache.get(_hash)
        if obj is None:
            obj = object.__new__(cls)
            obj._hash = _hash
            obj.ring = ring
            # Every field gets its own element subclass so that elements
            # reach their parent field through the class attribute.
            obj.dtype = type("FracElement", (FracElement,), {"field": obj})
            obj.symbols = symbols
            obj.ngens = ngens
            obj.domain = domain
            obj.order = order
            obj.zero = obj.dtype(ring.zero)
            obj.one = obj.dtype(ring.one)
            obj.gens = obj._gens()
            # Expose generators as attributes (e.g. field.x) where the
            # symbol name does not clash with an existing attribute.
            for symbol, generator in zip(obj.symbols, obj.gens):
                if isinstance(symbol, Symbol):
                    name = symbol.name
                    if not hasattr(obj, name):
                        setattr(obj, name, generator)
            _field_cache[_hash] = obj
        return obj
    def _gens(self):
        """Return a list of polynomial generators. """
        return tuple([ self.dtype(gen) for gen in self.ring.gens ])
    def __getnewargs__(self):
        # Support pickling: these args recreate the field via __new__.
        return (self.symbols, self.domain, self.order)
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        # Fields are interned in _field_cache, so identity is equality.
        return self is other
    def __ne__(self, other):
        return self is not other
    def raw_new(self, numer, denom=None):
        # Build an element without cancelling common factors.
        return self.dtype(numer, denom)
    def new(self, numer, denom=None):
        # Build an element in cancelled (coprime) form.
        if denom is None: denom = self.ring.one
        numer, denom = numer.cancel(denom)
        return self.raw_new(numer, denom)
    def domain_new(self, element):
        # Coerce into the ground domain.
        return self.domain.convert(element)
    def ground_new(self, element):
        """Construct a field element from a ground-domain value."""
        try:
            return self.new(self.ring.ground_new(element))
        except CoercionFailed:
            # The ring's domain rejected it (e.g. 1/2 over ZZ); retry in
            # the associated field of fractions and split numer/denom.
            domain = self.domain
            if not domain.has_Field and domain.has_assoc_Field:
                ring = self.ring
                ground_field = domain.get_field()
                element = ground_field.convert(element)
                numer = ring.ground_new(ground_field.numer(element))
                denom = ring.ground_new(ground_field.denom(element))
                return self.raw_new(numer, denom)
            else:
                raise
    def field_new(self, element):
        """Coerce *element* (frac/poly/tuple/Expr/ground value) into this field."""
        if isinstance(element, FracElement):
            if self == element.field:
                return element
            else:
                raise NotImplementedError("conversion")
        elif isinstance(element, PolyElement):
            # Clear denominators so the numerator lives in self.ring.
            denom, numer = element.clear_denoms()
            numer = numer.set_ring(self.ring)
            denom = self.ring.ground_new(denom)
            return self.raw_new(numer, denom)
        elif isinstance(element, tuple) and len(element) == 2:
            numer, denom = list(map(self.ring.ring_new, element))
            return self.new(numer, denom)
        elif isinstance(element, string_types):
            raise NotImplementedError("parsing")
        elif isinstance(element, Expr):
            return self.from_expr(element)
        else:
            return self.ground_new(element)
    __call__ = field_new
    def _rebuild_expr(self, expr, mapping):
        """Recursively rebuild an Expr tree as a field element, mapping
        known symbols to generators and coercing leaves into the domain."""
        domain = self.domain
        def _rebuild(expr):
            generator = mapping.get(expr)
            if generator is not None:
                return generator
            elif expr.is_Add:
                return reduce(add, list(map(_rebuild, expr.args)))
            elif expr.is_Mul:
                return reduce(mul, list(map(_rebuild, expr.args)))
            elif expr.is_Pow and expr.exp.is_Integer:
                return _rebuild(expr.base)**int(expr.exp)
            else:
                try:
                    return domain.convert(expr)
                except CoercionFailed:
                    if not domain.has_Field and domain.has_assoc_Field:
                        return domain.get_field().convert(expr)
                    else:
                        raise
        return _rebuild(sympify(expr))
    def from_expr(self, expr):
        """Convert a SymPy expression to an element of this field.

        Raises ValueError when the expression is not representable."""
        mapping = dict(list(zip(self.symbols, self.gens)))
        try:
            frac = self._rebuild_expr(expr, mapping)
        except CoercionFailed:
            raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr))
        else:
            return self.field_new(frac)
    def to_domain(self):
        # Wrap this field as a polys domain object.
        return FractionField(self)
    def to_ring(self):
        # The polynomial ring with the same symbols/domain/order.
        from sympy.polys.rings import PolyRing
        return PolyRing(self.symbols, self.domain, self.order)
class FracElement(DomainElement, DefaultPrinting, CantSympify):
    """Element of multivariate distributed rational function field.

    Stored as a pair ``numer/denom`` of ring elements; the ``new``
    constructor keeps the pair in cancelled (coprime) form.
    """
    def __init__(self, numer, denom=None):
        if denom is None:
            denom = self.field.ring.one
        elif not denom:
            raise ZeroDivisionError("zero denominator")
        self.numer = numer
        self.denom = denom
    def raw_new(f, numer, denom):
        # Construct without cancelling common factors.
        return f.__class__(numer, denom)
    def new(f, numer, denom):
        # Construct after cancelling common factors.
        return f.raw_new(*numer.cancel(denom))
    def to_poly(f):
        """Return the numerator as a polynomial; requires ``f.denom == 1``."""
        if f.denom != 1:
            raise ValueError("f.denom should be 1")
        return f.numer
    def parent(self):
        return self.field.to_domain()
    def __getnewargs__(self):
        return (self.field, self.numer, self.denom)
    _hash = None
    def __hash__(self):
        # Lazily computed and cached, since elements are immutable in use.
        _hash = self._hash
        if _hash is None:
            self._hash = _hash = hash((self.field, self.numer, self.denom))
        return _hash
    def copy(self):
        return self.raw_new(self.numer.copy(), self.denom.copy())
    def set_field(self, new_field):
        """Return this element converted into *new_field*."""
        if self.field == new_field:
            return self
        else:
            new_ring = new_field.ring
            numer = self.numer.set_ring(new_ring)
            denom = self.denom.set_ring(new_ring)
            return new_field.new(numer, denom)
    def as_expr(self, *symbols):
        return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols)
    def __eq__(f, g):
        if isinstance(g, f.field.dtype):
            return f.numer == g.numer and f.denom == g.denom
        else:
            # Compare against a ground value: denom must be exactly one.
            return f.numer == g and f.denom == f.field.ring.one
    def __ne__(f, g):
        return not f.__eq__(g)
    def __nonzero__(f):
        return bool(f.numer)
    __bool__ = __nonzero__
    def sort_key(self):
        return (self.denom.sort_key(), self.numer.sort_key())
    def _cmp(f1, f2, op):
        # Orderings are only defined between elements of the same field.
        if isinstance(f2, f1.field.dtype):
            return op(f1.sort_key(), f2.sort_key())
        else:
            return NotImplemented
    def __lt__(f1, f2):
        return f1._cmp(f2, lt)
    def __le__(f1, f2):
        return f1._cmp(f2, le)
    def __gt__(f1, f2):
        return f1._cmp(f2, gt)
    def __ge__(f1, f2):
        return f1._cmp(f2, ge)
    def __pos__(f):
        """Return a copy of ``f`` (unary plus leaves coefficients unchanged). """
        return f.raw_new(f.numer, f.denom)
    def __neg__(f):
        """Negate all coefficients in ``f``. """
        return f.raw_new(-f.numer, f.denom)
    def _extract_ground(self, element):
        """Try coercing *element* into the ground domain (or its field of
        fractions). Returns ``(1, value, None)`` on domain success,
        ``(-1, numer, denom)`` via the associated field, ``(0, None, None)``
        on failure."""
        domain = self.field.domain
        try:
            element = domain.convert(element)
        except CoercionFailed:
            if not domain.has_Field and domain.has_assoc_Field:
                ground_field = domain.get_field()
                try:
                    element = ground_field.convert(element)
                except CoercionFailed:
                    pass
                else:
                    return -1, ground_field.numer(element), ground_field.denom(element)
            return 0, None, None
        else:
            return 1, element, None
    def __add__(f, g):
        """Add rational functions ``f`` and ``g``. """
        field = f.field
        if not g:
            return f
        elif not f:
            return g
        elif isinstance(g, field.dtype):
            if f.denom == g.denom:
                return f.new(f.numer + g.numer, f.denom)
            else:
                return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer + f.denom*g, f.denom)
        else:
            # g belongs to another field/ring: either our ground domain
            # contains it (fall through), or its domain contains us
            # (delegate to the reflected method), or it is unrelated.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__radd__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__radd__(f)
            return f.__radd__(g)
    def __radd__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(f.numer + f.denom*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.numer + f.denom*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
    def __sub__(f, g):
        """Subtract rational functions ``f`` and ``g``. """
        field = f.field
        if not g:
            return f
        elif not f:
            return -g
        elif isinstance(g, field.dtype):
            if f.denom == g.denom:
                return f.new(f.numer - g.numer, f.denom)
            else:
                return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer - f.denom*g, f.denom)
        else:
            # Same cross-domain dispatch as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rsub__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rsub__(f)
            op, g_numer, g_denom = f._extract_ground(g)
            if op == 1:
                return f.new(f.numer - f.denom*g_numer, f.denom)
            elif not op:
                return NotImplemented
            else:
                return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom)
    def __rsub__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(-f.numer + f.denom*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(-f.numer + f.denom*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
    def __mul__(f, g):
        """Multiply rational functions ``f`` and ``g``. """
        field = f.field
        if not f or not g:
            return field.zero
        elif isinstance(g, field.dtype):
            return f.new(f.numer*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer*g, f.denom)
        else:
            # Same cross-domain dispatch as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rmul__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rmul__(f)
            return f.__rmul__(g)
    def __rmul__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(f.numer*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.numer*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_numer, f.denom*g_denom)
    def __truediv__(f, g):
        """Computes quotient of fractions ``f`` and ``g``. """
        field = f.field
        if not g:
            raise ZeroDivisionError
        elif isinstance(g, field.dtype):
            return f.new(f.numer*g.denom, f.denom*g.numer)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer, f.denom*g)
        else:
            # Same cross-domain dispatch as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rtruediv__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rtruediv__(f)
            op, g_numer, g_denom = f._extract_ground(g)
            if op == 1:
                return f.new(f.numer, f.denom*g_numer)
            elif not op:
                return NotImplemented
            else:
                return f.new(f.numer*g_denom, f.denom*g_numer)
    __div__ = __truediv__
    def __rtruediv__(f, c):
        if not f:
            raise ZeroDivisionError
        elif isinstance(c, f.field.ring.dtype):
            return f.new(f.denom*c, f.numer)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.denom*g_numer, f.numer)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.denom*g_numer, f.numer*g_denom)
    __rdiv__ = __rtruediv__
    def __pow__(f, n):
        """Raise ``f`` to the integer power ``n``; a negative ``n``
        inverts ``f`` first (raises ZeroDivisionError when ``f`` is zero). """
        if n >= 0:
            return f.raw_new(f.numer**n, f.denom**n)
        elif not f:
            raise ZeroDivisionError
        else:
            return f.raw_new(f.denom**-n, f.numer**-n)
    def diff(f, x):
        """Computes partial derivative in ``x``.
        Examples
        ========
        >>> from sympy.polys.fields import field
        >>> from sympy.polys.domains import ZZ
        >>> _, x, y, z = field("x,y,z", ZZ)
        >>> ((x**2 + y)/(z + 1)).diff(x)
        2*x/(z + 1)
        """
        x = x.to_poly()
        # Quotient rule: (n'd - nd')/d**2; new() cancels the result.
        return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2)
    def __call__(f, *values):
        if 0 < len(values) <= f.field.ngens:
            return f.evaluate(list(zip(f.field.gens, values)))
        else:
            raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values)))
    def evaluate(f, x, a=None):
        """Evaluate at generator(s): either a list of (gen, value) pairs,
        or a single generator *x* with value *a*. Returns an element of the
        field over the (possibly smaller) remaining generators."""
        if isinstance(x, list) and a is None:
            x = [ (X.to_poly(), a) for X, a in x ]
            numer, denom = f.numer.evaluate(x), f.denom.evaluate(x)
        else:
            x = x.to_poly()
            numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a)
        field = numer.ring.to_field()
        return field.new(numer, denom)
    def subs(f, x, a=None):
        """Like :meth:`evaluate` but stays in the same field."""
        if isinstance(x, list) and a is None:
            x = [ (X.to_poly(), a) for X, a in x ]
            numer, denom = f.numer.subs(x), f.denom.subs(x)
        else:
            x = x.to_poly()
            numer, denom = f.numer.subs(x, a), f.denom.subs(x, a)
        return f.new(numer, denom)
    def compose(f, x, a=None):
        raise NotImplementedError
|
daviddupont69/CouchPotatoServer | refs/heads/develop | libs/pyutil/scripts/tailx.py | 106 | #!/usr/bin/env python
# output all but the first N lines of a file
# Allen Short and Jp Calderone wrote this coool version:
import itertools, sys
def main():
    """Write all but the first K lines of a file (or stdin) to stdout.

    Usage: tailx.py K [FILE]
    """
    skip = int(sys.argv[1])
    source = open(sys.argv[2], 'r') if len(sys.argv) > 2 else sys.stdin
    sys.stdout.writelines(itertools.islice(source, skip, None))
# Run as a script: tailx.py K [FILE]
if __name__ == '__main__':
    main()
# thus replacing my dumb version:
# # from the Python Standard Library
# import sys
#
# i = K
# for l in sys.stdin.readlines():
# if i:
# i -= 1
# else:
# print l,
|
BORETS24/Zenfone-2-500CL | refs/heads/master | linux/kernel/scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" This class provides a tree representation of the functions
	call stack. If a function has no parent in the kernel (interrupt,
	syscall, kernel thread...) then it is attached to a virtual parent
	called ROOT.
	"""
	# Virtual parent of all call stacks; initialized in main().
	ROOT = None
	def __init__(self, func, time = None, parent = None):
		self._func = func
		self._time = time
		if parent is None:
			self._parent = CallTree.ROOT
		else:
			self._parent = parent
		self._children = []
	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
		into the tree at the appropriate place.
		@return: A reference to the newly created child node.
		"""
		child = CallTree(func, calltime, self)
		self._children.append(child)
		return child
	def getParent(self, func):
		""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		on a parent, then create it as new child of root
		@return: A reference to the parent.
		"""
		tree = self
		# Walk up until we find the caller or fall off the top.
		while tree != CallTree.ROOT and tree._func != func:
			tree = tree._parent
		if tree == CallTree.ROOT:
			child = CallTree.ROOT.calls(func, None)
			return child
		return tree
	def __repr__(self):
		return self.__toString("", True)
	def __toString(self, branch, lastChild):
		# Render this node, then recurse into children, extending the
		# ASCII branch prefix; the last child drops the trailing "|".
		if self._time is not None:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		else:
			s = "%s----%s\n" % (branch, self._func)
		i = 0
		if lastChild:
			branch = branch[:-1] + " "
		while i < len(self._children):
			if i != len(self._children) - 1:
				s += "%s" % self._children[i].__toString(branch +\
					" |", False)
			else:
				s += "%s" % self._children[i].__toString(branch +\
					" |", True)
			i += 1
		return s
class BrokenLineException(Exception):
	"""Raised for a line truncated by the pipe breakage; the caller
	stops processing and ignores it.
	"""
	pass
class CommentLineException(Exception):
	"""Raised for a comment line (as found at the beginning of the trace
	file); the caller simply skips it.
	"""
	pass
def parseLine(line):
	"""Parse one ftrace line into (calltime, callee, caller).

	Raises CommentLineException on comment lines and BrokenLineException
	on lines that do not match the expected format.
	"""
	stripped = line.strip()
	if stripped.startswith("#"):
		raise CommentLineException
	match = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", stripped)
	if match is None:
		raise BrokenLineException
	return match.groups()
def main():
	"""Read an ftrace 'function' trace from stdin and print it as a tree."""
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT
	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# Truncated last line (broken pipe): stop processing.
			break
		except CommentLineException:
			continue
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)
	print CallTree.ROOT
if __name__ == "__main__":
main()
|
luci/luci-py | refs/heads/master | client/third_party/google/auth/_default_async.py | 4 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Application default credentials.
Implements application default credentials and project ID detection.
"""
import io
import json
import os
import six
from google.auth import _default
from google.auth import environment_vars
from google.auth import exceptions
def load_credentials_from_file(filename, scopes=None, quota_project_id=None):
    """Loads Google credentials from a file.
    The credentials file must be a service account key or stored authorized
    user credentials.
    Args:
        filename (str): The full path to the credentials file.
        scopes (Optional[Sequence[str]]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.
    Returns:
        Tuple[google.auth.credentials.Credentials, Optional[str]]: Loaded
            credentials and the project ID. Authorized user credentials do not
            have the project ID information.
    Raises:
        google.auth.exceptions.DefaultCredentialsError: if the file is in the
            wrong format or is missing.
    """
    if not os.path.exists(filename):
        raise exceptions.DefaultCredentialsError(
            "File {} was not found.".format(filename)
        )
    with io.open(filename, "r") as file_obj:
        try:
            info = json.load(file_obj)
        except ValueError as caught_exc:
            new_exc = exceptions.DefaultCredentialsError(
                "File {} is not a valid json file.".format(filename), caught_exc
            )
            six.raise_from(new_exc, caught_exc)
    # The type key should indicate that the file is either a service account
    # credentials file or an authorized user credentials file.
    credential_type = info.get("type")
    if credential_type == _default._AUTHORIZED_USER_TYPE:
        from google.oauth2 import _credentials_async as credentials
        try:
            credentials = credentials.Credentials.from_authorized_user_info(
                info, scopes=scopes
            ).with_quota_project(quota_project_id)
        except ValueError as caught_exc:
            msg = "Failed to load authorized user credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            six.raise_from(new_exc, caught_exc)
        if not credentials.quota_project_id:
            # Warn: user credentials without a quota project can hit
            # quota/billing issues on some APIs.
            _default._warn_about_problematic_credentials(credentials)
        # Authorized user files carry no project ID.
        return credentials, None
    elif credential_type == _default._SERVICE_ACCOUNT_TYPE:
        from google.oauth2 import _service_account_async as service_account
        try:
            credentials = service_account.Credentials.from_service_account_info(
                info, scopes=scopes
            ).with_quota_project(quota_project_id)
        except ValueError as caught_exc:
            msg = "Failed to load service account credentials from {}".format(filename)
            new_exc = exceptions.DefaultCredentialsError(msg, caught_exc)
            six.raise_from(new_exc, caught_exc)
        return credentials, info.get("project_id")
    else:
        raise exceptions.DefaultCredentialsError(
            "The file {file} does not have a valid type. "
            "Type is {type}, expected one of {valid_types}.".format(
                file=filename, type=credential_type, valid_types=_default._VALID_TYPES
            )
        )
def _get_gcloud_sdk_credentials():
    """Gets the credentials and project ID from the Cloud SDK."""
    from google.auth import _cloud_sdk

    # Without an application default credentials file there is nothing to load.
    adc_path = _cloud_sdk.get_application_default_credentials_path()
    if not os.path.isfile(adc_path):
        return None, None

    credentials, project_id = _get_gcloud_sdk_credentials.__globals__[
        "load_credentials_from_file"
    ](adc_path) if False else load_credentials_from_file(adc_path)

    # When the file carries no project ID, ask the Cloud SDK for the active one.
    return credentials, project_id or _cloud_sdk.get_project_id()
def _get_explicit_environ_credentials():
    """Gets credentials from the GOOGLE_APPLICATION_CREDENTIALS environment
    variable.

    Returns:
        Tuple[Optional[google.auth.credentials.Credentials], Optional[str]]:
            Loaded credentials and project ID, or ``(None, None)`` when the
            environment variable is not set.
    """
    from google.auth import _cloud_sdk

    cloud_sdk_adc_path = _cloud_sdk.get_application_default_credentials_path()
    explicit_file = os.environ.get(environment_vars.CREDENTIALS)

    if explicit_file is not None and explicit_file == cloud_sdk_adc_path:
        # Cloud sdk flow calls gcloud to fetch project id, so if the explicit
        # file path is cloud sdk credentials path, then we should fall back
        # to cloud sdk flow, otherwise project id cannot be obtained.
        return _get_gcloud_sdk_credentials()

    if explicit_file is not None:
        # Reuse the value read above instead of reading os.environ a second
        # time, which could race with concurrent environment mutation.
        credentials, project_id = load_credentials_from_file(explicit_file)
        return credentials, project_id

    return None, None
def _get_gae_credentials():
    """Gets Google App Engine App Identity credentials and project ID."""
    # Delegates to the synchronous implementation.  While this library is
    # normally bundled with app_engine, there are some cases where it's not
    # available, so that helper tolerates ImportError.
    return _default._get_gae_credentials()
def _get_gce_credentials(request=None):
    """Gets credentials and project ID from the GCE Metadata Service."""
    # Delegates to the synchronous implementation.  Pinging the metadata
    # server requires a transport, but application default credentials should
    # need no arguments, so the helper falls back to the _http_client
    # transport (http.client); that is acceptable only because the metadata
    # server does no SSL and never needs proxies.  ImportError is tolerated
    # there since compute_engine is not always bundled.
    return _default._get_gce_credentials(request)
def default_async(scopes=None, request=None, quota_project_id=None):
    """Gets the default credentials for the current environment.

    `Application Default Credentials`_ provides an easy way to obtain
    credentials to call Google APIs for server-to-server or local applications.
    This function acquires credentials from the environment in the following
    order:

    1. If the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` is set
       to the path of a valid service account JSON private key file, then it is
       loaded and returned. The project ID returned is the project ID defined
       in the service account file if available (some older files do not
       contain project ID information).
    2. If the `Google Cloud SDK`_ is installed and has application default
       credentials set they are loaded and returned.
       To enable application default credentials with the Cloud SDK run::
           gcloud auth application-default login
       If the Cloud SDK has an active project, the project ID is returned. The
       active project can be set using::
           gcloud config set project
    3. If the application is running in the `App Engine standard environment`_
       (first generation) then the credentials and project ID from the
       `App Identity Service`_ are used.
    4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or
       the `App Engine flexible environment`_ or the `App Engine standard
       environment`_ (second generation) then the credentials and project ID
       are obtained from the `Metadata Service`_.
    5. If no credentials are found,
       :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised.

    .. _Application Default Credentials: https://developers.google.com\
            /identity/protocols/application-default-credentials
    .. _Google Cloud SDK: https://cloud.google.com/sdk
    .. _App Engine standard environment: https://cloud.google.com/appengine
    .. _App Identity Service: https://cloud.google.com/appengine/docs/python\
            /appidentity/
    .. _Compute Engine: https://cloud.google.com/compute
    .. _App Engine flexible environment: https://cloud.google.com\
            /appengine/flexible
    .. _Metadata Service: https://cloud.google.com/compute/docs\
            /storing-retrieving-metadata
    .. _Cloud Run: https://cloud.google.com/run

    Example::

        import google.auth
        credentials, project_id = google.auth.default()

    Args:
        scopes (Sequence[str]): The list of scopes for the credentials. If
            specified, the credentials will automatically be scoped if
            necessary.
        request (google.auth.transport.Request): An object used to make
            HTTP requests. This is used to detect whether the application
            is running on Compute Engine. If not specified, then it will
            use the standard library http client to make requests.
        quota_project_id (Optional[str]): The project ID used for
            quota and billing.

    Returns:
        Tuple[~google.auth.credentials.Credentials, Optional[str]]:
            the current environment's credentials and project ID. Project ID
            may be None, which indicates that the Project ID could not be
            ascertained from the environment.

    Raises:
        ~google.auth.exceptions.DefaultCredentialsError:
            If no credentials were found, or if the credentials found were
            invalid.
    """
    from google.auth._credentials_async import with_scopes_if_required

    # An explicitly configured project takes precedence over any project
    # inferred from the credential source below.
    explicit_project_id = os.environ.get(
        environment_vars.PROJECT, os.environ.get(environment_vars.LEGACY_PROJECT)
    )

    # Credential sources probed in documented priority order; the first one
    # that yields credentials wins.
    checkers = (
        _get_explicit_environ_credentials,
        _get_gcloud_sdk_credentials,
        _get_gae_credentials,
        lambda: _get_gce_credentials(request),
    )

    for checker in checkers:
        credentials, project_id = checker()
        if credentials is not None:
            credentials = with_scopes_if_required(
                credentials, scopes
            ).with_quota_project(quota_project_id)
            effective_project_id = explicit_project_id or project_id
            if not effective_project_id:
                # Non-fatal: callers may still use the credentials, but many
                # client libraries need a project ID.
                _default._LOGGER.warning(
                    "No project ID could be determined. Consider running "
                    "`gcloud config set project` or setting the %s "
                    "environment variable",
                    environment_vars.PROJECT,
                )
            return credentials, effective_project_id

    raise exceptions.DefaultCredentialsError(_default._HELP_MESSAGE)
|
agry/NGECore2 | refs/heads/master | scripts/mobiles/tatooine/rodian_clan_warchief.py | 2 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the Tatooine rodian clan warchief mission NPC template."""
    template = MobileTemplate()

    # Identity and combat difficulty.
    template.setCreatureName('mission_rodian_clan_warchief')
    template.setLevel(15)
    template.setDifficulty(Difficulty.NORMAL)
    template.setDeathblow(True)
    template.setScale(1)

    # Spawning and social behaviour.
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setSocialGroup("rodian")
    template.setAssistRange(0)
    template.setStalker(False)

    # Visual appearance.
    appearanceTemplates = Vector()
    appearanceTemplates.add('object/mobile/shared_dressed_tatooine_rodian_clan_warchief.iff')
    template.setTemplates(appearanceTemplates)

    # Weapons; the attack list is intentionally empty (default attack only).
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy'))
    template.setWeaponTemplateVector(weapons)
    template.setDefaultAttack('rangedShot')
    template.setAttacks(Vector())

    core.spawnService.addMobileTemplate('rodian_clan_warchief', template)
    return
cjhak/b2share | refs/heads/master | invenio/ext/template/loader.py | 17 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Customization of default Flask Jinja2 template loader.
By default the Flask Jinja2 template loader is not aware of the order of
Blueprints as defined by the PACKAGES configuration variable.
"""
from flask.templating import DispatchingJinjaLoader
from flask import __version__ as flask_version
from distutils.version import LooseVersion
# Flask 1.0 changed the return value of _iter_loaders, so for compatibility
# with both Flask 0.10 and 1.0 the installed version is checked here.
# See Flask commit bafc13981002dee4610234c7c97ac176766181c1
IS_FLASK_1_0 = LooseVersion(flask_version) >= LooseVersion("0.11-dev")
try:
    # Deprecated in Flask commit 817b72d484d353800d907b3580c899314bf7f3c6
    from flask.templating import blueprint_is_module
except ImportError:
    # Flask versions without the helper have no module-style blueprints, so
    # the answer is always False.  A ``def`` is used instead of an assigned
    # lambda (PEP 8, E731) for a proper name in tracebacks.
    def blueprint_is_module(blueprint):
        """Fallback: no blueprint is a module on Flask versions lacking it."""
        return False
class OrderAwareDispatchingJinjaLoader(DispatchingJinjaLoader):
    """Dispatching Jinja loader that honours the blueprint registry order."""

    def _iter_loaders(self, template):
        """Yield template loaders in the registry's blueprint order."""
        for blueprint in self.app.extensions['registry']['blueprints']:
            # Module-style blueprints carry no usable Jinja loader.
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is None:
                continue
            # Flask >= 1.0 expects (blueprint, loader); older Flask expects
            # (loader, template).
            yield (blueprint, loader) if IS_FLASK_1_0 else (loader, template)
|
sri85/pyfakefs | refs/heads/master | fake_tempfile.py | 23 | # Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake tempfile module.
Fake implementation of the python2.4.1 tempfile built-in module that works with
a FakeFilesystem object.
"""
#pylint: disable-all
import errno
import logging
import os
import stat
import tempfile
import fake_filesystem
try:
import StringIO as io # pylint: disable-msg=C6204
except ImportError:
import io # pylint: disable-msg=C6204
class FakeTempfileModule(object):
  """Uses a FakeFilesystem to provide a mock for the tempfile 2.4.1 module.

  Common usage:
  filesystem = fake_filesystem.FakeFilesystem()
  my_tempfile_module = mock_tempfile.FakeTempfileModule(filesystem)

  See also: default keyword arguments for Dependency Injection on
  http://go/tott-episode-12
  """

  def __init__(self, filesystem):
    # The fake filesystem all temp entries are created in.
    self._filesystem = filesystem
    # Handle on the real tempfile module; only its private
    # _RandomNameSequence helper is used to generate candidate names.
    self._tempfile = tempfile
    self.tempdir = None  # initialized by mktemp(), others
    self._temp_prefix = 'tmp'
    # Every name handed out by mkstemp()/mkdtemp(), kept for test validation
    # via FakeReturnedMktempValues().
    self._mktemp_retvals = []

  # pylint: disable-msg=W0622
  def _TempFilename(self, suffix='', prefix=None, dir=None):
    """Create a temporary filename that does not exist.

    This is a re-implementation of how tempfile creates random filenames,
    and is probably different.

    Does not modify self._filesystem, that's your job.

    Output: self.tempdir is initialized if unset

    Args:
      suffix: filename suffix
      prefix: filename prefix
      dir: dir to put filename in

    Returns:
      string, temp filename that does not exist
    """
    if dir is None:
      # Default to '<fake root>/tmp'.
      dir = self._filesystem.JoinPaths(self._filesystem.root.name, 'tmp')
    filename = None
    if prefix is None:
      prefix = self._temp_prefix
    # Keep drawing random names until one is unused in the fake filesystem.
    while not filename or self._filesystem.Exists(filename):
      # pylint: disable-msg=W0212
      filename = self._filesystem.JoinPaths(dir, '%s%s%s' % (
          prefix,
          next(self._tempfile._RandomNameSequence()),
          suffix))
    return filename

  # pylint: disable-msg=W0622,W0613
  def TemporaryFile(self, mode='w+b', bufsize=-1,
                    suffix='', prefix=None, dir=None):
    """Return a file-like object deleted on close().

    Python 2.4.1 tempfile.TemporaryFile.__doc__ =
    >Return a file (or file-like) object that can be used as a temporary
    >storage area. The file is created using mkstemp. It will be destroyed as
    >soon as it is closed (including an implicit close when the object is
    >garbage collected). Under Unix, the directory entry for the file is
    >removed immediately after the file is created. Other platforms do not
    >support this; your code should not rely on a temporary file created using
    >this function having or not having a visible name in the file system.
    >
    >The mode parameter defaults to 'w+b' so that the file created can be read
    >and written without being closed. Binary mode is used so that it behaves
    >consistently on all platforms without regard for the data that is stored.
    >bufsize defaults to -1, meaning that the operating system default is used.
    >
    >The dir, prefix and suffix parameters are passed to mkstemp()

    Args:
      mode: optional string, see above
      bufsize: optional int, see above
      suffix: optional string, see above
      prefix: optional string, see above
      dir: optional string, see above

    Returns:
      a file-like object.
    """
    # pylint: disable-msg=C6002
    # TODO: prefix, suffix, bufsize, dir, mode unused?
    # cannot be cStringIO due to .name requirement below
    retval = io.StringIO()
    retval.name = '<fdopen>'  # as seen on 2.4.3
    return retval

  # pylint: disable-msg=W0622,W0613
  def NamedTemporaryFile(self, mode='w+b', bufsize=-1,
                         suffix='', prefix=None, dir=None, delete=True):
    """Return a file-like object with name that is deleted on close().

    Python 2.4.1 tempfile.NamedTemporaryFile.__doc__ =
    >This function operates exactly as TemporaryFile() does, except that
    >the file is guaranteed to have a visible name in the file system. That
    >name can be retrieved from the name member of the file object.

    Args:
      mode: optional string, see above
      bufsize: optional int, see above
      suffix: optional string, see above
      prefix: optional string, see above
      dir: optional string, see above
      delete: optional bool, see above

    Returns:
      a file-like object including obj.name
    """
    # pylint: disable-msg=C6002
    # TODO: bufsiz unused?
    # Create the backing file in the fake filesystem, then open it through
    # the fake open() so close() can honor delete_on_close.
    temp = self.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    filename = temp[1]
    mock_open = fake_filesystem.FakeFileOpen(
        self._filesystem, delete_on_close=delete)
    obj = mock_open(filename, mode)
    obj.name = filename
    return obj

  # pylint: disable-msg=C6409
  def mkstemp(self, suffix='', prefix=None, dir=None, text=False):
    """Create temp file, returning a 2-tuple: (fd, filename).

    Important: the fd is the descriptor number issued by the fake
    filesystem's AddOpenFile(), not a real OS-level file descriptor.

    Python 2.4.1 tempfile.mkstemp.__doc__ =
    >mkstemp([suffix, [prefix, [dir, [text]]]])
    >
    >User-callable function to create and return a unique temporary file.
    >The return value is a pair (fd, name) where fd is the file descriptor
    >returned by os.open, and name is the filename.
    >
    >...[snip args]...
    >
    >The file is readable and writable only by the creating user ID.
    >If the operating system uses permission bits to indicate whether
    >a file is executable, the file is executable by no one. The file
    >descriptor is not inherited by children of this process.
    >
    >Caller is responsible for deleting the file when done with it.

    NOTE: if dir is unspecified, this call creates a directory.

    Output: self.tempdir is initialized if unset

    Args:
      suffix: optional string, filename suffix
      prefix: optional string, filename prefix
      dir: optional string, directory for temp file; must exist before call
      text: optional boolean, True = open file in text mode.
          default False = open file in binary mode.

    Returns:
      2-tuple containing
      [0] = int, file descriptor number for the file object
      [1] = string, absolute pathname of a file

    Raises:
      OSError: when dir= is specified but does not exist
    """
    # pylint: disable-msg=C6002
    # TODO: optional boolean text is unused?
    # default dir affected by "global"
    filename = self._TempEntryname(suffix, prefix, dir)
    # Owner read/write only, mirroring real mkstemp() permissions.
    fh = self._filesystem.CreateFile(filename, st_mode=stat.S_IFREG|0o600)
    fd = self._filesystem.AddOpenFile(fh)

    self._mktemp_retvals.append(filename)
    return (fd, filename)

  # pylint: disable-msg=C6409
  def mkdtemp(self, suffix='', prefix=None, dir=None):
    """Create temp directory, returns string, absolute pathname.

    Python 2.4.1 tempfile.mkdtemp.__doc__ =
    >mkdtemp([suffix[, prefix[, dir]]])
    >Creates a temporary directory in the most secure manner
    >possible. [...]
    >
    >The user of mkdtemp() is responsible for deleting the temporary
    >directory and its contents when done with it.
    > [...]
    >mkdtemp() returns the absolute pathname of the new directory. [...]

    Args:
      suffix: optional string, filename suffix
      prefix: optional string, filename prefix
      dir: optional string, directory for temp dir. Must exist before call

    Returns:
      string, directory name
    """
    dirname = self._TempEntryname(suffix, prefix, dir)
    # Owner-only access, mirroring real mkdtemp() permissions.
    self._filesystem.CreateDirectory(dirname, perm_bits=0o700)

    self._mktemp_retvals.append(dirname)
    return dirname

  def _TempEntryname(self, suffix, prefix, dir):
    """Helper function for mk[ds]temp.

    Args:
      suffix: string, filename suffix
      prefix: string, filename prefix
      dir: string, directory for temp dir. Must exist before call

    Returns:
      string, entry name
    """
    # default dir affected by "global"
    if dir is None:
      call_mkdir = True
      dir = self.gettempdir()
    else:
      call_mkdir = False
    # NOTE(review): despite its name, call_mkdir never triggers a mkdir here;
    # it only decides whether the parent-directory existence check below is
    # skipped.  Confirm whether directory creation was intended.

    entryname = None
    while not entryname or self._filesystem.Exists(entryname):
      entryname = self._TempFilename(suffix=suffix, prefix=prefix, dir=dir)
    if not call_mkdir:
      # This is simplistic. A bad input of suffix=/f will cause tempfile
      # to blow up, but this mock won't. But that's already a broken
      # corner case
      parent_dir = os.path.dirname(entryname)
      try:
        self._filesystem.GetObject(parent_dir)
      except IOError as err:
        assert 'No such file or directory' in str(err)
        # python -c 'import tempfile; tempfile.mkstemp(dir="/no/such/dr")'
        # OSError: [Errno 2] No such file or directory: '/no/such/dr/tmpFBuqjO'
        raise OSError(
            errno.ENOENT,
            'No such directory in mock filesystem',
            parent_dir)
    return entryname

  # pylint: disable-msg=C6409
  def gettempdir(self):
    """Get default temp dir. Sets default if unset."""
    if self.tempdir:
      return self.tempdir
    # pylint: disable-msg=C6002
    # TODO: environment variables TMPDIR TEMP TMP, or other dirs?
    self.tempdir = '/tmp'
    return self.tempdir

  # pylint: disable-msg=C6409
  def gettempprefix(self):
    """Get temp filename prefix.

    NOTE: This has no effect on py2.4

    Returns:
      string, prefix to use in temporary filenames
    """
    return self._temp_prefix

  # pylint: disable-msg=C6409
  def mktemp(self, suffix=''):
    """mktemp is deprecated in 2.4.1, and is thus unimplemented."""
    raise NotImplementedError

  def _SetTemplate(self, template):
    """Setter for 'template' property."""
    self._temp_prefix = template
    logging.error('tempfile.template= is a NOP in python2.4')

  def __SetTemplate(self, template):
    """Indirect setter for 'template' property."""
    self._SetTemplate(template)

  def __DeprecatedTemplate(self):
    """template property implementation."""
    raise NotImplementedError

  # reading from template is deprecated, setting is ok.
  template = property(__DeprecatedTemplate, __SetTemplate,
                      doc="""Set the prefix for temp filenames""")

  def FakeReturnedMktempValues(self):
    """For validation purposes, mktemp()'s return values are stored."""
    return self._mktemp_retvals

  def FakeMktempReset(self):
    """Clear the stored mktemp() values."""
    self._mktemp_retvals = []
|
tensor-tang/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_optimizer.py | 1 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
import paddle.compat as cpt
from paddle.fluid.backward import append_backward
class TestOptimizer(unittest.TestCase):
    """Checks the op sequence that SGDOptimizer.minimize appends."""

    def test_sgd_optimizer(self):
        def check_sgd_optimizer(optimizer_attr):
            # Minimal network: mean(mul(x, y)), where mul.x is the only
            # trainable parameter and carries the given optimize_attr.
            init_program = framework.Program()
            program = framework.Program()
            block = program.global_block()
            mul_x = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="mul.x",
                optimize_attr=optimizer_attr)
            mul_y = block.create_var(
                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
            mul_out = block.create_var(
                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
            mean_out = block.create_var(
                dtype="float32", shape=[1], lod_level=0, name="mean.out")
            block.append_op(
                type="mul",
                inputs={"X": mul_x,
                        "Y": mul_y},
                outputs={"Out": mul_out},
                attrs={"x_num_col_dims": 1})
            block.append_op(
                type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
            opts, _ = sgd_optimizer.minimize(mean_out, init_program)
            return opts

        # A per-parameter learning rate other than 1.0 inserts an extra
        # "scale" op before the "sgd" op.
        opts = check_sgd_optimizer({'learning_rate': 1.1})
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "sgd"])

        # With learning_rate 1.0 the scale op is elided.
        opts = check_sgd_optimizer({'learning_rate': 1.0})
        self.assertEqual(len(opts), 1)
        self.assertEqual([op.type for op in opts], ["sgd"])
class TestOptimizerBackwardApplygrad(unittest.TestCase):
    """Same checks as TestOptimizer, but driving the explicit
    backward() + apply_gradients() API instead of minimize()."""

    def test_sgd_optimizer(self):
        def check_sgd_optimizer(optimizer_attr):
            # Minimal network: mean(mul(x, y)); mul.x is the only parameter.
            init_program = framework.Program()
            program = framework.Program()
            block = program.global_block()
            mul_x = block.create_parameter(
                dtype="float32",
                shape=[5, 10],
                lod_level=0,
                name="mul.x",
                optimize_attr=optimizer_attr)
            mul_y = block.create_var(
                dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
            mul_out = block.create_var(
                dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
            mean_out = block.create_var(
                dtype="float32", shape=[1], lod_level=0, name="mean.out")
            block.append_op(
                type="mul",
                inputs={"X": mul_x,
                        "Y": mul_y},
                outputs={"Out": mul_out},
                attrs={"x_num_col_dims": 1})
            block.append_op(
                type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01)
            # Explicit two-step flow, inside a program guard so the ops land
            # in the intended program.
            with framework.program_guard(program, init_program):
                p_g = sgd_optimizer.backward(mean_out)
                opts = sgd_optimizer.apply_gradients(p_g)
            return opts

        # Non-unit per-parameter learning rate adds a "scale" op.
        opts = check_sgd_optimizer({'learning_rate': 1.1})
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "sgd"])

        opts = check_sgd_optimizer({'learning_rate': 1.0})
        self.assertEqual(len(opts), 1)
        self.assertEqual([op.type for op in opts], ["sgd"])
class TestMomentumOptimizer(unittest.TestCase):
    """Checks ops, accumulators and init-program fills for Momentum."""

    class MockMomentum(optimizer.MomentumOptimizer):
        # Expose the optimizer's private accumulator bookkeeping for testing.
        def get_accumulators(self):
            return self._accumulators

        def get_velocity_str(self):
            return self._velocity_acc_str

    def test_vanilla_momentum_optimizer(self):
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2)
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        # No accumulators exist before apply_gradients creates them.
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = momentum_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        # NOTE(review): despite the name, sgd_op is the "momentum" op
        # appended last.
        sgd_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", "momentum"])
        self.assertFalse(sgd_op.attr('use_nesterov'))

        # Check accumulators: exactly one velocity accumulator, keyed by the
        # parameter name.
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program: fills for learning rate and velocity.
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)

    def test_nesterov_momentum_optimizer(self):
        # Same network as above, but with use_nesterov=True.
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        momentum_optimizer = self.MockMomentum(
            learning_rate=learning_rate, momentum=0.2, use_nesterov=True)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = momentum_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        # NOTE(review): sgd_op is actually the "momentum" op (see above).
        sgd_op = opts[-1]
        self.assertEqual([op.type for op in opts], ["scale", "momentum"])
        self.assertTrue(sgd_op.attr('use_nesterov'))

        # Check accumulators.
        accumulators = momentum_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(momentum_optimizer.get_velocity_str() in accumulators)
        velocity_acc = accumulators[momentum_optimizer.get_velocity_str()]
        self.assertEqual(len(velocity_acc), 1)
        self.assertTrue(mul_x.name in velocity_acc)

        # Check init_program.
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 2)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
class TestAdagradOptimizer(unittest.TestCase):
    """Checks ops, accumulators and init-program fills for Adagrad."""

    class MockAdagrad(optimizer.AdagradOptimizer):
        # Expose private accumulator bookkeeping for testing.
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

    def test_adagrad_optimizer(self):
        # Minimal network: mean(mul(x, y)); mul.x is the only parameter.
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adagrad_optimizer = self.MockAdagrad(
            learning_rate=learning_rate, epsilon=1.0e-6)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adagrad_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 2)
        self.assertEqual([op.type for op in opts], ["scale", "adagrad"])

        # Check accumulators: one moment accumulator, keyed by parameter name.
        accumulators = adagrad_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 1)
        self.assertTrue(adagrad_optimizer.get_moment_str() in accumulators)
        moment_acc = accumulators[adagrad_optimizer.get_moment_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)

        # Check init_program: three fills expected; only the first two are
        # asserted in detail here.
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 3)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
        self.assertEqual(init_ops[1].type, "fill_constant")
        self.assertAlmostEqual(init_ops[1].attr('value'), 0.0)
class TestAdamOptimizer(unittest.TestCase):
    """Checks ops, accumulators and init-program fills for Adam."""

    class MockAdam(optimizer.AdamOptimizer):
        # Expose private accumulator bookkeeping for testing.
        def get_accumulators(self):
            return self._accumulators

        def get_moment1_str(self):
            return self._moment1_acc_str

        def get_moment2_str(self):
            return self._moment2_acc_str

    def test_adam_optimizer(self):
        # Minimal network: mean(mul(x, y)); mul.x is the only parameter.
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adam_optimizer = self.MockAdam(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adam_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 4)
        self.assertEqual([op.type for op in opts],
                         ["scale", "adam", "scale", "scale"])

        # Check accumulators: four in total; moment1/moment2 are asserted
        # explicitly (the other two presumably track the beta powers —
        # TODO confirm against AdamOptimizer internals).
        accumulators = adam_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 4)
        self.assertTrue(adam_optimizer.get_moment1_str() in accumulators)
        self.assertTrue(adam_optimizer.get_moment2_str() in accumulators)
        moment1_acc = accumulators[adam_optimizer.get_moment1_str()]
        moment2_acc = accumulators[adam_optimizer.get_moment2_str()]
        self.assertEqual(len(moment1_acc), 1)
        self.assertEqual(len(moment2_acc), 1)
        self.assertTrue(mul_x.name in moment1_acc)
        self.assertTrue(mul_x.name in moment2_acc)

        # Check init_program: five fills; only the learning-rate fill is
        # asserted in detail.
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 5)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
class TestAdamaxOptimizer(unittest.TestCase):
    """Checks ops, accumulators and init-program fills for Adamax."""

    class MockAdamax(optimizer.AdamaxOptimizer):
        # Expose private accumulator bookkeeping for testing.
        def get_accumulators(self):
            return self._accumulators

        def get_moment_str(self):
            return self._moment_acc_str

        def get_inf_norm_str(self):
            return self._inf_norm_acc_str

    def test_adamax_optimizer(self):
        # Minimal network: mean(mul(x, y)); mul.x is the only parameter.
        init_program = framework.Program()
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
        learning_rate = 0.01
        adamax_optimizer = self.MockAdamax(
            learning_rate=learning_rate, beta1=0.9, beta2=0.999)
        params_grads = append_backward(mean_out)
        self.assertEqual(len(params_grads), 1)
        self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
        with framework.program_guard(program, init_program):
            opts = adamax_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(opts), 3)
        self.assertEqual([op.type for op in opts], ["scale", "adamax", "scale"])

        # Check accumulators: three in total; moment and inf_norm are
        # asserted explicitly (the third presumably tracks a beta power —
        # TODO confirm against AdamaxOptimizer internals).
        accumulators = adamax_optimizer.get_accumulators()
        self.assertEqual(len(accumulators), 3)
        self.assertTrue(adamax_optimizer.get_moment_str() in accumulators)
        self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators)
        moment_acc = accumulators[adamax_optimizer.get_moment_str()]
        inf_norm_acc = accumulators[adamax_optimizer.get_inf_norm_str()]
        self.assertEqual(len(moment_acc), 1)
        self.assertEqual(len(inf_norm_acc), 1)
        self.assertTrue(mul_x.name in moment_acc)
        self.assertTrue(mul_x.name in inf_norm_acc)

        # Check init_program: four fills; only the learning-rate fill is
        # asserted in detail.
        init_ops = init_program.global_block().ops
        self.assertEqual(len(init_ops), 4)
        self.assertEqual(init_ops[0].type, "fill_constant")
        self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate)
class TestDecayedAdagradOptimizer(unittest.TestCase):
    """Exercise DecayedAdagradOptimizer's ops and accumulator bookkeeping."""
    class MockDecayedAdagrad(optimizer.DecayedAdagradOptimizer):
        # Expose private accumulator state for white-box assertions.
        def get_accumulators(self):
            return self._accumulators
        def get_moment_str(self):
            return self._moment_acc_str
    def test_decayed_adagrad_optimizer(self):
        startup_prog = framework.Program()
        main_prog = framework.Program()
        blk = main_prog.global_block()
        # Tiny net: loss = mean(param_x x in_y).
        param_x = blk.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        in_y = blk.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        product = blk.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        blk.append_op(
            type="mul",
            inputs={"X": param_x,
                    "Y": in_y},
            outputs={"Out": product},
            attrs={"x_num_col_dims": 1})
        loss = blk.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        blk.append_op(
            type="mean", inputs={"X": product}, outputs={"Out": loss})
        lr = 0.01
        opt = self.MockDecayedAdagrad(
            learning_rate=lr, decay=0.95, epsilon=1.0e-6)
        params_grads = append_backward(loss)
        self.assertEqual(1, len(params_grads))
        # No accumulators exist until gradients are applied.
        self.assertEqual(0, len(opt.get_accumulators()))
        with framework.program_guard(main_prog, startup_prog):
            opts = opt.apply_gradients(params_grads)
        self.assertEqual(2, len(opts))
        self.assertEqual(["scale", "decayed_adagrad"],
                         [op.type for op in opts])
        # Exactly one accumulator (the moment), keyed by parameter name.
        accumulators = opt.get_accumulators()
        self.assertEqual(1, len(accumulators))
        self.assertIn(opt.get_moment_str(), accumulators)
        moment_acc = accumulators[opt.get_moment_str()]
        self.assertEqual(1, len(moment_acc))
        self.assertIn(param_x.name, moment_acc)
        # Startup program: learning-rate constant, then zeroed accumulator.
        init_ops = startup_prog.global_block().ops
        self.assertEqual(2, len(init_ops))
        self.assertEqual("fill_constant", init_ops[0].type)
        self.assertAlmostEqual(lr, init_ops[0].attr('value'))
        self.assertEqual("fill_constant", init_ops[1].type)
        self.assertAlmostEqual(0.0, init_ops[1].attr('value'))
class TestFtrlOptimizer(unittest.TestCase):
    """Exercise FtrlOptimizer: emitted ops and its two accumulators."""
    class MockFtrl(optimizer.FtrlOptimizer):
        # Expose private accumulator state for white-box assertions.
        def get_accumulators(self):
            return self._accumulators
        def get_squared_str(self):
            return self._squared_acc_str
        def get_linear_str(self):
            return self._linear_acc_str
    def test_ftrl_optimizer(self):
        startup_prog = framework.Program()
        main_prog = framework.Program()
        blk = main_prog.global_block()
        # Tiny net: loss = mean(param_x x in_y).
        param_x = blk.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        in_y = blk.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        product = blk.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        blk.append_op(
            type="mul",
            inputs={"X": param_x,
                    "Y": in_y},
            outputs={"Out": product},
            attrs={"x_num_col_dims": 1})
        loss = blk.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        blk.append_op(
            type="mean", inputs={"X": product}, outputs={"Out": loss})
        lr = 0.01
        opt = self.MockFtrl(
            learning_rate=lr, l1=0.0, l2=0.0, lr_power=-0.5)
        params_grads = append_backward(loss)
        self.assertEqual(1, len(params_grads))
        # No accumulators exist until gradients are applied.
        self.assertEqual(0, len(opt.get_accumulators()))
        with framework.program_guard(main_prog, startup_prog):
            opts = opt.apply_gradients(params_grads)
        self.assertEqual(2, len(opts))
        self.assertEqual(["scale", "ftrl"], [op.type for op in opts])
        # FTRL keeps two accumulators: squared and linear, per parameter.
        accumulators = opt.get_accumulators()
        self.assertEqual(2, len(accumulators))
        self.assertIn(opt.get_squared_str(), accumulators)
        self.assertIn(opt.get_linear_str(), accumulators)
        squared_acc = accumulators[opt.get_squared_str()]
        linear_acc = accumulators[opt.get_linear_str()]
        self.assertEqual(1, len(squared_acc))
        self.assertEqual(1, len(linear_acc))
        self.assertIn(param_x.name, squared_acc)
        self.assertIn(param_x.name, linear_acc)
        # Startup program initializes the learning rate first.
        init_ops = startup_prog.global_block().ops
        self.assertEqual(3, len(init_ops))
        self.assertEqual("fill_constant", init_ops[0].type)
        self.assertAlmostEqual(lr, init_ops[0].attr('value'))
class TestLookaheadOptimizer(unittest.TestCase):
    """LookaheadOptimizer should delegate update ops to its inner SGD."""
    def test_lookahead_optimizer(self):
        startup_prog = framework.Program()
        main_prog = framework.Program()
        blk = main_prog.global_block()
        startup_blk = startup_prog.global_block()
        param_x = blk.create_parameter(
            dtype="float32",
            shape=[5, 10],
            lod_level=0,
            name="mul.x",
            optimize_attr={'learning_rate': 1.1})
        # The startup program gets its own copy of the parameter.
        startup_param_x = startup_blk.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        in_y = blk.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        product = blk.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        loss = blk.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        # Net: loss = mean(param_x x in_y).
        blk.append_op(
            type="mul",
            inputs={"X": param_x,
                    "Y": in_y},
            outputs={"Out": product},
            attrs={"x_num_col_dims": 1})
        blk.append_op(
            type="mean", inputs={"X": product}, outputs={"Out": loss})
        inner_sgd = optimizer.SGD(learning_rate=0.01)
        lookahead = optimizer.LookaheadOptimizer(inner_sgd, alpha=0.5, k=5)
        with framework.program_guard(main_prog, startup_prog):
            opts, _ = lookahead.minimize(loss)
        self.assertEqual(2, len(opts))
        self.assertEqual(["scale", "sgd"], [op.type for op in opts])
class TestRecomputeOptimizer(unittest.TestCase):
    """Verify the exact op sequences left by RecomputeOptimizer.minimize.

    Checkpointed activations are recomputed during the backward pass
    (extra forward ops appear between the *_grad ops) instead of being
    kept alive from the forward pass.
    """
    def net(self):
        # Builds loss = mean((mul.x x mul.y) + b1 + b2) and returns the
        # intermediate vars that the tests use as recompute checkpoints.
        program = framework.Program()
        block = program.global_block()
        mul_x = block.create_parameter(
            dtype="float32", shape=[5, 10], lod_level=0, name="mul.x")
        mul_y = block.create_var(
            dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
        mul_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
        b1 = block.create_parameter(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1")
        b1_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="b1_out")
        b2 = block.create_parameter(
            dtype="float32", shape=[5, 8], lod_level=0, name="b2")
        b2_out = block.create_var(
            dtype="float32", shape=[5, 8], lod_level=0, name="b2_out")
        mean_out = block.create_var(
            dtype="float32", shape=[1], lod_level=0, name="mean.out")
        block.append_op(
            type="mul",
            inputs={"X": mul_x,
                    "Y": mul_y},
            outputs={"Out": mul_out},
            attrs={"x_num_col_dims": 1})
        block.append_op(
            type="elementwise_add",
            inputs={"X": mul_out,
                    "Y": b1},
            outputs={"Out": b1_out})
        block.append_op(
            type="elementwise_add",
            inputs={"X": b1_out,
                    "Y": b2},
            outputs={"Out": b2_out})
        block.append_op(
            type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out})
        return mul_out, b1_out, b2_out, mean_out
    def test_no_checkpoint(self):
        # With no checkpoints the program matches plain SGD: no forward
        # op is replayed during backward (12 ops total).
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual([op.type for op in mean_out.block.ops],
                         ["mul", "elementwise_add", "elementwise_add", "mean"])
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([])
        opts, params_grads = recompute_optimizer.minimize(mean_out)
        self.assertEqual(len(mean_out.block.ops), 12)
        self.assertEqual([op.type for op in mean_out.block.ops], [
            "mul", "elementwise_add", "elementwise_add", "mean",
            "fill_constant", "mean_grad", "elementwise_add_grad",
            "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd"
        ])
    def test_one_checkpoint(self):
        # Checkpointing b1_out forces one "mul" to be recomputed inside
        # the backward section (13 ops vs 12 without checkpoints).
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual([op.type for op in mean_out.block.ops],
                         ["mul", "elementwise_add", "elementwise_add", "mean"])
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)
        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual([op.type for op in mean_out.block.ops], [
            "mul", "elementwise_add", "elementwise_add", "mean",
            "fill_constant", "mean_grad", "elementwise_add_grad", "mul",
            "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd"
        ])
    def test_multi_checkpoint(self):
        # Two checkpoints: one "elementwise_add" is recomputed during the
        # backward pass.
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual([op.type for op in mean_out.block.ops],
                         ["mul", "elementwise_add", "elementwise_add", "mean"])
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([mul_out, b2_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)
        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual([op.type for op in mean_out.block.ops], [
            "mul", "elementwise_add", "elementwise_add", "mean",
            "fill_constant", "mean_grad", "elementwise_add",
            "elementwise_add_grad", "elementwise_add_grad", "mul_grad", "sgd",
            "sgd", "sgd"
        ])
    def test_adjacent_checkpoint(self):
        # Adjacent checkpoints collapse: nothing needs to be recomputed,
        # so the op list matches the no-checkpoint case.
        mul_out, b1_out, b2_out, mean_out = self.net()
        self.assertEqual(len(mean_out.block.ops), 4)
        self.assertEqual([op.type for op in mean_out.block.ops],
                         ["mul", "elementwise_add", "elementwise_add", "mean"])
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([mul_out, b1_out])
        opts, params_grads = recompute_optimizer.minimize(mean_out)
        self.assertEqual(len(mean_out.block.ops), 12)
        self.assertEqual([op.type for op in mean_out.block.ops], [
            "mul", "elementwise_add", "elementwise_add", "mean",
            "fill_constant", "mean_grad", "elementwise_add_grad",
            "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd"
        ])
    def test_apply_gradients(self):
        # Same result as minimize(), but driving backward() and
        # apply_gradients() separately.
        mul_out, b1_out, b2_out, mean_out = self.net()
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        # apply backward
        params_grads = recompute_optimizer.backward(
            mean_out,
            startup_program=None,
            parameter_list=None,
            no_grad_set=None,
            checkpoints=[b1_out])
        # apply gradient
        program = mean_out.block.program
        with framework.program_guard(program, None):
            optimize_ops = recompute_optimizer.apply_gradients(params_grads)
        self.assertEqual(len(mean_out.block.ops), 13)
        self.assertEqual([op.type for op in mean_out.block.ops], [
            "mul", "elementwise_add", "elementwise_add", "mean",
            "fill_constant", "mean_grad", "elementwise_add_grad", "mul",
            "elementwise_add_grad", "mul_grad", "sgd", "sgd", "sgd"
        ])
    def test_load(self):
        # load() is intentionally unsupported; verify the error message.
        mul_out, b1_out, b2_out, mean_out = self.net()
        sgd_optimizer = optimizer.SGD(learning_rate=1.0)
        recompute_optimizer = optimizer.RecomputeOptimizer(sgd_optimizer)
        recompute_optimizer._set_checkpoints([b1_out])
        try:
            stat_dict = {}
            recompute_optimizer.load(stat_dict)
        except NotImplementedError as e:
            self.assertEqual(
                "load function is not supported by Recompute Optimizer for now",
                cpt.get_exception_message(e))
# Allow running this test module directly: `python test_optimizer.py`.
if __name__ == '__main__':
    unittest.main()
|
jgeewax/gcloud-python | refs/heads/master | speech/google/cloud/speech/_gax.py | 3 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX/GAPIC module for managing Speech API requests."""
from google.cloud.gapic.speech.v1beta1.speech_client import SpeechClient
from google.cloud.grpc.speech.v1beta1.cloud_speech_pb2 import RecognitionAudio
from google.cloud.grpc.speech.v1beta1.cloud_speech_pb2 import RecognitionConfig
from google.cloud.grpc.speech.v1beta1.cloud_speech_pb2 import SpeechContext
from google.cloud.grpc.speech.v1beta1.cloud_speech_pb2 import (
StreamingRecognitionConfig)
from google.cloud.grpc.speech.v1beta1.cloud_speech_pb2 import (
StreamingRecognizeRequest)
from google.longrunning import operations_grpc
from google.cloud._helpers import make_secure_channel
from google.cloud._helpers import make_secure_stub
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.speech.alternative import Alternative
from google.cloud.speech.operation import Operation
# Host of the long-running operations service used to poll async results.
OPERATIONS_API_HOST = 'speech.googleapis.com'
class GAPICSpeechAPI(object):
    """Manage calls through GAPIC wrappers to the Speech API."""
    def __init__(self, client=None):
        # Keep the client so shared state (credentials, config) stays
        # reachable; Operation.from_pb receives ``self`` as its client.
        self._client = client
        credentials = self._client._connection.credentials
        # Authenticated gRPC channel for the GAPIC speech client.
        channel = make_secure_channel(
            credentials, DEFAULT_USER_AGENT,
            SpeechClient.SERVICE_ADDRESS)
        self._gapic_api = SpeechClient(channel=channel)
        # Separate stub for the operations service, used to poll the
        # status of jobs started via :meth:`async_recognize`.
        self._operations_stub = make_secure_stub(
            credentials,
            DEFAULT_USER_AGENT,
            operations_grpc.OperationsStub,
            OPERATIONS_API_HOST)
    def async_recognize(self, sample, language_code=None,
                        max_alternatives=None, profanity_filter=None,
                        speech_context=None):
        """Asynchronous Recognize request to Google Speech API.
        .. _async_recognize: https://cloud.google.com/speech/reference/\
                             rest/v1beta1/speech/asyncrecognize
        See `async_recognize`_.
        :type sample: :class:`~google.cloud.speech.sample.Sample`
        :param sample: Instance of ``Sample`` containing audio information.
        :type language_code: str
        :param language_code: (Optional) The language of the supplied audio as
                              BCP-47 language tag. Example: ``'en-GB'``.
                              If omitted, defaults to ``'en-US'``.
        :type max_alternatives: int
        :param max_alternatives: (Optional) Maximum number of recognition
                                 hypotheses to be returned. The server may
                                 return fewer than maxAlternatives.
                                 Valid values are 0-30. A value of 0 or 1
                                 will return a maximum of 1. Defaults to 1
        :type profanity_filter: bool
        :param profanity_filter: If True, the server will attempt to filter
                                 out profanities, replacing all but the
                                 initial character in each filtered word with
                                 asterisks, e.g. ``'f***'``. If False or
                                 omitted, profanities won't be filtered out.
        :type speech_context: list
        :param speech_context: A list of strings (max 50) containing words and
                               phrases "hints" so that the speech recognition
                               is more likely to recognize them. This can be
                               used to improve the accuracy for specific words
                               and phrases. This can also be used to add new
                               words to the vocabulary of the recognizer.
        :rtype: :class:`~google.cloud.speech.operation.Operation`
        :returns: Instance of ``Operation`` to poll for results.
        """
        # Build the protobuf config from the sample plus optional settings.
        config = RecognitionConfig(
            encoding=sample.encoding, sample_rate=sample.sample_rate,
            language_code=language_code, max_alternatives=max_alternatives,
            profanity_filter=profanity_filter,
            speech_context=SpeechContext(phrases=speech_context))
        # Exactly one of ``content`` / ``uri`` is expected to be set on
        # the sample; both are forwarded as-is.
        audio = RecognitionAudio(content=sample.content,
                                 uri=sample.source_uri)
        api = self._gapic_api
        response = api.async_recognize(config=config, audio=audio)
        # Wrap the long-running operation pb; ``self`` acts as the client.
        return Operation.from_pb(response, self)
    def streaming_recognize(self, sample, language_code=None,
                            max_alternatives=None, profanity_filter=None,
                            speech_context=None, single_utterance=False,
                            interim_results=False):
        """Streaming speech recognition.
        .. note::
            Streaming recognition requests are limited to 1 minute of audio.
            See: https://cloud.google.com/speech/limits#content
        Yields :class:`~streaming_response.StreamingSpeechResponse` containing
        results and metadata from the streaming request.
        :type sample: :class:`~google.cloud.speech.sample.Sample`
        :param sample: Instance of ``Sample`` containing audio information.
        :type language_code: str
        :param language_code: (Optional) The language of the supplied audio as
                              BCP-47 language tag. Example: ``'en-GB'``.
                              If omitted, defaults to ``'en-US'``.
        :type max_alternatives: int
        :param max_alternatives: (Optional) Maximum number of recognition
                                 hypotheses to be returned. The server may
                                 return fewer than maxAlternatives.
                                 Valid values are 0-30. A value of 0 or 1
                                 will return a maximum of 1. Defaults to 1
        :type profanity_filter: bool
        :param profanity_filter: If True, the server will attempt to filter
                                 out profanities, replacing all but the
                                 initial character in each filtered word with
                                 asterisks, e.g. ``'f***'``. If False or
                                 omitted, profanities won't be filtered out.
        :type speech_context: list
        :param speech_context: A list of strings (max 50) containing words and
                               phrases "hints" so that the speech recognition
                               is more likely to recognize them. This can be
                               used to improve the accuracy for specific words
                               and phrases. This can also be used to add new
                               words to the vocabulary of the recognizer.
        :type single_utterance: bool
        :param single_utterance: (Optional) If false or omitted, the recognizer
                                 will perform continuous recognition
                                 (continuing to process audio even if the user
                                 pauses speaking) until the client closes the
                                 output stream (gRPC API) or when the maximum
                                 time limit has been reached. Multiple
                                 SpeechRecognitionResults with the is_final
                                 flag set to true may be returned.
                                 If true, the recognizer will detect a single
                                 spoken utterance. When it detects that the
                                 user has paused or stopped speaking, it will
                                 return an END_OF_UTTERANCE event and cease
                                 recognition. It will return no more than one
                                 SpeechRecognitionResult with the is_final flag
                                 set to true.
        :type interim_results: bool
        :param interim_results: (Optional) If true, interim results (tentative
                                hypotheses) may be returned as they become
                                available (these interim results are indicated
                                with the is_final=false flag). If false or
                                omitted, only is_final=true result(s) are
                                returned.
        :raises: :class:`ValueError` if sample.content is not a file-like
                 object. :class:`ValueError` if stream has closed.
        :rtype: :class:`~google.cloud.grpc.speech.v1beta1\
                        .cloud_speech_pb2.StreamingRecognizeResponse`
        :returns: ``StreamingRecognizeResponse`` instances.
        """
        # A ``closed`` attribute is used as the marker for "file-like":
        # plain bytes / uri-based samples lack it and are rejected here.
        if getattr(sample.content, 'closed', None) is None:
            raise ValueError('Please use file-like object for data stream.')
        if sample.content.closed:
            raise ValueError('Stream is closed.')
        # Lazily generate config request + audio-chunk requests.
        requests = _stream_requests(sample, language_code=language_code,
                                    max_alternatives=max_alternatives,
                                    profanity_filter=profanity_filter,
                                    speech_context=speech_context,
                                    single_utterance=single_utterance,
                                    interim_results=interim_results)
        api = self._gapic_api
        responses = api.streaming_recognize(requests)
        return responses
    def sync_recognize(self, sample, language_code=None, max_alternatives=None,
                       profanity_filter=None, speech_context=None):
        """Synchronous Speech Recognition.
        .. _sync_recognize: https://cloud.google.com/speech/reference/\
                            rest/v1beta1/speech/syncrecognize
        See `sync_recognize`_.
        :type sample: :class:`~google.cloud.speech.sample.Sample`
        :param sample: Instance of ``Sample`` containing audio information.
        :type language_code: str
        :param language_code: (Optional) The language of the supplied audio as
                              BCP-47 language tag. Example: ``'en-GB'``.
                              If omitted, defaults to ``'en-US'``.
        :type max_alternatives: int
        :param max_alternatives: (Optional) Maximum number of recognition
                                 hypotheses to be returned. The server may
                                 return fewer than maxAlternatives.
                                 Valid values are 0-30. A value of 0 or 1
                                 will return a maximum of 1. Defaults to 1
        :type profanity_filter: bool
        :param profanity_filter: If True, the server will attempt to filter
                                 out profanities, replacing all but the
                                 initial character in each filtered word with
                                 asterisks, e.g. ``'f***'``. If False or
                                 omitted, profanities won't be filtered out.
        :type speech_context: list
        :param speech_context: A list of strings (max 50) containing words and
                               phrases "hints" so that the speech recognition
                               is more likely to recognize them. This can be
                               used to improve the accuracy for specific words
                               and phrases. This can also be used to add new
                               words to the vocabulary of the recognizer.
        :rtype: list
        :returns: A list of dictionaries. One dict for each alternative. Each
                  dictionary typically contains two keys (though not
                  all will be present in all cases)
                  * ``transcript``: The detected text from the audio recording.
                  * ``confidence``: The confidence in language detection, float
                    between 0 and 1.
        :raises: ValueError if more than one result is returned or no results.
        """
        config = RecognitionConfig(
            encoding=sample.encoding, sample_rate=sample.sample_rate,
            language_code=language_code, max_alternatives=max_alternatives,
            profanity_filter=profanity_filter,
            speech_context=SpeechContext(phrases=speech_context))
        audio = RecognitionAudio(content=sample.content,
                                 uri=sample.source_uri)
        api = self._gapic_api
        api_response = api.sync_recognize(config=config, audio=audio)
        # The sync endpoint is expected to yield exactly one result whose
        # alternatives are unpacked; anything else is treated as an error.
        if len(api_response.results) == 1:
            results = api_response.results.pop()
            alternatives = results.alternatives
            return [Alternative.from_pb(alternative)
                    for alternative in alternatives]
        else:
            raise ValueError('More than one result or none returned from API.')
def _stream_requests(sample, language_code=None, max_alternatives=None,
                     profanity_filter=None, speech_context=None,
                     single_utterance=None, interim_results=None):
    """Generate the gRPC request stream for streaming recognition.

    The first yielded request carries only the recognition configuration;
    every subsequent request carries one chunk of audio read from
    ``sample.content``.

    :type sample: :class:`~google.cloud.speech.sample.Sample`
    :param sample: Instance of ``Sample`` containing audio information.

    :type language_code: str
    :param language_code: (Optional) BCP-47 language tag of the audio,
                          e.g. ``'en-GB'``; defaults to ``'en-US'``
                          when omitted.

    :type max_alternatives: int
    :param max_alternatives: (Optional) Maximum number of recognition
                             hypotheses to return. Valid values are 0-30;
                             0 or 1 yields at most 1. Defaults to 1.

    :type profanity_filter: bool
    :param profanity_filter: (Optional) When true, the server masks
                             profanities, e.g. ``'f***'``; otherwise they
                             are returned unfiltered.

    :type speech_context: list
    :param speech_context: (Optional) Up to 50 phrase "hints" biasing
                           recognition toward specific words and phrases,
                           or adding new words to the vocabulary.

    :type single_utterance: bool
    :param single_utterance: (Optional) When true, recognition stops after
                             a single detected utterance (END_OF_UTTERANCE
                             event); otherwise it continues until the
                             stream is closed or the time limit is hit.

    :type interim_results: bool
    :param interim_results: (Optional) When true, tentative hypotheses
                            (``is_final=False``) may be yielded as they
                            become available; otherwise only final results
                            are returned.
    """
    # The configuration request MUST be sent first and MUST NOT carry
    # any audio content.
    yield _make_streaming_request(
        sample, language_code=language_code,
        max_alternatives=max_alternatives,
        profanity_filter=profanity_filter,
        speech_context=SpeechContext(phrases=speech_context),
        single_utterance=single_utterance,
        interim_results=interim_results)
    # Stream the audio in chunk_size pieces until the source is drained.
    chunk = sample.content.read(sample.chunk_size)
    while chunk:
        yield StreamingRecognizeRequest(audio_content=chunk)
        chunk = sample.content.read(sample.chunk_size)
def _make_streaming_request(sample, language_code,
                            max_alternatives, profanity_filter,
                            speech_context, single_utterance,
                            interim_results):
    """Build the configuration-only request that opens a streaming call.

    :type sample: :class:`~google.cloud.speech.sample.Sample`
    :param sample: Instance of ``Sample`` containing audio information.

    :type language_code: str
    :param language_code: BCP-47 language tag of the audio, e.g.
                          ``'en-GB'``; defaults to ``'en-US'`` when
                          omitted.

    :type max_alternatives: int
    :param max_alternatives: Maximum number of recognition hypotheses to
                             return. Valid values are 0-30; 0 or 1 yields
                             at most 1. Defaults to 1.

    :type profanity_filter: bool
    :param profanity_filter: When true, the server masks profanities,
                             e.g. ``'f***'``; otherwise they are returned
                             unfiltered.

    :type speech_context: list
    :param speech_context: Up to 50 phrase "hints" biasing recognition
                           toward specific words and phrases, or adding
                           new words to the vocabulary.

    :type single_utterance: bool
    :param single_utterance: When true, recognition stops after a single
                             detected utterance (END_OF_UTTERANCE event);
                             otherwise it continues until the stream is
                             closed or the time limit is hit.

    :type interim_results: bool
    :param interim_results: When true, tentative hypotheses
                            (``is_final=False``) may be returned as they
                            become available; otherwise only final
                            results are returned.

    :rtype:
        :class:`~grpc.speech.v1beta1.cloud_speech_pb2.StreamingRecognizeRequest`
    :returns: Instance of ``StreamingRecognizeRequest``.
    """
    recognition_config = RecognitionConfig(
        encoding=sample.encoding,
        sample_rate=sample.sample_rate,
        language_code=language_code,
        max_alternatives=max_alternatives,
        profanity_filter=profanity_filter,
        speech_context=speech_context)
    # Nest the recognition settings inside the streaming wrapper; the
    # resulting request carries configuration only, no audio.
    return StreamingRecognizeRequest(
        streaming_config=StreamingRecognitionConfig(
            config=recognition_config,
            single_utterance=single_utterance,
            interim_results=interim_results))
|
ivan-fedorov/intellij-community | refs/heads/master | python/testData/quickdoc/HoverOverMethod.py | 83 | class A(object):
"doc of A"
def f(self):
return 1
A().<the_ref>f
|
Eddy0402/Environment | refs/heads/master | vim/ycmd/third_party/requests/requests/packages/chardet/langbulgarianmodel.py | 2964 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only the numbers below 64 are certainly valid
# Maps each ISO-8859-5 byte value (tuple index 0-255, rows annotated with
# the high nibble in hex) to a frequency-order rank for Bulgarian text.
# Lower ranks mark more frequent letters; per the legend above, 252 = digit,
# 253 = symbol/punctuation, 254 = CR/LF, 255 = control character.
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
# Maps each windows-1251 byte value (tuple index 0-255, rows annotated with
# the high nibble in hex) to a frequency-order rank for Bulgarian text.
# The ASCII half (rows 00-70) matches the ISO-8859-5 table above; only the
# high-byte (Cyrillic) rows differ.  Sentinels: 252 = digit, 253 = symbol,
# 254 = CR/LF, 255 = control character (see the legend above).
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
# Character-pair (bigram) language model for Bulgarian.  The tuple holds
# 4096 values = a flattened 64x64 matrix indexed by the frequency orders
# produced by the char-to-order maps above (each pair of physical lines
# below is one 64-entry matrix row).  Cell values 0-3 appear to grade how
# typical the character sequence is in Bulgarian text (3 = very common,
# 0 = rare/negative) -- consistent with the "Model Table" statistics in
# the comment above; TODO confirm against the chardet SBCS prober.
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Model descriptor for Bulgarian text encoded as ISO-8859-5 (Latin-5
# Cyrillic).  Shares the bigram matrix with the windows-1251 variant;
# only the byte-to-order map and charset name differ.
Latin5BulgarianModel = dict(
    charToOrderMap=Latin5_BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="ISO-8859-5",
)
# Model descriptor for Bulgarian text encoded as windows-1251.  Shares
# the bigram matrix with the ISO-8859-5 variant; only the byte-to-order
# map and charset name differ.
Win1251BulgarianModel = dict(
    charToOrderMap=win1251BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="windows-1251",
)
# flake8: noqa
|
savoirfairelinux/santropol-feast | refs/heads/dev | src/member/migrations/0004_fix004a.py | 3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-23 01:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add client avoid-component / avoid-ingredient link tables and a
    free-form ``value`` on client_option; tighten the choices on several
    client fields and make ``client.route`` a required foreign key.
    """

    dependencies = [
        ('meal', '0002_ingredient_ingredient_group'),
        ('member', '0003_auto_20160615_2100'),
    ]

    operations = [
        # Bare link tables; their FK columns are added by the AddField
        # operations at the end of this migration.
        migrations.CreateModel(
            name='Client_avoid_component',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Client_avoid_ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AddField(
            model_name='client_option',
            name='value',
            field=models.CharField(max_length=100, null=True, verbose_name='value'),
        ),
        migrations.AlterField(
            model_name='client',
            name='billing_payment_type',
            field=models.CharField(choices=[('check', 'Check'), ('cash', 'Cash'), ('debit', 'Debit card'), ('credit', 'Credit card'), ('eft', 'EFT')], max_length=10, null=True, verbose_name='Payment Type'),
        ),
        migrations.AlterField(
            model_name='client',
            name='gender',
            # BUG FIX: null was passed the *string* 'True' instead of the
            # boolean True.  The string is truthy, so the applied schema is
            # unchanged; the boolean is what Django's API expects.
            # NOTE(review): default 'U' is not among the declared choices --
            # kept as-is to preserve the historical migration state.
            field=models.CharField(blank=True, choices=[('', 'Gender'), ('F', 'Female'), ('M', 'Male')], default='U', max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='client',
            name='route',
            field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='member.Route', verbose_name='route'),
            # default='' is only used to populate existing rows during the
            # migration; it is not written into the model's state.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='option',
            name='option_group',
            field=models.CharField(choices=[('main dish size', 'Main dish size'), ('dish', 'Dish'), ('preparation', 'Preparation'), ('other order item', 'Other order item')], max_length=100, verbose_name='option group'),
        ),
        migrations.AddField(
            model_name='client_avoid_ingredient',
            name='client',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='member.Client', verbose_name='client'),
        ),
        migrations.AddField(
            model_name='client_avoid_ingredient',
            name='ingredient',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='meal.Ingredient', verbose_name='ingredient'),
        ),
        migrations.AddField(
            model_name='client_avoid_component',
            name='client',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='member.Client', verbose_name='client'),
        ),
        migrations.AddField(
            model_name='client_avoid_component',
            name='component',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='meal.Component', verbose_name='component'),
        ),
    ]
|
fabianvaccaro/pygums | refs/heads/master | pythonLibs/pymorph-0.96/pymorph/text.py | 3 | __all__ = ['text']
import numpy as np
from mmorph import concat
FontDft = np.array([
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 1, 0,],
[0, 0, 0, 0, 1, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 1, 0, 0, 1, 0, 0,],
[0, 1, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 1, 0, 1, 0,],
[0, 0, 1, 0, 0, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 1, 1, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 1, 1, 0, 0,],
[0, 0, 1, 1, 1, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 1, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 1, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 1, 0, 0,],
[0, 0, 0, 0, 1, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 1, 1, 1, 0,],
[0, 1, 0, 1, 0, 0, 0, 1, 0,],
[0, 1, 0, 1, 0, 0, 1, 1, 0,],
[0, 1, 0, 0, 1, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 1, 1, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 0, 0, 0, 1, 1, 0,],
[0, 1, 0, 1, 0, 1, 0, 1, 0,],
[0, 1, 0, 1, 0, 1, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 1, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 1, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 1, 0, 1, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[5, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 1, 0,],
[0, 0, 0, 1, 0, 0, 0, 1, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 1, 1, 0, 0,],
[0, 1, 0, 1, 1, 0, 0, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 0, 1, 1, 0, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 1, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 1, 1, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 1, 1, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 1, 1, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 0, 1, 0, 0, 1, 0,],
[0, 1, 0, 1, 0, 1, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 1, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 0, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 1, 1, 0, 0,],
[0, 0, 1, 1, 1, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 1, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 1, 1, 1, 1, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 1, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 1, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 1, 1, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 1, 1, 1, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
[[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 1, 1, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 0, 0, 0,],
[0, 0, 0, 0, 1, 1, 0, 0, 0,],
[0, 0, 0, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 1, 1, 1, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0,]],
],
np.bool)
def text(txt):
    """
    y = text(txt)

    Create a binary image of a text.

    The background of y is 0 (False), its foreground is 1 (True).
    Each character is looked up in the module-level glyph table
    ``FontDft`` and the glyphs are concatenated horizontally with
    ``concat``.

    Parameters
    ----------
    txt : str
        Text to render. Every character must map into the glyph
        table (printable ASCII starting at space).

    Returns
    -------
    y : 2-D boolean array (binary image). For an empty ``txt`` an
        empty-width image is returned instead of raising, as the
        original code did via an unbound local.
    """
    FIRST_CHAR = 32  # first printable ASCII character (space)
    if not txt:
        # Glyphs in FontDft are 15 rows tall (see the table above),
        # so an empty text renders as a 15 x 0 image.
        return np.zeros((15, 0), bool)
    y = None
    for ch in txt:
        ind = ord(ch) - FIRST_CHAR
        # Bound the lookup by the actual table size. The old check used
        # LAST_CHAR - FIRST_CHAR == 94 with a strict '<', which wrongly
        # rejected '~' (index 94) even though 32..126 is 95 characters.
        assert 0 <= ind < len(FontDft), \
            'pymorph.text: code not allowed (%s)' % ch
        glyph = FontDft[ind]
        # Concatenate width-wise ('w' axis); the leftover debug print
        # of the original has been removed.
        y = glyph if y is None else concat('w', y, glyph)
    return y
|
mugizico/scikit-learn | refs/heads/master | sklearn/linear_model/least_angle.py | 42 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
# Extra keyword arguments forwarded to scipy.linalg.solve_triangular.
# scipy >= 0.12 accepts check_finite=False, which skips input-validation
# for speed; older versions get no extra arguments.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]
    The optimization objective for the case method='lasso' is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.
    y : array, shape: (n_samples)
        Input targets.
    Xy : array-like, shape (n_samples,), optional
        Xy = np.dot(X.T, y) that can be precomputed.
    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.
    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.
    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.
    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.
    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.
    verbose : int (default=0)
        Controls output verbosity.
    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.
    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.
    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.
    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path
    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.
    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode
    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_
    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
    """
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)
    if return_path:
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?
    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False
    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that it not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()
    # Cov holds the covariances of the *inactive* variables with the
    # current residual; active variables are progressively swapped to
    # the front and sliced off.
    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()
    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps
    # Main loop: one iteration per path node -- each iteration either
    # adds the most-correlated inactive variable to the active set, or
    # (Lasso variant only) drops a variable whose coefficient crossed 0.
    while True:
        if Cov.size:
            C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            C = np.fabs(C_)
        else:
            C = 0.
        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]
        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break
        if n_iter >= max_iter or n_active >= n_features:
            break
        if not drop:
            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and  z = ||x_j||               #
            #                                                        #
            ##########################################################
            sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active
            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]
            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]
            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)
            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag
            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.
                # Note: this case is very rare. It is no longer triggered by the
                # test suite. The `equality_tolerance` margin added in 0.16.0 to
                # get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue
            active.append(indices[n_active])
            n_active += 1
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))
        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break
        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)
        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
            if not np.isfinite(AA):
                # L is too ill-conditioned
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    # Progressively regularize the diagonal until the
                    # triangular solve yields a finite normalization.
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
        least_squares *= AA
        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)
        # g1/g2: smallest positive step at which some inactive variable
        # becomes as correlated with the residual as the active set.
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
        gamma_ = min(g1, g2, C / AA)
        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]
            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]
            if method == 'lasso':
                gamma_ = z_pos
            drop = True
        n_iter += 1
        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)
        coef[active] = prev_coef[active] + gamma_ * least_squares
        # update correlations
        Cov -= gamma_ * corr_eq_dir
        # See if any coefficient has changed sign
        if drop and method == 'lasso':
            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]
            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]
            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)
                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])
                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?
                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables
                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]
            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))
    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]
        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]
    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        # 'lar' never drops variables; subclasses override with 'lasso'.
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path
    def _get_gram(self):
        # Resolve the `precompute` option into the Gram argument that
        # lars_path expects: an explicit matrix, 'auto', or None.
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram
    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
        n_features = X.shape[1]
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)
        if y.ndim == 1:
            y = y[:, np.newaxis]
        n_targets = y.shape[1]
        # Subclasses such as LassoLars define self.alpha; plain Lars has
        # n_nonzero_coefs instead, which takes priority when present.
        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter
        precompute = self.precompute
        # Precompute the Gram matrix when it will be reused across
        # several targets or when n_samples > n_features makes it cheap.
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()
        self.alphas_ = []
        self.n_iter_ = []
        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])
            # For a single target, unwrap the per-target lists so the
            # attributes are plain arrays, as documented.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]
        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars
    It is a Linear Model trained with an L1 prior as regularizer.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.
    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.
    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.
    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.
    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).
    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.
    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, precompute='auto',
         verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -0.963257...]
    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        # self.alpha is picked up by Lars.fit via getattr(self, 'alpha').
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        # 'lasso' makes lars_path drop variables whose coefficient
        # crosses zero, yielding the Lasso solution path.
        self.method = 'lasso'
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps):
    """Fit a full LARS path on the training split and return the
    prediction residues on the held-out split.

    Parameters
    ----------
    X_train, y_train : arrays
        Data and targets the path is fitted on.
    X_test, y_test : arrays
        Data and targets the residues are computed on.
    Gram : None, 'auto' or array, optional
        Precomputed Gram matrix, forwarded to :func:`lars_path`.
    copy : boolean, optional
        If False the four input arrays may be overwritten in place.
    method : 'lar' | 'lasso'
        Path variant to compute, forwarded to :func:`lars_path`.
    verbose : integer, optional
        Verbosity level.
    fit_intercept : boolean
        If True, center X and y using the training statistics.
    normalize : boolean, optional
        If True, scale the training columns to unit norm before fitting.
    max_iter : integer, optional
        Maximum number of path iterations.
    eps : float, optional
        Cholesky regularization, forwarded to :func:`lars_path`.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
    active : list
        Indices of active variables at the end of the path.
    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path.
    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data.
    """
    # Copy the inputs when asked to (or when they are read-only), since
    # the centering/normalization below mutates them in place.
    X_train, y_train, X_test, y_test = (
        _check_copy_and_writeable(arr, copy)
        for arr in (X_train, y_train, X_test, y_test))
    if fit_intercept:
        # Center both splits with the *training* statistics only.
        x_offset = X_train.mean(axis=0)
        X_train -= x_offset
        X_test -= x_offset
        y_offset = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_offset
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_offset
    if normalize:
        col_norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzero_cols = np.flatnonzero(col_norms)
        X_train[:, nonzero_cols] /= col_norms[nonzero_cols]
    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
    if normalize:
        # Undo the column scaling so the coefficients apply to the
        # original (unnormalized) feature scale of X_test.
        coefs[nonzero_cols] /= col_norms[nonzero_cols][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter: integer, optional
        Maximum number of iterations to perform.
    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    # Class attribute so LassoLarsCV only needs to override this one name.
    method = 'lar'
    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
                 copy_X=True):
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)
        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)
        Gram = 'auto' if self.precompute else None
        # One full LARS path (with held-out residues) per CV fold,
        # possibly in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            # Path nodes come sorted by decreasing alpha; reverse them so
            # interp1d gets increasing abscissae, and pad both ends so the
            # common alpha grid is fully covered.
            alphas = alphas[::-1]
            residues = residues[::-1]
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)
        # Drop grid points where any fold produced a non-finite error.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]
        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]
        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path
        # Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self
    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <least_angle_regression>`.
    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform.
    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, default to
        a 5-fold strategy
    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function.
    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path
    alpha_ : float
        the estimated regularization parameter alpha
    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path
    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)
    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.
    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.
    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.
    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """
    # Overriding this single attribute turns LarsCV's machinery into a
    # cross-validated Lasso path (variables may be dropped).
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    verbose : boolean or integer, optional
        Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)
    intercept_ : float
        independent term in decision function.
    alpha_ : float
        the alpha parameter chosen by the information criterion
    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.
    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, precompute='auto',
          verbose=False)
    >>> print(clf.coef_)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True):
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps
    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.
        y : array-like, shape (n_samples,)
            target values.
        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        # The full regularization path is needed to evaluate the criterion
        # at every alpha.
        self.fit_path = True
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter
        Gram = self._get_gram()
        # Compute the whole LARS/lasso path down to alpha_min=0.
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True)
        n_samples = X.shape[0]
        # K is the per-degree-of-freedom penalty weight of the criterion.
        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')
        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)
        df = np.zeros(coef_path_.shape[1], dtype=np.int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)
        self.alphas_ = alphas_
        # MSE can be exactly 0 on a perfectly-fit path point; suppress the
        # resulting log(0) divide warning (criterion becomes -inf there).
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)
        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
|
brendangregg/bcc | refs/heads/master | tests/python/test_debuginfo.py | 3 | #!/usr/bin/env python
# Copyright (c) Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
import os
import subprocess
from bcc import SymbolCache, BPF
from unittest import main, TestCase
from utils import mayFail
class TestKSyms(TestCase):
    """Kernel symbol resolution tests against /proc/kallsyms."""
    def grab_sym(self):
        address = ""
        aliases = []
        # Grab the first symbol in kallsyms that has type 't' or 'T'.
        # Also, find all aliases of this symbol which are identifiable
        # by the same address.
        with open("/proc/kallsyms", "rb") as f:
            for line in f:
                # Extract the first 3 columns only. The 4th column
                # containing the module name may not exist for all
                # symbols.
                (addr, t, name) = line.strip().split()[:3]
                if t == b"t" or t == b"T":
                    if not address:
                        address = addr
                    if addr == address:
                        aliases.append(name)
        # Return all aliases of the first symbol.
        return (address, aliases)
    def test_ksymname(self):
        # Resolving a well-known kernel symbol name must give a non-zero address.
        sym = BPF.ksymname(b"__kmalloc")
        self.assertIsNotNone(sym)
        self.assertNotEqual(sym, 0)
    def test_ksym(self):
        # Address-to-name resolution must return one of the known aliases
        # for that address.
        (addr, aliases) = self.grab_sym()
        sym = BPF.ksym(int(addr, 16))
        found = sym in aliases
        self.assertTrue(found)
class Harness(TestCase):
    """Shared fixture for userspace symbol-resolution tests.

    Builds ./dummy, splits its debug info into dummy.debug, lets the
    subclass link the debug info back (via debuglink or build-id), strips
    the binary, then runs it and resolves symbols in the live process.
    Subclasses must implement build_command() and debug_command().
    """
    def setUp(self):
        self.build_command()
        subprocess.check_output('objcopy --only-keep-debug dummy dummy.debug'
                                .split())
        self.debug_command()
        subprocess.check_output('strip dummy'.split())
        self.process = subprocess.Popen('./dummy', stdout=subprocess.PIPE)
        # The process prints out the address of some symbol, which we then
        # try to resolve in the test.
        self.addr = int(self.process.stdout.readline().strip(), 16)
        self.syms = SymbolCache(self.process.pid)
    def tearDown(self):
        self.process.kill()
        self.process.wait()
        self.process.stdout.close()
        self.process = None
    def resolve_addr(self):
        # Address -> symbol: first mangled, then demangled.
        sym, offset, module = self.syms.resolve(self.addr, False)
        self.assertEqual(sym, self.mangled_name)
        self.assertEqual(offset, 0)
        self.assertTrue(module[-5:] == b'dummy')
        sym, offset, module = self.syms.resolve(self.addr, True)
        self.assertEqual(sym, b'some_namespace::some_function(int, int)')
        self.assertEqual(offset, 0)
        self.assertTrue(module[-5:] == b'dummy')
    def resolve_name(self):
        # Symbol name -> address: must round-trip to the address the
        # dummy process printed at startup.
        # (Removed a stray dead `pass` statement that followed the body.)
        script_dir = os.path.dirname(os.path.realpath(__file__).encode("utf8"))
        addr = self.syms.resolve_name(os.path.join(script_dir, b'dummy'),
                                      self.mangled_name)
        self.assertEqual(addr, self.addr)
class TestDebuglink(Harness):
    """Symbol resolution when debug info is attached via .gnu_debuglink."""
    def build_command(self):
        subprocess.check_output('g++ -o dummy dummy.cc'.split())
        # Record the mangled name of some_function from the unstripped binary.
        lines = subprocess.check_output('nm dummy'.split()).splitlines()
        for line in lines:
            if b"some_function" in line:
                self.mangled_name = line.split(b' ')[2]
                break
        self.assertTrue(self.mangled_name)
    def debug_command(self):
        # Point the stripped binary at dummy.debug via a .gnu_debuglink section.
        subprocess.check_output('objcopy --add-gnu-debuglink=dummy.debug dummy'
                                .split())
    def tearDown(self):
        super(TestDebuglink, self).tearDown()
        subprocess.check_output('rm dummy dummy.debug'.split())
    def test_resolve_addr(self):
        self.resolve_addr()
    @mayFail("This fails on github actions environment, and needs to be fixed")
    def test_resolve_name(self):
        self.resolve_name()
class TestBuildid(Harness):
    """Symbol resolution when debug info is located via a GNU build-id."""
    def build_command(self):
        # Embed a fixed build-id so debug_command() can install the split
        # debug file at the exact path the build-id lookup expects.
        subprocess.check_output(('g++ -o dummy -Xlinker ' + \
            '--build-id=0x123456789abcdef0123456789abcdef012345678 dummy.cc')
            .split())
        # Record the mangled name of some_function from the unstripped binary.
        lines = subprocess.check_output('nm dummy'.split()).splitlines()
        for line in lines:
            if b"some_function" in line:
                self.mangled_name = line.split(b' ')[2]
                break
        self.assertTrue(self.mangled_name)
    def debug_command(self):
        # Install the debug file under /usr/lib/debug/.build-id/<xx>/<rest>.debug,
        # the standard location for build-id based debug info lookup.
        subprocess.check_output('mkdir -p /usr/lib/debug/.build-id/12'.split())
        subprocess.check_output(('mv dummy.debug /usr/lib/debug/.build-id' + \
            '/12/3456789abcdef0123456789abcdef012345678.debug').split())
    def tearDown(self):
        super(TestBuildid, self).tearDown()
        subprocess.check_output('rm dummy'.split())
        subprocess.check_output(('rm /usr/lib/debug/.build-id/12' +
            '/3456789abcdef0123456789abcdef012345678.debug').split())
    def test_resolve_addr(self):
        # BUG FIX: the bodies of these two tests were swapped relative to
        # their names (and to TestDebuglink). Each test now runs the check
        # its name advertises; @mayFail stays with the resolve_name path,
        # consistent with TestDebuglink.
        self.resolve_addr()
    @mayFail("This fails on github actions environment, and needs to be fixed")
    def test_resolve_name(self):
        self.resolve_name()
# Run all unittest cases when executed directly.
if __name__ == "__main__":
    main()
|
dasdachs/flask-blog | refs/heads/master | backend/app/api/auth.py | 1 | #!/usr/bin/env python3.4
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask.ext.login import login_user, logout_user, login_required
from ..models import User
from ..forms import LoginForm
auth = Blueprint('auth', __name__)
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    Renders the login form on GET; on a valid POST, looks the user up by
    username, verifies the password, and starts a Flask-Login session.
    On success, redirects to the requested ``next`` URL or the admin
    dashboard; on bad credentials, flashes an error and re-renders.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if not user or not user.verify_password(form.password.data):
            # Deliberately vague message: don't reveal whether the
            # username or the password was the wrong part.
            flash('Invalid email or password')
            return redirect(url_for('auth.login'))
        login_user(user, form.remember_me.data)
        # NOTE(review): `next` is used unvalidated, which permits open
        # redirects — consider verifying it is a relative/local URL.
        return redirect(request.args.get('next') or url_for('admin.dashboard'))
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
    """End the current Flask-Login session and return to the blog front page."""
    logout_user()
    flash('Logged out and good to go.')
    return redirect(url_for('blog.main'))
Vishluck/sympy | refs/heads/master | sympy/functions/special/tests/test_elliptic_integrals.py | 97 | from sympy import (S, Symbol, pi, I, oo, zoo, sin, sqrt, tan, gamma,
atanh, hyper, meijerg, O)
from sympy.functions.special.elliptic_integrals import (elliptic_k as K,
elliptic_f as F, elliptic_e as E, elliptic_pi as P)
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
verify_numerically as tn)
from sympy.abc import z, m, n
i = Symbol('i', integer=True)
# NOTE(review): the Python variable is `j` but the Symbol's name is 'k' —
# looks like a typo; confirm which name the tests are meant to print.
j = Symbol('k', integer=True, positive=True)
def test_K():
    """Complete elliptic integral of the first kind K(m): special values,
    derivative, conjugation, hyper/meijerg rewrites and series."""
    assert K(0) == pi/2
    assert K(S(1)/2) == 8*pi**(S(3)/2)/gamma(-S(1)/4)**2
    assert K(1) == zoo
    assert K(-1) == gamma(S(1)/4)**2/(4*sqrt(2*pi))
    assert K(oo) == 0
    assert K(-oo) == 0
    assert K(I*oo) == 0
    assert K(-I*oo) == 0
    assert K(zoo) == 0
    assert K(z).diff(z) == (E(z) - (1 - z)*K(z))/(2*z*(1 - z))
    assert td(K(z), z)
    zi = Symbol('z', real=False)
    assert K(zi).conjugate() == K(zi.conjugate())
    zr = Symbol('z', real=True, negative=True)
    assert K(zr).conjugate() == K(zr)
    assert K(z).rewrite(hyper) == \
        (pi/2)*hyper((S.Half, S.Half), (S.One,), z)
    assert tn(K(z), (pi/2)*hyper((S.Half, S.Half), (S.One,), z))
    assert K(z).rewrite(meijerg) == \
        meijerg(((S.Half, S.Half), []), ((S.Zero,), (S.Zero,)), -z)/2
    assert tn(K(z), meijerg(((S.Half, S.Half), []), ((S.Zero,), (S.Zero,)), -z)/2)
    assert K(z).series(z) == pi/2 + pi*z/8 + 9*pi*z**2/128 + \
        25*pi*z**3/512 + 1225*pi*z**4/32768 + 3969*pi*z**5/131072 + O(z**6)
def test_F():
    """Incomplete elliptic integral of the first kind F(z, m): special
    values, parity, derivatives, conjugation and series."""
    assert F(z, 0) == z
    assert F(0, m) == 0
    assert F(pi*i/2, m) == i*K(m)
    assert F(z, oo) == 0
    assert F(z, -oo) == 0
    assert F(-z, m) == -F(z, m)
    assert F(z, m).diff(z) == 1/sqrt(1 - m*sin(z)**2)
    assert F(z, m).diff(m) == E(z, m)/(2*m*(1 - m)) - F(z, m)/(2*m) - \
        sin(2*z)/(4*(1 - m)*sqrt(1 - m*sin(z)**2))
    r = randcplx()
    assert td(F(z, r), z)
    assert td(F(r, m), m)
    mi = Symbol('m', real=False)
    assert F(z, mi).conjugate() == F(z.conjugate(), mi.conjugate())
    mr = Symbol('m', real=True, negative=True)
    assert F(z, mr).conjugate() == F(z.conjugate(), mr)
    assert F(z, m).series(z) == \
        z + z**5*(3*m**2/40 - m/30) + m*z**3/6 + O(z**6)
def test_E():
    """Elliptic integral of the second kind, both incomplete E(z, m) and
    complete E(m): special values, derivatives, conjugation, rewrites,
    series."""
    assert E(z, 0) == z
    assert E(0, m) == 0
    assert E(i*pi/2, m) == i*E(m)
    assert E(z, oo) == zoo
    assert E(z, -oo) == zoo
    assert E(0) == pi/2
    assert E(1) == 1
    assert E(oo) == I*oo
    assert E(-oo) == oo
    assert E(zoo) == zoo
    assert E(-z, m) == -E(z, m)
    assert E(z, m).diff(z) == sqrt(1 - m*sin(z)**2)
    assert E(z, m).diff(m) == (E(z, m) - F(z, m))/(2*m)
    assert E(z).diff(z) == (E(z) - K(z))/(2*z)
    r = randcplx()
    assert td(E(r, m), m)
    assert td(E(z, r), z)
    assert td(E(z), z)
    mi = Symbol('m', real=False)
    assert E(z, mi).conjugate() == E(z.conjugate(), mi.conjugate())
    assert E(mi).conjugate() == E(mi.conjugate())
    mr = Symbol('m', real=True, negative=True)
    assert E(z, mr).conjugate() == E(z.conjugate(), mr)
    assert E(mr).conjugate() == E(mr)
    assert E(z).rewrite(hyper) == (pi/2)*hyper((-S.Half, S.Half), (S.One,), z)
    assert tn(E(z), (pi/2)*hyper((-S.Half, S.Half), (S.One,), z))
    assert E(z).rewrite(meijerg) == \
        -meijerg(((S.Half, S(3)/2), []), ((S.Zero,), (S.Zero,)), -z)/4
    assert tn(E(z), -meijerg(((S.Half, S(3)/2), []), ((S.Zero,), (S.Zero,)), -z)/4)
    assert E(z, m).series(z) == \
        z + z**5*(-m**2/40 + m/30) - m*z**3/6 + O(z**6)
    assert E(z).series(z) == pi/2 - pi*z/8 - 3*pi*z**2/128 - \
        5*pi*z**3/512 - 175*pi*z**4/32768 - 441*pi*z**5/131072 + O(z**6)
def test_P():
    """Elliptic integral of the third kind, incomplete P(n, z, m) and
    complete P(n, m): special values, parity, conjugation, derivatives,
    series."""
    assert P(0, z, m) == F(z, m)
    assert P(1, z, m) == F(z, m) + \
        (sqrt(1 - m*sin(z)**2)*tan(z) - E(z, m))/(1 - m)
    assert P(n, i*pi/2, m) == i*P(n, m)
    assert P(n, z, 0) == atanh(sqrt(n - 1)*tan(z))/sqrt(n - 1)
    assert P(n, z, n) == F(z, n) - P(1, z, n) + tan(z)/sqrt(1 - n*sin(z)**2)
    assert P(oo, z, m) == 0
    assert P(-oo, z, m) == 0
    assert P(n, z, oo) == 0
    assert P(n, z, -oo) == 0
    assert P(0, m) == K(m)
    assert P(1, m) == zoo
    assert P(n, 0) == pi/(2*sqrt(1 - n))
    assert P(2, 1) == -oo
    assert P(-1, 1) == oo
    assert P(n, n) == E(n)/(1 - n)
    assert P(n, -z, m) == -P(n, z, m)
    ni, mi = Symbol('n', real=False), Symbol('m', real=False)
    assert P(ni, z, mi).conjugate() == \
        P(ni.conjugate(), z.conjugate(), mi.conjugate())
    nr, mr = Symbol('n', real=True, negative=True), \
        Symbol('m', real=True, negative=True)
    assert P(nr, z, mr).conjugate() == P(nr, z.conjugate(), mr)
    assert P(n, m).conjugate() == P(n.conjugate(), m.conjugate())
    assert P(n, z, m).diff(n) == (E(z, m) + (m - n)*F(z, m)/n +
        (n**2 - m)*P(n, z, m)/n - n*sqrt(1 -
            m*sin(z)**2)*sin(2*z)/(2*(1 - n*sin(z)**2)))/(2*(m - n)*(n - 1))
    assert P(n, z, m).diff(z) == 1/(sqrt(1 - m*sin(z)**2)*(1 - n*sin(z)**2))
    assert P(n, z, m).diff(m) == (E(z, m)/(m - 1) + P(n, z, m) -
        m*sin(2*z)/(2*(m - 1)*sqrt(1 - m*sin(z)**2)))/(2*(n - m))
    assert P(n, m).diff(n) == (E(m) + (m - n)*K(m)/n +
        (n**2 - m)*P(n, m)/n)/(2*(m - n)*(n - 1))
    assert P(n, m).diff(m) == (E(m)/(m - 1) + P(n, m))/(2*(n - m))
    rx, ry = randcplx(), randcplx()
    assert td(P(n, rx, ry), n)
    assert td(P(rx, z, ry), z)
    assert td(P(rx, ry, m), m)
    assert P(n, z, m).series(z) == z + z**3*(m/6 + n/3) + \
        z**5*(3*m**2/40 + m*n/10 - m/30 + n**2/5 - n/15) + O(z**6)
|
yfried/ansible | refs/heads/devel | lib/ansible/plugins/inventory/host_list.py | 113 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
inventory: host_list
version_added: "2.4"
short_description: Parses a 'host list' string
description:
- Parses a host list string as a comma separated values of hosts
- This plugin only applies to inventory strings that are not paths and contain a comma.
'''
EXAMPLES = r'''
# define 2 hosts in command line
# ansible -i '10.10.2.6, 10.10.2.4' -m ping all
# DNS resolvable names
# ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
# just use localhost
# ansible-playbook -i 'localhost,' play.yml -c local
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_native
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
    """Inventory plugin parsing a comma-separated host list string."""
    NAME = 'host_list'
    def verify_file(self, host_list):
        # A "host list" source is any inventory string that is NOT an
        # existing filesystem path and contains at least one comma.
        valid = False
        b_path = to_bytes(host_list, errors='surrogate_or_strict')
        if not os.path.exists(b_path) and ',' in host_list:
            valid = True
        return valid
    def parse(self, inventory, loader, host_list, cache=True):
        ''' parses the inventory file '''
        # NOTE(review): `cache` is accepted for interface compatibility but
        # is unused here — there is nothing to cache for an inline string.
        super(InventoryModule, self).parse(inventory, loader, host_list)
        try:
            for h in host_list.split(','):
                h = h.strip()
                if h:
                    try:
                        # Split an optional port (e.g. "host:2222"); ranges
                        # are not allowed in host-list strings.
                        (host, port) = parse_address(h, allow_ranges=False)
                    except AnsibleError as e:
                        # Not parseable as host[:port]; keep the raw token.
                        self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_native(e))
                        host = h
                        port = None
                    if host not in self.inventory.hosts:
                        self.inventory.add_host(host, group='ungrouped', port=port)
        except Exception as e:
            raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
|
Wuteyan/VTK | refs/heads/master | Graphics/Testing/Python/streamSurface2.py | 8 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
# create planes
# Create the RenderWindow, Renderer
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren )
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
# Read a PLOT3D structured grid (geometry + solution) from the data root.
pl3d = vtk.vtkPLOT3DReader()
pl3d.SetXYZFileName( vtkGetDataRoot() + '/Data/combxyz.bin' )
pl3d.SetQFileName( vtkGetDataRoot() + '/Data/combq.bin' )
pl3d.SetScalarFunctionNumber( 100 )
pl3d.SetVectorFunctionNumber( 202 )
pl3d.Update()
# Outline of the grid for spatial context.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputConnection(pl3d.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Seed streamlines from 10 points along a line segment.
seeds = vtk.vtkLineSource()
seeds.SetPoint1(15, -5, 32)
seeds.SetPoint2(15, 5, 32)
seeds.SetResolution(10)
# Integrate the streamlines backward through the vector field with RK4.
integ = vtk.vtkRungeKutta4()
sl = vtk.vtkStreamLine()
sl.SetIntegrator(integ)
sl.SetInputConnection(pl3d.GetOutputPort())
sl.SetSource(seeds.GetOutput())
sl.SetMaximumPropagationTime(0.1)
sl.SetIntegrationStepLength(0.1)
sl.SetIntegrationDirectionToBackward()
sl.SetStepLength(0.001)
# Build a stream surface by ruling between adjacent streamlines
# (resampled mode — this is what the test exercises).
scalarSurface = vtk.vtkRuledSurfaceFilter ()
scalarSurface.SetInputConnection(sl.GetOutputPort())
scalarSurface.SetOffset(0)
scalarSurface.SetOnRatio(2)
scalarSurface.PassLinesOn()
scalarSurface.SetRuledModeToResample()
scalarSurface.SetResolution(100,1)
scalarSurface.SetDistanceFactor(30)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(scalarSurface.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Also show the seed line itself.
mmapper = vtk.vtkPolyDataMapper()
mmapper.SetInputConnection(seeds.GetOutputPort())
mactor = vtk.vtkActor()
mactor.SetMapper(mmapper)
ren.AddActor(mactor)
ren.AddActor(actor)
ren.AddActor(outlineActor)
# Fixed camera so the regression image is reproducible.
cam=ren.GetActiveCamera()
cam.SetClippingRange( 3.95297, 50 )
cam.SetFocalPoint( 8.88908, 0.595038, 29.3342 )
cam.SetPosition( -12.3332, 31.7479, 41.2387 )
cam.SetViewUp( 0.060772, -0.319905, 0.945498 )
renWin.Render()
googleapis/googleapis-gen | refs/heads/master | google/cloud/datacatalog/v1beta1/datacatalog-v1beta1-py/tests/__init__.py | 951 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
punchagan/zulip | refs/heads/master | zerver/management/commands/deliver_scheduled_messages.py | 3 | import logging
import time
from datetime import timedelta
from typing import Any
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import build_message_send_dict, do_send_messages
from zerver.lib.logging_util import log_to_file
from zerver.lib.management import sleep_forever
from zerver.lib.message import SendMessageRequest
from zerver.models import Message, ScheduledMessage, get_user_by_delivery_email
## Setup ##
logger = logging.getLogger(__name__)
log_to_file(logger, settings.SCHEDULED_MESSAGE_DELIVERER_LOG_PATH)
class Command(BaseCommand):
    help = """Deliver scheduled messages from the ScheduledMessage table.
Run this command under supervisor.
This management command is run via supervisor. Do not run on multiple
machines, as you may encounter multiple sends in a specific race
condition. (Alternatively, you can set `EMAIL_DELIVERER_DISABLED=True`
on all but one machine to make the command have no effect.)
Usage: ./manage.py deliver_scheduled_messages
"""
    def construct_message(self, scheduled_message: ScheduledMessage) -> SendMessageRequest:
        """Build a send-ready request from a ScheduledMessage row.

        The Message object is populated in memory only; persistence and
        delivery happen in handle() via do_send_messages.
        """
        message = Message()
        original_sender = scheduled_message.sender
        message.content = scheduled_message.content
        message.recipient = scheduled_message.recipient
        message.subject = scheduled_message.subject
        message.date_sent = timezone_now()
        message.sending_client = scheduled_message.sending_client
        delivery_type = scheduled_message.delivery_type
        if delivery_type == ScheduledMessage.SEND_LATER:
            # Scheduled sends go out as the user who scheduled them.
            message.sender = original_sender
        elif delivery_type == ScheduledMessage.REMIND:
            # Reminders are delivered by the notification bot in the
            # original sender's realm.
            message.sender = get_user_by_delivery_email(
                settings.NOTIFICATION_BOT, original_sender.realm
            )
        message_dict = {
            "message": message,
            "stream": scheduled_message.stream,
            "realm": scheduled_message.realm,
        }
        return build_message_send_dict(message_dict)
    def handle(self, *args: Any, **options: Any) -> None:
        """Poll once a minute and deliver any due, undelivered messages."""
        try:
            if settings.EMAIL_DELIVERER_DISABLED:
                # Here doing a check and sleeping indefinitely on this setting might
                # not sound right. Actually we do this check to avoid running this
                # process on every server that might be in service to a realm. See
                # the comment in zproject/default_settings.py file about renaming this
                # setting.
                sleep_forever()
            while True:
                messages_to_deliver = ScheduledMessage.objects.filter(
                    scheduled_timestamp__lte=timezone_now(), delivered=False
                )
                for message in messages_to_deliver:
                    do_send_messages([self.construct_message(message)])
                    # Mark delivered only after the send succeeded.
                    message.delivered = True
                    message.save(update_fields=["delivered"])
                # Sleep until the start of the next minute before polling again.
                cur_time = timezone_now()
                time_next_min = (cur_time + timedelta(minutes=1)).replace(second=0, microsecond=0)
                sleep_time = (time_next_min - cur_time).total_seconds()
                time.sleep(sleep_time)
        except KeyboardInterrupt:
            pass
tvalacarta/tvalacarta | refs/heads/master | python/main-classic/lib/youtube_dl/extractor/rtvs.py | 28 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class RTVSIE(InfoExtractor):
    """Extractor for rtvs.sk radio and TV archive pages."""
    _VALID_URL = r'https?://(?:www\.)?rtvs\.sk/(?:radio|televizia)/archiv/\d+/(?P<id>\d+)'
    _TESTS = [{
        # radio archive
        'url': 'http://www.rtvs.sk/radio/archiv/11224/414872',
        'md5': '134d5d6debdeddf8a5d761cbc9edacb8',
        'info_dict': {
            'id': '414872',
            'ext': 'mp3',
            'title': 'Ostrov pokladov 1 časť.mp3'
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # tv archive
        'url': 'http://www.rtvs.sk/televizia/archiv/8249/63118',
        'md5': '85e2c55cf988403b70cac24f5c086dc6',
        'info_dict': {
            'id': '63118',
            'ext': 'mp4',
            'title': 'Amaro Džives - Náš deň',
            'description': 'Galavečer pri príležitosti Medzinárodného dňa Rómov.'
        },
        'params': {
            'skip_download': True,
        }
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds a JWPlayer config whose `playlist` value is a URL
        # to a JSON playlist; the first entry describes this video.
        playlist_url = self._search_regex(
            r'playlist["\']?\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
            'playlist url', group='url')
        data = self._download_json(
            playlist_url, video_id, 'Downloading playlist')[0]
        return self._parse_jwplayer_data(data, video_id=video_id)
|
emencia/emencia-django-xbus | refs/heads/master | xbus/south_migrations/0009_auto__chg_field_event_xref.py | 1 | # -*- coding: utf-8 -*-
# flake8: noqa
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: widen Event.xref from 36 (UUID-sized) to 80 chars."""
    def forwards(self, orm):
        # Changing field 'Event.xref'
        db.alter_column(u'xbus_event', 'xref', self.gf('django.db.models.fields.CharField')(max_length=80))
    def backwards(self, orm):
        # Changing field 'Event.xref'
        # NOTE(review): values longer than 36 chars would be truncated (or
        # rejected, backend-dependent) by this reverse migration.
        db.alter_column(u'xbus_event', 'xref', self.gf('django.db.models.fields.CharField')(max_length=36))
    # Frozen ORM snapshot used by South to reconstruct the model state.
    models = {
        u'xbus.event': {
            'Meta': {'object_name': 'Event'},
            'admin_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'ctime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'direction': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.BinaryField', [], {}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'xbus_message_correlation_id': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
            'xref': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        }
    }
    complete_apps = ['xbus']
|
pepeportela/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py | 21 | """
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import cPickle as pickle
import math
import zlib
import pymongo
import pytz
import re
from contextlib import contextmanager
from time import time
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
try:
from django.core.cache import caches, InvalidCacheBackendError
DJANGO_AVAILABLE = True
except ImportError:
DJANGO_AVAILABLE = False
import dogstats_wrapper as dog_stats_api
import logging
from contracts import check, new_contract
from mongodb_proxy import autoretry_read
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
new_contract('BlockData', BlockData)
log = logging.getLogger(__name__)
def get_cache(alias):
    """
    Return cache for an `alias`
    Note: The primary purpose of this is to mock the cache in test_split_modulestore.py
    """
    # Raises InvalidCacheBackendError for an unconfigured alias;
    # CourseStructureCache catches that and disables itself.
    return caches[alias]
def round_power_2(value):
    """
    Round *value* up to the nearest power of 2.

    Zero is returned unchanged; any other value yields a float power of two
    (used to bucket size measurements into coarse metric tags).
    """
    if value == 0:
        return 0
    exponent = math.ceil(math.log(value, 2))
    return math.pow(2, exponent)
class Tagger(object):
    """
    Collects tags and size measurements for a timed code block on behalf
    of :class:`QueryTimer`.
    """
    def __init__(self, default_sample_rate):
        # (name, value) pairs added explicitly via tag().
        self.added_tags = []
        # (name, size) pairs recorded via measure().
        self.measures = []
        self.sample_rate = default_sample_rate
    def measure(self, name, size):
        """
        Record a measurement of the timed data. This would be something to
        indicate the size of the value being timed.

        Arguments:
            name: The name of the measurement.
            size (float): The size of the measurement.
        """
        self.measures.append((name, size))
    def tag(self, **kwargs):
        """
        Add tags to the timer; each keyword argument becomes one
        ``name:value`` tag.
        """
        self.added_tags.extend(kwargs.items())
    @property
    def tags(self):
        """
        All tags for this block: every measurement bucketed to the nearest
        power of two, followed by the explicitly added tags.
        """
        bucketed = [
            '{}:{}'.format(name, round_power_2(size))
            for name, size in self.measures
        ]
        verbatim = [
            '{}:{}'.format(name, value)
            for name, value in self.added_tags
        ]
        return bucketed + verbatim
class QueryTimer(object):
    """
    An object that allows timing a block of code while also recording measurements
    about that code.
    """
    def __init__(self, metric_base, sample_rate=1):
        """
        Arguments:
            metric_base: The prefix to be used for all queries captured
                with this :class:`QueryTimer`.
            sample_rate: Default sampling rate handed to each Tagger.
        """
        self._metric_base = metric_base
        self._sample_rate = sample_rate
    @contextmanager
    def timer(self, metric_name, course_context):
        """
        Contextmanager which acts as a timer for the metric ``metric_name``,
        but which also yields a :class:`Tagger` object that allows the timed block
        of code to add tags and quantity measurements. Tags are added verbatim to the
        timer output. Measurements are recorded as histogram measurements in their own,
        and also as bucketed tags on the timer measurement.

        Arguments:
            metric_name: The name used to aggregate all of these metrics.
            course_context: The course which the query is being made for.
        """
        tagger = Tagger(self._sample_rate)
        metric_name = "{}.{}".format(self._metric_base, metric_name)
        start = time()
        try:
            yield tagger
        finally:
            # Emit metrics even if the timed block raised.
            end = time()
            tags = tagger.tags
            tags.append('course:{}'.format(course_context))
            for name, size in tagger.measures:
                dog_stats_api.histogram(
                    '{}.{}'.format(metric_name, name),
                    size,
                    timestamp=end,
                    # Exclude this metric's own bucketed tag from its histogram.
                    tags=[tag for tag in tags if not tag.startswith('{}:'.format(metric_name))],
                    sample_rate=tagger.sample_rate,
                )
            dog_stats_api.histogram(
                '{}.duration'.format(metric_name),
                end - start,
                timestamp=end,
                tags=tags,
                sample_rate=tagger.sample_rate,
            )
            dog_stats_api.increment(
                metric_name,
                timestamp=end,
                tags=tags,
                sample_rate=tagger.sample_rate,
            )
# Module-wide timer shared by the helpers below; 1% sample rate keeps
# metrics overhead low on hot query paths.
TIMER = QueryTimer(__name__, 0.01)
def structure_from_mongo(structure, course_context=None):
    """
    Converts the 'blocks' key from a list [block_data] to a map
    {BlockKey: block_data}.
    Converts 'root' from [block_type, block_id] to BlockKey.
    Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
    N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).

    Mutates and returns the passed-in document.

    Arguments:
        structure: The document structure to convert
        course_context (CourseKey): For metrics gathering, the CourseKey
            for the course that this data is being processed for.
    """
    with TIMER.timer('structure_from_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))
        # Contract-check the raw mongo document shape before converting.
        check('seq[2]', structure['root'])
        check('list(dict)', structure['blocks'])
        for block in structure['blocks']:
            if 'children' in block['fields']:
                check('list(list[2])', block['fields']['children'])
        structure['root'] = BlockKey(*structure['root'])
        new_blocks = {}
        for block in structure['blocks']:
            if 'children' in block['fields']:
                block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
            # pop() removes block_id so it doesn't duplicate into BlockData.
            new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
        structure['blocks'] = new_blocks
        return structure
def structure_to_mongo(structure, course_context=None):
    """
    Converts the 'blocks' key from a map {BlockKey: block_data} to
    a list [block_data], inserting BlockKey.type as 'block_type'
    and BlockKey.id as 'block_id'.
    Doesn't convert 'root', since namedtuple's can be inserted
    directly into mongo.

    Returns a new top-level dict; the caller's structure is not mutated.
    """
    with TIMER.timer('structure_to_mongo', course_context) as tagger:
        tagger.measure('blocks', len(structure['blocks']))
        # Contract-check the in-memory representation before flattening.
        check('BlockKey', structure['root'])
        check('dict(BlockKey: BlockData)', structure['blocks'])
        for block in structure['blocks'].itervalues():
            if 'children' in block.fields:
                check('list(BlockKey)', block.fields['children'])
        # Shallow copy so we can replace 'blocks' without touching the input.
        new_structure = dict(structure)
        new_structure['blocks'] = []
        for block_key, block in structure['blocks'].iteritems():
            new_block = dict(block.to_storable())
            new_block.setdefault('block_type', block_key.type)
            new_block['block_id'] = block_key.id
            new_structure['blocks'].append(new_block)
        return new_structure
class CourseStructureCache(object):
    """
    Wrapper around the django cache object used to cache course structures.

    Structures are pickled and zlib-compressed before being stored.  If the
    'course_structure_cache' backend doesn't exist, get and set silently
    become no-ops.
    """
    def __init__(self):
        # Stays None (disabling the cache) when django is unavailable or
        # the backend is not configured.
        self.cache = None
        if DJANGO_AVAILABLE:
            try:
                self.cache = get_cache('course_structure_cache')
            except InvalidCacheBackendError:
                pass
    def get(self, key, course_context=None):
        """Pull the compressed, pickled struct data from cache and deserialize."""
        if self.cache is None:
            return None
        with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
            compressed = self.cache.get(key)
            hit = compressed is not None
            tagger.tag(from_cache=str(hit).lower())
            if not hit:
                # Cache misses are unexpected, so always log them.
                tagger.sample_rate = 1
                return None
            tagger.measure('compressed_size', len(compressed))
            serialized = zlib.decompress(compressed)
            tagger.measure('uncompressed_size', len(serialized))
            return pickle.loads(serialized)
    def set(self, key, structure, course_context=None):
        """Given a structure, will pickle, compress, and write to cache."""
        if self.cache is None:
            return None
        with TIMER.timer("CourseStructureCache.set", course_context) as tagger:
            serialized = pickle.dumps(structure, pickle.HIGHEST_PROTOCOL)
            tagger.measure('uncompressed_size', len(serialized))
            # Compression level 1 = fastest (slightly larger results).
            compressed = zlib.compress(serialized, 1)
            tagger.measure('compressed_size', len(compressed))
            # Structures are immutable, so we set a timeout of "never".
            self.cache.set(key, compressed, None)
class MongoConnection(object):
    """
    Segregation of pymongo functions from the data modeling mechanisms for split modulestore.

    Owns three collections derived from the base ``collection`` name:
    ``<collection>.active_versions`` (course indexes), ``<collection>.structures``
    and ``<collection>.definitions``.
    """
    def __init__(
        self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
        asset_collection=None, retry_wait_time=0.1, **kwargs
    ):
        """
        Create & open the connection, authenticate, and provide pointers to the collections

        Arguments:
            db: database name
            collection: base collection name; the three working collections
                are derived from it (see class docstring)
            host/port/user/password: mongo connection parameters
            tz_aware: if True, datetimes read back from mongo are tz-aware
            asset_collection: accepted for interface compatibility but unused here
            retry_wait_time: seconds to wait between connection retries
            kwargs: forwarded to connect_to_mongodb
        """
        # Set a write concern of 1, which makes writes complete successfully to the primary
        # only before returning. Also makes pymongo report write errors.
        kwargs['w'] = 1
        self.database = connect_to_mongodb(
            db, host,
            port=port, tz_aware=tz_aware, user=user, password=password,
            retry_wait_time=retry_wait_time, **kwargs
        )
        self.course_index = self.database[collection + '.active_versions']
        self.structures = self.database[collection + '.structures']
        self.definitions = self.database[collection + '.definitions']
    def heartbeat(self):
        """
        Check that the db is reachable.

        Returns True when reachable; raises HeartbeatFailure otherwise.
        """
        if self.database.connection.alive():
            return True
        else:
            raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')
    def get_structure(self, key, course_context=None):
        """
        Get the structure from the persistence mechanism whose id is the given key.
        This method will use a cached version of the structure if it is available.

        Returns the deserialized structure, or None when no document matches.
        """
        with TIMER.timer("get_structure", course_context) as tagger_get_structure:
            cache = CourseStructureCache()
            structure = cache.get(key, course_context)
            tagger_get_structure.tag(from_cache=str(bool(structure)).lower())
            if not structure:
                # Always log cache misses, because they are unexpected
                tagger_get_structure.sample_rate = 1
                with TIMER.timer("get_structure.find_one", course_context) as tagger_find_one:
                    doc = self.structures.find_one({'_id': key})
                    if doc is None:
                        log.warning(
                            "doc was None when attempting to retrieve structure for item with key %s",
                            unicode(key)
                        )
                        return None
                    tagger_find_one.measure("blocks", len(doc['blocks']))
                    structure = structure_from_mongo(doc, course_context)
                    tagger_find_one.sample_rate = 1
                # Populate the cache so the next read skips mongo entirely.
                cache.set(key, structure, course_context)
            return structure
    @autoretry_read()
    def find_structures_by_id(self, ids, course_context=None):
        """
        Return all structures that specified in ``ids``.
        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_by_id", course_context) as tagger:
            tagger.measure("requested_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'_id': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_course_blocks_by_id(self, ids, course_context=None):
        """
        Find all structures that specified in `ids`. Among the blocks only return block whose type is `course`.
        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_course_blocks_by_id", course_context) as tagger:
            tagger.measure("requested_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                # $elemMatch projection: only the matching 'course' block (plus
                # 'root') is pulled back, keeping the payload small.
                for structure in self.structures.find(
                    {'_id': {'$in': ids}},
                    {'blocks': {'$elemMatch': {'block_type': 'course'}}, 'root': 1}
                )
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_structures_derived_from(self, ids, course_context=None):
        """
        Return all structures that were immediately derived from a structure listed in ``ids``.
        Arguments:
            ids (list): A list of structure ids
        """
        with TIMER.timer("find_structures_derived_from", course_context) as tagger:
            tagger.measure("base_ids", len(ids))
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({'previous_version': {'$in': ids}})
            ]
            tagger.measure("structures", len(docs))
            return docs
    @autoretry_read()
    def find_ancestor_structures(self, original_version, block_key, course_context=None):
        """
        Find all structures that originated from ``original_version`` that contain ``block_key``.
        Arguments:
            original_version (str or ObjectID): The id of a structure
            block_key (BlockKey): The id of the block in question
        """
        with TIMER.timer("find_ancestor_structures", course_context) as tagger:
            docs = [
                structure_from_mongo(structure, course_context)
                for structure in self.structures.find({
                    'original_version': original_version,
                    # The block must exist in the structure AND carry an
                    # edit_info.update_version marker.
                    'blocks': {
                        '$elemMatch': {
                            'block_id': block_key.id,
                            'block_type': block_key.type,
                            'edit_info.update_version': {
                                '$exists': True,
                            },
                        },
                    },
                })
            ]
            tagger.measure("structures", len(docs))
            return docs
    def insert_structure(self, structure, course_context=None):
        """
        Insert a new structure into the database.
        """
        with TIMER.timer("insert_structure", course_context) as tagger:
            tagger.measure("blocks", len(structure["blocks"]))
            self.structures.insert(structure_to_mongo(structure, course_context))
    def get_course_index(self, key, ignore_case=False):
        """
        Get the course_index from the persistence mechanism whose id is the given key

        Arguments:
            key: a course key providing org/course/run attributes
            ignore_case: if True, match org/course/run case-insensitively
        """
        with TIMER.timer("get_course_index", key):
            if ignore_case:
                query = {
                    # re.escape so regex metacharacters in the key match literally.
                    key_attr: re.compile(u'^{}$'.format(re.escape(getattr(key, key_attr))), re.IGNORECASE)
                    for key_attr in ('org', 'course', 'run')
                }
            else:
                query = {
                    key_attr: getattr(key, key_attr)
                    for key_attr in ('org', 'course', 'run')
                }
            return self.course_index.find_one(query)
    def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None, course_context=None):
        """
        Find the course_index matching particular conditions.
        Arguments:
            branch: If specified, this branch must exist in the returned courses
            search_targets: If specified, this must be a dictionary specifying field values
                that must exist in the search_targets of the returned courses
            org_target: If specified, this is an ORG filter so that only course_indexs are
                returned for the specified ORG

        Returns a pymongo cursor over the matching course_index documents.
        """
        with TIMER.timer("find_matching_course_indexes", course_context):
            query = {}
            if branch is not None:
                query['versions.{}'.format(branch)] = {'$exists': True}
            if search_targets:
                for key, value in search_targets.iteritems():
                    query['search_targets.{}'.format(key)] = value
            if org_target:
                query['org'] = org_target
            return self.course_index.find(query)
    def insert_course_index(self, course_index, course_context=None):
        """
        Create the course_index in the db

        Side effect: stamps course_index['last_update'] with the current UTC time.
        """
        with TIMER.timer("insert_course_index", course_context):
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            self.course_index.insert(course_index)
    def update_course_index(self, course_index, from_index=None, course_context=None):
        """
        Update the db record for course_index.
        Arguments:
            from_index: If set, only update an index if it matches the one specified in `from_index`.

        Side effect: stamps course_index['last_update'] with the current UTC time.
        """
        with TIMER.timer("update_course_index", course_context):
            if from_index:
                query = {"_id": from_index["_id"]}
                # last_update not only tells us when this course was last updated but also helps
                # prevent collisions
                if 'last_update' in from_index:
                    query['last_update'] = from_index['last_update']
            else:
                query = {
                    'org': course_index['org'],
                    'course': course_index['course'],
                    'run': course_index['run'],
                }
            course_index['last_update'] = datetime.datetime.now(pytz.utc)
            # upsert=False: a concurrent change that invalidated the query is
            # silently a no-op rather than creating a duplicate index.
            self.course_index.update(query, course_index, upsert=False,)
    def delete_course_index(self, course_key):
        """
        Delete the course_index from the persistence mechanism whose id is the given course_index
        """
        with TIMER.timer("delete_course_index", course_key):
            query = {
                key_attr: getattr(course_key, key_attr)
                for key_attr in ('org', 'course', 'run')
            }
            return self.course_index.remove(query)
    def get_definition(self, key, course_context=None):
        """
        Get the definition from the persistence mechanism whose id is the given key

        NOTE(review): raises TypeError (None['fields']) when no definition
        matches `key` — callers appear to rely on the key existing.
        """
        with TIMER.timer("get_definition", course_context) as tagger:
            definition = self.definitions.find_one({'_id': key})
            tagger.measure("fields", len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            return definition
    def get_definitions(self, definitions, course_context=None):
        """
        Retrieve all definitions listed in `definitions`.

        Arguments:
            definitions (list): definition ids to fetch

        Returns a pymongo cursor over the matching definition documents.
        """
        with TIMER.timer("get_definitions", course_context) as tagger:
            tagger.measure('definitions', len(definitions))
            # Rebinds the parameter name to the cursor; the id list is no
            # longer needed after the query is issued.
            definitions = self.definitions.find({'_id': {'$in': definitions}})
            return definitions
    def insert_definition(self, definition, course_context=None):
        """
        Create the definition in the db
        """
        with TIMER.timer("insert_definition", course_context) as tagger:
            tagger.measure('fields', len(definition['fields']))
            tagger.tag(block_type=definition['block_type'])
            self.definitions.insert(definition)
    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.
        This method is intended for use by tests and administrative commands, and not
        to be run during server startup.
        """
        # Unique compound index guarantees one course_index per (org, course, run).
        create_collection_index(
            self.course_index,
            [
                ('org', pymongo.ASCENDING),
                ('course', pymongo.ASCENDING),
                ('run', pymongo.ASCENDING)
            ],
            unique=True,
            background=True
        )
    def close_connections(self):
        """
        Closes any open connections to the underlying databases
        """
        self.database.connection.close()
    def mongo_wire_version(self):
        """
        Returns the wire version for mongo. Only used to unit tests which instrument the connection.
        """
        return self.database.connection.max_wire_version
    def _drop_database(self, database=True, collections=True, connections=True):
        """
        A destructive operation to drop the underlying database and close all connections.
        Intended to be used by test code for cleanup.
        If database is True, then this should drop the entire database.
        Otherwise, if collections is True, then this should drop all of the collections used
        by this modulestore.
        Otherwise, the modulestore should remove all data from the collections.
        If connections is True, then close the connection to the database as well.
        """
        # Capture the connection before any dropping, so it can still be
        # closed after the database object becomes unusable.
        connection = self.database.connection
        if database:
            connection.drop_database(self.database.name)
        elif collections:
            self.course_index.drop()
            self.structures.drop()
            self.definitions.drop()
        else:
            self.course_index.remove({})
            self.structures.remove({})
            self.definitions.remove({})
        if connections:
            connection.close()
|
stormi/tsunami | refs/heads/master | src/secondaires/crafting/fonctions/ouvrir_sac_materiau.py | 1 | # -*-coding:Utf-8 -*
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction ouvrir_sac_materiau."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
    """Crée un sac de matériau."""
    # NOTE(review): the French docstrings in this class are presented
    # in-game as scripting help text, so they are runtime strings and are
    # deliberately left untranslated.
    @classmethod
    def init_types(cls):
        # Register the single accepted signature: ouvrir_sac_materiau(Objet).
        cls.ajouter_types(cls.ouvrir_sac_materiau, "Objet")
    @staticmethod
    def ouvrir_sac_materiau(sac):
        """Ouvre le sac de matériau en extrayant ce qu'il contient.
        Cette fonction extrait du sac de matériau précisé en argument
        les objets contenus, et retourne la liste des objets.
        Il vous appartiendra de les mettre quelque part (les
        poser dans un conteneur ou sur le sol, par exemple).
        Paramètres à préciser :
        * sac : l'objet de type sac
        Exemples d'utilisation :
        # 'sac' contient un sac de farine
        objets = ouvrir_sac_materiau(sac)
        # Et peut-être ensuite
        pour chaque objet dans objets:
        poser salle objet
        fait
        """
        # Abort the script cleanly if the argument is not a material bag.
        if not sac.est_de_type("sac de matériau"):
            raise ErreurExecution("{} n'est pas un sac de matériau".format(
                repr(sac.cle)))
        objets = []
        # 'materiau' is the prototype to instantiate, 'quantite' how many
        # copies the bag holds; both must be set for anything to be created.
        prototype = getattr(sac, "materiau", None)
        quantite = getattr(sac, "quantite", None)
        if prototype and quantite:
            for i in range(quantite):
                objets.append(importeur.objet.creer_objet(prototype))
        # The bag itself is consumed once opened.
        importeur.objet.supprimer_objet(sac.identifiant)
        return objets
|
aifil/odoo | refs/heads/8.0 | addons/payment_ogone/tests/test_ogone.py | 430 | # -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
    """Functional tests for the Ogone acquirer: form rendering (including the
    SHASIGN signature), form feedback validation, and the server-to-server
    (alias) payment flow. Requires the demo 'payment_acquirer_ogone' record
    to be configured in the 'test' environment."""
    def setUp(self):
        super(OgonePayment, self).setUp()
        cr, uid = self.cr, self.uid
        self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the ogone account
        model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')
    def test_10_ogone_form_render(self):
        """Render the payment button and check every form input against the
        expected values, both without and with an existing transaction."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')
        # ----------------------------------------
        # Test: button direct rendering + shasign
        # ----------------------------------------
        # Expected form fields; SHASIGN is the signature Ogone computes over
        # the other values with the account's sha-in key.
        form_values = {
            'PSPID': 'dummy',
            'ORDERID': 'test_ref0',
            'AMOUNT': '1',
            'CURRENCY': 'EUR',
            'LANGUAGE': 'en_US',
            'CN': 'Norbert Buyer',
            'EMAIL': 'norbert.buyer@example.com',
            'OWNERZIP': '1000',
            'OWNERADDRESS': 'Huge Street 2/543',
            'OWNERCTY': 'Belgium',
            'OWNERTOWN': 'Sin City',
            'OWNERTELNO': '0032 12 34 56 78',
            'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
            'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
        }
        # render the button
        res = self.payment_acquirer.render(
            cr, uid, self.ogone_id,
            'test_ref0', 0.01, self.currency_euro_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
        # ----------------------------------------
        # Test2: button using tx + validation
        # ----------------------------------------
        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 0.01,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_ref0',
                'partner_id': self.buyer_id,
            }, context=context
        )
        # render the button; the tx reference must override the one passed in
        # ('should_be_erased' must not appear in the rendered form)
        res = self.payment_acquirer.render(
            cr, uid, self.ogone_id,
            'should_be_erased', 0.01, self.currency_euro,
            tx_id=tx_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
    @mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
    def test_20_ogone_form_management(self):
        """Feed simulated Ogone POST data back and verify state transitions:
        unknown tx -> error, valid SHASIGN -> done, tampered SHASIGN ->
        ValidationError, error STATUS -> error state."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')
        # typical data posted by ogone after client has successfully paid
        ogone_post_data = {
            'orderID': u'test_ref_2',
            'STATUS': u'9',
            'CARDNO': u'XXXXXXXXXXXX0002',
            'PAYID': u'25381582',
            'CN': u'Norbert Buyer',
            'NCERROR': u'0',
            'TRXDATE': u'11/15/13',
            'IP': u'85.201.233.72',
            'BRAND': u'VISA',
            'ACCEPTANCE': u'test123',
            'currency': u'EUR',
            'amount': u'1.95',
            'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
            'ED': u'0315',
            'PM': u'CreditCard'
        }
        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # create tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 1.95,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_ref_2',
                'partner_name': 'Norbert Buyer',
                'partner_country_id': self.country_france_id,
            }, context=context
        )
        # validate it
        self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
        self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')
        # reset tx
        tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})
        # now ogone post is ok: try to modify the SHASIGN
        ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
        with self.assertRaises(ValidationError):
            self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # simulate an error (STATUS 2 = authorization refused)
        ogone_post_data['STATUS'] = 2
        ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
        self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')
    def test_30_ogone_s2s(self):
        """Server-to-server flow: create an alias from card data, then execute
        the payment through it. NOTE(review): hits Ogone's test servers."""
        # Unique reference per run since this talks to a real remote account.
        test_ref = 'test_ref_%.15f' % time.time()
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')
        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 0.01,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': test_ref,
                'partner_id': self.buyer_id,
                'type': 'server2server',
            }, context=context
        )
        # create an alias
        res = self.payment_transaction.ogone_s2s_create_alias(
            cr, uid, tx_id, {
                'expiry_date_mm': '01',
                'expiry_date_yy': '2015',
                'holder_name': 'Norbert Poilu',
                'number': '4000000000000002',
                'brand': 'VISA',
            }, context=context)
        # check an alias is set, containing at least OPENERP
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')
        res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
        # print res
        # {
        # 'orderID': u'reference',
        # 'STATUS': u'9',
        # 'CARDNO': u'XXXXXXXXXXXX0002',
        # 'PAYID': u'24998692',
        # 'CN': u'Norbert Poilu',
        # 'NCERROR': u'0',
        # 'TRXDATE': u'11/05/13',
        # 'IP': u'85.201.233.72',
        # 'BRAND': u'VISA',
        # 'ACCEPTANCE': u'test123',
        # 'currency': u'EUR',
        # 'amount': u'1.95',
        # 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
        # 'ED': u'0314',
        # 'PM': u'CreditCard'
        # }
|
sve-odoo/odoo | refs/heads/master | openerp/addons/base/tests/test_translate.py | 460 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.tools.translate import quote, unquote
class TranslationToolsTestCase(unittest.TestCase):
    """Tests for the PO-file quote/unquote helpers used by the translation
    export: quoting then unquoting must round-trip arbitrary text."""
    def test_quote_unquote(self):
        def test_string(str):
            # Round-trip check: quote the text, re-join the wrapped PO
            # chunks, unquote, and expect the original text back.
            quoted = quote(str)
            #print "\n1:", repr(str)
            #print "2:", repr(quoted)
            unquoted = unquote("".join(quoted.split('"\n"')))
            #print "3:", repr(unquoted)
            self.assertEquals(str, unquoted)
        test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment
        self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
garwynn/L900_3.4_Experiment | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree model of the traced call stack.

    Any function whose caller is unknown to the kernel (interrupt,
    syscall, kernel thread...) hangs off the shared virtual parent
    stored in the class attribute ``ROOT``.
    """
    ROOT = None
    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        # Orphan nodes are adopted by the virtual root.
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []
    def calls(self, func, calltime):
        """Record that this node called ``func`` at ``calltime``.

        Returns the newly created child node so the caller can descend
        into it.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node
    def getParent(self, func):
        """Climb to the nearest ancestor whose name is ``func``.

        When no such ancestor exists, a fresh node for ``func`` is
        attached directly under ROOT and returned instead.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node
    def __repr__(self):
        return self.__toString("", True)
    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children with one more
        # level of " |" guide appended to the branch prefix.
        if self._time is None:
            text = "%s----%s\n" % (branch, self._func)
        else:
            text = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            # This subtree is the last sibling: blank out the guide
            # column inherited from the parent.
            branch = branch[:-1] + " "
        last = len(self._children) - 1
        for index, child in enumerate(self._children):
            text += child.__toString(branch + " |", index == last)
        return text
class BrokenLineException(Exception):
    """Raised for a trace line left incomplete by the broken pipe.

    Signals the caller to stop processing rather than parse a partial
    record.
    """
class CommentLineException(Exception):
    """Raised for comment lines (such as the trace-file header).

    The caller simply skips these lines.
    """
def parseLine(line):
    """Parse one function-tracer line into (calltime, callee, caller).

    Raises CommentLineException for '#' comment lines and
    BrokenLineException for lines that do not match the expected
    format (typically the record cut short when the pipe broke).
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.groups()
def main():
    """Read a raw function trace from stdin and print the drawn call tree."""
    # Virtual root that adopts every call whose parent is unknown
    # (interrupts, syscalls, kernel threads...).
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # The last line was cut when the pipe broke; stop here.
            break
        except CommentLineException:
            # Header/comment lines carry no call data; skip them.
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT
if __name__ == "__main__":
    main()
|
F5Networks/f5-common-python | refs/heads/development | f5/bigiq/tm/sys/test/functional/test_global_settings.py | 2 | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def set_global_settings_test(request, mgmt_root):
    """Load sys global-settings and register a finalizer that restores
    the default username prompt after the test finishes."""
    def restore_prompt():
        # Put the factory value back so later tests see a clean device.
        settings.usernamePrompt = 'Username'
        settings.update()
    request.addfinalizer(restore_prompt)
    settings = mgmt_root.tm.sys.global_settings.load()
    return settings
class TestGlobal_Setting(object):
    """Refresh/Update/Load coverage for the sys global-settings resource."""
    def test_RUL(self, request, mgmt_root):
        # Load two independent copies of the same server-side resource.
        first = set_global_settings_test(request, mgmt_root)
        second = mgmt_root.tm.sys.global_settings.load()
        assert first.usernamePrompt == 'Username'
        assert first.usernamePrompt == second.usernamePrompt
        # Update through the first copy only; the second keeps stale state.
        first.usernamePrompt = 'User'
        first.update()
        assert first.usernamePrompt == 'User'
        assert second.usernamePrompt == 'Username'
        # Refresh brings the second copy back in line with the server.
        second.refresh()
        assert first.usernamePrompt == second.usernamePrompt
|
yangdongsheng/autotest | refs/heads/master | client/shared/utils.py | 3 | """
Convenience functions for use by tests or whomever.
NOTE: this is a mixin library that pulls in functions from several places
Note carefully what the precendece order is
There's no really good way to do this, as this isn't a class we can do
inheritance with, just a collection of static methods.
"""
from autotest.client.shared.base_utils import *
if os.path.exists(os.path.join(os.path.dirname(__file__), 'site_utils.py')):
# Here we are importing site utils only if it exists
# pylint: disable=E0611
from autotest.client.shared.site_utils import *
|
meisamhe/GPLshared | refs/heads/master | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/magic_maze_game.py | 1 | from maze_game import MazeGame
from maze_game import Room
# @include
class MagicMazeGame(MazeGame):
    # Factory-method override: every room in a magic maze is a MagicRoom.
    def make_room(self):
        """Return a fresh MagicRoom instance."""
        return MagicRoom()
class MagicRoom(Room):
    def connect(self, room2):
        """Announce the magic connection to room2.

        NOTE(review): does not call Room.connect — presumably the base
        class handles (or needs no) actual wiring; confirm against
        maze_game.Room.
        """
        print("Connecting magic room")
# @exclude
# Smoke checks run at import time: the game builds and its factory
# produces a room.
x = MagicMazeGame()
assert x is not None
assert x.make_room() is not None
|
kikocorreoso/brython | refs/heads/master | www/src/Lib/test/test_tokenize.py | 2 | from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
open as tokenize_open, Untokenizer, generate_tokens,
NEWLINE)
from io import BytesIO, StringIO
import unittest
from unittest import TestCase, mock
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
import os
import token
# Converts a source string into a list of textual representation
# of the tokens such as:
# ` NAME 'if' (1, 0) (1, 2)`
# to make writing tests easier.
def stringify_tokens_from_source(token_generator, source_string):
    """Render tokens as aligned text lines for easy test comparison.

    Each token from ``token_generator`` becomes a line such as
    ``    NAME       'if'          (1, 0) (1, 2)``.  Iteration stops at the
    first ENDMARKER.  When ``source_string`` lacks a trailing newline, the
    implicit NEWLINE token the tokenizer synthesizes on the last line is
    omitted, so expected values in tests need not include it.

    Arguments:
        token_generator: iterable of 5-tuples as produced by
            ``tokenize.tokenize`` / ``generate_tokens``.
        source_string: the source text the tokens came from.

    Returns:
        list of formatted strings, one per reported token.
    """
    result = []
    num_lines = len(source_string.splitlines())
    # endswith() also handles the empty string, where the original
    # source_string[-1] indexing would raise IndexError.
    missing_trailing_nl = not source_string.endswith(('\r', '\n'))
    # Local names chosen to avoid shadowing the builtin `type` and the
    # imported `token` module.
    for tok_type, tok_string, start, end, line in token_generator:
        if tok_type == ENDMARKER:
            break
        # Ignore the new line on the last line if the input lacks one
        if missing_trailing_nl and tok_type == NEWLINE and end[0] == num_lines:
            continue
        kind = tok_name[tok_type]
        result.append(f"    {kind:10} {tok_string!r:13} {start} {end}")
    return result
class TokenizeTest(TestCase):
    """Table-driven checks of tokenize() output for small code fragments.

    Each test hands a fragment to check_tokenize() together with a
    hand-written table of the expected token rows; ENDMARKER, ENCODING and
    the final NEWLINE are omitted from the tables for brevity.
    """
    # Tests for the tokenize module.

    # The tests can be really simple. Given a small fragment of source
    # code, print out a table with tokens. The ENDMARKER, ENCODING and
    # final NEWLINE are omitted for brevity.

    def check_tokenize(self, s, expected):
        # Format the tokens in s in a table format.
        # The ENDMARKER and final NEWLINE are omitted.
        # NOTE(review): runs of whitespace in the expected tables below
        # appear to have been collapsed (the stringifier pads the type and
        # token columns to 10/13 characters) — verify the tables against the
        # upstream file before relying on them.
        f = BytesIO(s.encode('utf-8'))
        result = stringify_tokens_from_source(tokenize(f.readline), s)
        self.assertEqual(result,
                         [" ENCODING 'utf-8' (0, 0) (0, 0)"] +
                         expected.rstrip().splitlines())

    def test_implicit_newline(self):
        # Make sure that the tokenizer puts in an implicit NEWLINE
        # when the input lacks a trailing new line.
        f = BytesIO("x".encode('utf-8'))
        tokens = list(tokenize(f.readline))
        self.assertEqual(tokens[-2].type, NEWLINE)
        self.assertEqual(tokens[-1].type, ENDMARKER)

    def test_basic(self):
        self.check_tokenize("1 + 1", """\
NUMBER '1' (1, 0) (1, 1)
OP '+' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
""")
        self.check_tokenize("if False:\n"
                            " # NL\n"
                            " \n"
                            " True = False # NEWLINE\n", """\
NAME 'if' (1, 0) (1, 2)
NAME 'False' (1, 3) (1, 8)
OP ':' (1, 8) (1, 9)
NEWLINE '\\n' (1, 9) (1, 10)
COMMENT '# NL' (2, 4) (2, 8)
NL '\\n' (2, 8) (2, 9)
NL '\\n' (3, 4) (3, 5)
INDENT ' ' (4, 0) (4, 4)
NAME 'True' (4, 4) (4, 8)
OP '=' (4, 9) (4, 10)
NAME 'False' (4, 11) (4, 16)
COMMENT '# NEWLINE' (4, 17) (4, 26)
NEWLINE '\\n' (4, 26) (4, 27)
DEDENT '' (5, 0) (5, 0)
""")
        # NOTE(review): the body below looks like it lost its indentation;
        # upstream indents "x += 2" / "x += 5" unevenly to trigger the
        # "unindent does not match" error asserted here — confirm.
        indent_error_file = b"""\
def k(x):
x += 2
x += 5
"""
        readline = BytesIO(indent_error_file).readline
        with self.assertRaisesRegex(IndentationError,
                                    "unindent does not match any "
                                    "outer indentation level"):
            for tok in tokenize(readline):
                pass

    def test_int(self):
        # Ordinary integers and binary operators
        self.check_tokenize("0xff <= 255", """\
NUMBER '0xff' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
""")
        self.check_tokenize("0b10 <= 255", """\
NUMBER '0b10' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
""")
        self.check_tokenize("0o123 <= 0O123", """\
NUMBER '0o123' (1, 0) (1, 5)
OP '<=' (1, 6) (1, 8)
NUMBER '0O123' (1, 9) (1, 14)
""")
        self.check_tokenize("1234567 > ~0x15", """\
NUMBER '1234567' (1, 0) (1, 7)
OP '>' (1, 8) (1, 9)
OP '~' (1, 10) (1, 11)
NUMBER '0x15' (1, 11) (1, 15)
""")
        self.check_tokenize("2134568 != 1231515", """\
NUMBER '2134568' (1, 0) (1, 7)
OP '!=' (1, 8) (1, 10)
NUMBER '1231515' (1, 11) (1, 18)
""")
        self.check_tokenize("(-124561-1) & 200000000", """\
OP '(' (1, 0) (1, 1)
OP '-' (1, 1) (1, 2)
NUMBER '124561' (1, 2) (1, 8)
OP '-' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP ')' (1, 10) (1, 11)
OP '&' (1, 12) (1, 13)
NUMBER '200000000' (1, 14) (1, 23)
""")
        self.check_tokenize("0xdeadbeef != -1", """\
NUMBER '0xdeadbeef' (1, 0) (1, 10)
OP '!=' (1, 11) (1, 13)
OP '-' (1, 14) (1, 15)
NUMBER '1' (1, 15) (1, 16)
""")
        self.check_tokenize("0xdeadc0de & 12345", """\
NUMBER '0xdeadc0de' (1, 0) (1, 10)
OP '&' (1, 11) (1, 12)
NUMBER '12345' (1, 13) (1, 18)
""")
        self.check_tokenize("0xFF & 0x15 | 1234", """\
NUMBER '0xFF' (1, 0) (1, 4)
OP '&' (1, 5) (1, 6)
NUMBER '0x15' (1, 7) (1, 11)
OP '|' (1, 12) (1, 13)
NUMBER '1234' (1, 14) (1, 18)
""")

    def test_long(self):
        # Long integers
        self.check_tokenize("x = 0", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0' (1, 4) (1, 5)
""")
        self.check_tokenize("x = 0xfffffffffff", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0xfffffffffff' (1, 4) (1, 17)
""")
        self.check_tokenize("x = 123141242151251616110", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '123141242151251616110' (1, 4) (1, 25)
""")
        self.check_tokenize("x = -15921590215012591", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
OP '-' (1, 4) (1, 5)
NUMBER '15921590215012591' (1, 5) (1, 22)
""")

    def test_float(self):
        # Floating point numbers
        self.check_tokenize("x = 3.14159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14159' (1, 4) (1, 11)
""")
        self.check_tokenize("x = 314159.", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '314159.' (1, 4) (1, 11)
""")
        self.check_tokenize("x = .314159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '.314159' (1, 4) (1, 11)
""")
        self.check_tokenize("x = 3e14159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3e14159' (1, 4) (1, 11)
""")
        self.check_tokenize("x = 3E123", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3E123' (1, 4) (1, 9)
""")
        self.check_tokenize("x+y = 3e-1230", """\
NAME 'x' (1, 0) (1, 1)
OP '+' (1, 1) (1, 2)
NAME 'y' (1, 2) (1, 3)
OP '=' (1, 4) (1, 5)
NUMBER '3e-1230' (1, 6) (1, 13)
""")
        self.check_tokenize("x = 3.14e159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14e159' (1, 4) (1, 12)
""")

    def test_underscore_literals(self):
        # Helper: return the first NUMBER token's string, or a marker when
        # the literal does not tokenize as a single NUMBER.
        def number_token(s):
            f = BytesIO(s.encode('utf-8'))
            for toktype, token, start, end, line in tokenize(f.readline):
                if toktype == NUMBER:
                    return token
            return 'invalid token'
        for lit in VALID_UNDERSCORE_LITERALS:
            if '(' in lit:
                # this won't work with compound complex inputs
                continue
            self.assertEqual(number_token(lit), lit)
        for lit in INVALID_UNDERSCORE_LITERALS:
            self.assertNotEqual(number_token(lit), lit)

    def test_string(self):
        # String literals
        self.check_tokenize("x = ''; y = \"\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "''" (1, 4) (1, 6)
OP ';' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '=' (1, 10) (1, 11)
STRING '""' (1, 12) (1, 14)
""")
        self.check_tokenize("x = '\"'; y = \"'\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '\\'"\\'' (1, 4) (1, 7)
OP ';' (1, 7) (1, 8)
NAME 'y' (1, 9) (1, 10)
OP '=' (1, 11) (1, 12)
STRING '"\\'"' (1, 13) (1, 16)
""")
        self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"doesn\\'t "' (1, 4) (1, 14)
NAME 'shrink' (1, 14) (1, 20)
STRING '", does it"' (1, 20) (1, 31)
""")
        self.check_tokenize("x = 'abc' + 'ABC'", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "'abc'" (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING "'ABC'" (1, 12) (1, 17)
""")
        self.check_tokenize('y = "ABC" + "ABC"', """\
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"ABC"' (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING '"ABC"' (1, 12) (1, 17)
""")
        self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "r'abc'" (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING "r'ABC'" (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING "R'ABC'" (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING "R'ABC'" (1, 31) (1, 37)
""")
        self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING 'r"abc"' (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING 'r"ABC"' (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING 'R"ABC"' (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING 'R"ABC"' (1, 31) (1, 37)
""")
        self.check_tokenize("u'abc' + U'abc'", """\
STRING "u'abc'" (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING "U'abc'" (1, 9) (1, 15)
""")
        self.check_tokenize('u"abc" + U"abc"', """\
STRING 'u"abc"' (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING 'U"abc"' (1, 9) (1, 15)
""")
        self.check_tokenize("b'abc' + B'abc'", """\
STRING "b'abc'" (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING "B'abc'" (1, 9) (1, 15)
""")
        self.check_tokenize('b"abc" + B"abc"', """\
STRING 'b"abc"' (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING 'B"abc"' (1, 9) (1, 15)
""")
        self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\
STRING "br'abc'" (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING "bR'abc'" (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING "Br'abc'" (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING "BR'abc'" (1, 30) (1, 37)
""")
        self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\
STRING 'br"abc"' (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING 'bR"abc"' (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING 'Br"abc"' (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING 'BR"abc"' (1, 30) (1, 37)
""")
        self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\
STRING "rb'abc'" (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING "rB'abc'" (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING "Rb'abc'" (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING "RB'abc'" (1, 30) (1, 37)
""")
        self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\
STRING 'rb"abc"' (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING 'rB"abc"' (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING 'Rb"abc"' (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING 'RB"abc"' (1, 30) (1, 37)
""")
        # Check 0, 1, and 2 character string prefixes.
        self.check_tokenize(r'"a\
de\
fg"', """\
STRING '"a\\\\\\nde\\\\\\nfg"\' (1, 0) (3, 3)
""")
        self.check_tokenize(r'u"a\
de"', """\
STRING 'u"a\\\\\\nde"\' (1, 0) (2, 3)
""")
        self.check_tokenize(r'rb"a\
d"', """\
STRING 'rb"a\\\\\\nd"\' (1, 0) (2, 2)
""")
        self.check_tokenize(r'"""a\
b"""', """\
STRING '\"\""a\\\\\\nb\"\""' (1, 0) (2, 4)
""")
        self.check_tokenize(r'u"""a\
b"""', """\
STRING 'u\"\""a\\\\\\nb\"\""' (1, 0) (2, 4)
""")
        self.check_tokenize(r'rb"""a\
b\
c"""', """\
STRING 'rb"\""a\\\\\\nb\\\\\\nc"\""' (1, 0) (3, 4)
""")
        self.check_tokenize('f"abc"', """\
STRING 'f"abc"' (1, 0) (1, 6)
""")
        self.check_tokenize('fR"a{b}c"', """\
STRING 'fR"a{b}c"' (1, 0) (1, 9)
""")
        self.check_tokenize('f"""abc"""', """\
STRING 'f\"\"\"abc\"\"\"' (1, 0) (1, 10)
""")
        self.check_tokenize(r'f"abc\
def"', """\
STRING 'f"abc\\\\\\ndef"' (1, 0) (2, 4)
""")
        self.check_tokenize(r'Rf"abc\
def"', """\
STRING 'Rf"abc\\\\\\ndef"' (1, 0) (2, 4)
""")

    def test_function(self):
        self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'd22' (1, 4) (1, 7)
OP '(' (1, 7) (1, 8)
NAME 'a' (1, 8) (1, 9)
OP ',' (1, 9) (1, 10)
NAME 'b' (1, 11) (1, 12)
OP ',' (1, 12) (1, 13)
NAME 'c' (1, 14) (1, 15)
OP '=' (1, 15) (1, 16)
NUMBER '2' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
NAME 'd' (1, 19) (1, 20)
OP '=' (1, 20) (1, 21)
NUMBER '2' (1, 21) (1, 22)
OP ',' (1, 22) (1, 23)
OP '*' (1, 24) (1, 25)
NAME 'k' (1, 25) (1, 26)
OP ')' (1, 26) (1, 27)
OP ':' (1, 27) (1, 28)
NAME 'pass' (1, 29) (1, 33)
""")
        self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'd01v_' (1, 4) (1, 9)
OP '(' (1, 9) (1, 10)
NAME 'a' (1, 10) (1, 11)
OP '=' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP ',' (1, 13) (1, 14)
OP '*' (1, 15) (1, 16)
NAME 'k' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
OP '**' (1, 19) (1, 21)
NAME 'w' (1, 21) (1, 22)
OP ')' (1, 22) (1, 23)
OP ':' (1, 23) (1, 24)
NAME 'pass' (1, 25) (1, 29)
""")
        self.check_tokenize("def d23(a: str, b: int=3) -> int: pass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'd23' (1, 4) (1, 7)
OP '(' (1, 7) (1, 8)
NAME 'a' (1, 8) (1, 9)
OP ':' (1, 9) (1, 10)
NAME 'str' (1, 11) (1, 14)
OP ',' (1, 14) (1, 15)
NAME 'b' (1, 16) (1, 17)
OP ':' (1, 17) (1, 18)
NAME 'int' (1, 19) (1, 22)
OP '=' (1, 22) (1, 23)
NUMBER '3' (1, 23) (1, 24)
OP ')' (1, 24) (1, 25)
OP '->' (1, 26) (1, 28)
NAME 'int' (1, 29) (1, 32)
OP ':' (1, 32) (1, 33)
NAME 'pass' (1, 34) (1, 38)
""")

    def test_comparison(self):
        # Comparison
        self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != "
                            "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\
NAME 'if' (1, 0) (1, 2)
NUMBER '1' (1, 3) (1, 4)
OP '<' (1, 5) (1, 6)
NUMBER '1' (1, 7) (1, 8)
OP '>' (1, 9) (1, 10)
NUMBER '1' (1, 11) (1, 12)
OP '==' (1, 13) (1, 15)
NUMBER '1' (1, 16) (1, 17)
OP '>=' (1, 18) (1, 20)
NUMBER '5' (1, 21) (1, 22)
OP '<=' (1, 23) (1, 25)
NUMBER '0x15' (1, 26) (1, 30)
OP '<=' (1, 31) (1, 33)
NUMBER '0x12' (1, 34) (1, 38)
OP '!=' (1, 39) (1, 41)
NUMBER '1' (1, 42) (1, 43)
NAME 'and' (1, 44) (1, 47)
NUMBER '5' (1, 48) (1, 49)
NAME 'in' (1, 50) (1, 52)
NUMBER '1' (1, 53) (1, 54)
NAME 'not' (1, 55) (1, 58)
NAME 'in' (1, 59) (1, 61)
NUMBER '1' (1, 62) (1, 63)
NAME 'is' (1, 64) (1, 66)
NUMBER '1' (1, 67) (1, 68)
NAME 'or' (1, 69) (1, 71)
NUMBER '5' (1, 72) (1, 73)
NAME 'is' (1, 74) (1, 76)
NAME 'not' (1, 77) (1, 80)
NUMBER '1' (1, 81) (1, 82)
OP ':' (1, 82) (1, 83)
NAME 'pass' (1, 84) (1, 88)
""")

    def test_shift(self):
        # Shift
        self.check_tokenize("x = 1 << 1 >> 5", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '<<' (1, 6) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '>>' (1, 11) (1, 13)
NUMBER '5' (1, 14) (1, 15)
""")

    def test_additive(self):
        # Additive
        self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '-' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '15' (1, 12) (1, 14)
OP '-' (1, 15) (1, 16)
NUMBER '1' (1, 17) (1, 18)
OP '+' (1, 19) (1, 20)
NUMBER '0x124' (1, 21) (1, 26)
OP '+' (1, 27) (1, 28)
NAME 'z' (1, 29) (1, 30)
OP '+' (1, 31) (1, 32)
NAME 'a' (1, 33) (1, 34)
OP '[' (1, 34) (1, 35)
NUMBER '5' (1, 35) (1, 36)
OP ']' (1, 36) (1, 37)
""")

    def test_multiplicative(self):
        # Multiplicative
        self.check_tokenize("x = 1//1*1/5*12%0x12@42", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '//' (1, 5) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '/' (1, 10) (1, 11)
NUMBER '5' (1, 11) (1, 12)
OP '*' (1, 12) (1, 13)
NUMBER '12' (1, 13) (1, 15)
OP '%' (1, 15) (1, 16)
NUMBER '0x12' (1, 16) (1, 20)
OP '@' (1, 20) (1, 21)
NUMBER '42' (1, 21) (1, 23)
""")

    def test_unary(self):
        # Unary
        self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\
OP '~' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '^' (1, 3) (1, 4)
NUMBER '1' (1, 5) (1, 6)
OP '&' (1, 7) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '|' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '^' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
NUMBER '1' (1, 17) (1, 18)
""")
        self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\
OP '-' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '*' (1, 2) (1, 3)
NUMBER '1' (1, 3) (1, 4)
OP '/' (1, 4) (1, 5)
NUMBER '1' (1, 5) (1, 6)
OP '+' (1, 6) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '//' (1, 10) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '-' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
OP '-' (1, 17) (1, 18)
OP '-' (1, 18) (1, 19)
NUMBER '1' (1, 19) (1, 20)
OP '**' (1, 20) (1, 22)
NUMBER '1' (1, 22) (1, 23)
""")

    def test_selector(self):
        # Selector
        self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\
NAME 'import' (1, 0) (1, 6)
NAME 'sys' (1, 7) (1, 10)
OP ',' (1, 10) (1, 11)
NAME 'time' (1, 12) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'x' (2, 0) (2, 1)
OP '=' (2, 2) (2, 3)
NAME 'sys' (2, 4) (2, 7)
OP '.' (2, 7) (2, 8)
NAME 'modules' (2, 8) (2, 15)
OP '[' (2, 15) (2, 16)
STRING "'time'" (2, 16) (2, 22)
OP ']' (2, 22) (2, 23)
OP '.' (2, 23) (2, 24)
NAME 'time' (2, 24) (2, 28)
OP '(' (2, 28) (2, 29)
OP ')' (2, 29) (2, 30)
""")

    def test_method(self):
        # Methods
        self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\
OP '@' (1, 0) (1, 1)
NAME 'staticmethod' (1, 1) (1, 13)
NEWLINE '\\n' (1, 13) (1, 14)
NAME 'def' (2, 0) (2, 3)
NAME 'foo' (2, 4) (2, 7)
OP '(' (2, 7) (2, 8)
NAME 'x' (2, 8) (2, 9)
OP ',' (2, 9) (2, 10)
NAME 'y' (2, 10) (2, 11)
OP ')' (2, 11) (2, 12)
OP ':' (2, 12) (2, 13)
NAME 'pass' (2, 14) (2, 18)
""")

    def test_tabs(self):
        # Evil tabs
        self.check_tokenize("def f():\n"
                            "\tif x\n"
                            " \tpass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
INDENT '\\t' (2, 0) (2, 1)
NAME 'if' (2, 1) (2, 3)
NAME 'x' (2, 4) (2, 5)
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
""")

    def test_non_ascii_identifiers(self):
        # Non-ascii identifiers
        self.check_tokenize("Örter = 'places'\ngrün = 'green'", """\
NAME 'Örter' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
STRING "'places'" (1, 8) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'grün' (2, 0) (2, 4)
OP '=' (2, 5) (2, 6)
STRING "'green'" (2, 7) (2, 14)
""")

    def test_unicode(self):
        # Legacy unicode literals:
        self.check_tokenize("Örter = u'places'\ngrün = U'green'", """\
NAME 'Örter' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
STRING "u'places'" (1, 8) (1, 17)
NEWLINE '\\n' (1, 17) (1, 18)
NAME 'grün' (2, 0) (2, 4)
OP '=' (2, 5) (2, 6)
STRING "U'green'" (2, 7) (2, 15)
""")

    def test_async(self):
        # Async/await extension:
        # (async/await are soft keywords to the tokenizer; they come out
        # as plain NAME tokens in every position.)
        self.check_tokenize("async = 1", """\
NAME 'async' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
NUMBER '1' (1, 8) (1, 9)
""")
        self.check_tokenize("a = (async = 1)", """\
NAME 'a' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
OP '(' (1, 4) (1, 5)
NAME 'async' (1, 5) (1, 10)
OP '=' (1, 11) (1, 12)
NUMBER '1' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
""")
        self.check_tokenize("async()", """\
NAME 'async' (1, 0) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
""")
        self.check_tokenize("class async(Bar):pass", """\
NAME 'class' (1, 0) (1, 5)
NAME 'async' (1, 6) (1, 11)
OP '(' (1, 11) (1, 12)
NAME 'Bar' (1, 12) (1, 15)
OP ')' (1, 15) (1, 16)
OP ':' (1, 16) (1, 17)
NAME 'pass' (1, 17) (1, 21)
""")
        self.check_tokenize("class async:pass", """\
NAME 'class' (1, 0) (1, 5)
NAME 'async' (1, 6) (1, 11)
OP ':' (1, 11) (1, 12)
NAME 'pass' (1, 12) (1, 16)
""")
        self.check_tokenize("await = 1", """\
NAME 'await' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
NUMBER '1' (1, 8) (1, 9)
""")
        self.check_tokenize("foo.async", """\
NAME 'foo' (1, 0) (1, 3)
OP '.' (1, 3) (1, 4)
NAME 'async' (1, 4) (1, 9)
""")
        self.check_tokenize("async for a in b: pass", """\
NAME 'async' (1, 0) (1, 5)
NAME 'for' (1, 6) (1, 9)
NAME 'a' (1, 10) (1, 11)
NAME 'in' (1, 12) (1, 14)
NAME 'b' (1, 15) (1, 16)
OP ':' (1, 16) (1, 17)
NAME 'pass' (1, 18) (1, 22)
""")
        self.check_tokenize("async with a as b: pass", """\
NAME 'async' (1, 0) (1, 5)
NAME 'with' (1, 6) (1, 10)
NAME 'a' (1, 11) (1, 12)
NAME 'as' (1, 13) (1, 15)
NAME 'b' (1, 16) (1, 17)
OP ':' (1, 17) (1, 18)
NAME 'pass' (1, 19) (1, 23)
""")
        self.check_tokenize("async.foo", """\
NAME 'async' (1, 0) (1, 5)
OP '.' (1, 5) (1, 6)
NAME 'foo' (1, 6) (1, 9)
""")
        self.check_tokenize("async", """\
NAME 'async' (1, 0) (1, 5)
""")
        self.check_tokenize("async\n#comment\nawait", """\
NAME 'async' (1, 0) (1, 5)
NEWLINE '\\n' (1, 5) (1, 6)
COMMENT '#comment' (2, 0) (2, 8)
NL '\\n' (2, 8) (2, 9)
NAME 'await' (3, 0) (3, 5)
""")
        self.check_tokenize("async\n...\nawait", """\
NAME 'async' (1, 0) (1, 5)
NEWLINE '\\n' (1, 5) (1, 6)
OP '...' (2, 0) (2, 3)
NEWLINE '\\n' (2, 3) (2, 4)
NAME 'await' (3, 0) (3, 5)
""")
        self.check_tokenize("async\nawait", """\
NAME 'async' (1, 0) (1, 5)
NEWLINE '\\n' (1, 5) (1, 6)
NAME 'await' (2, 0) (2, 5)
""")
        self.check_tokenize("foo.async + 1", """\
NAME 'foo' (1, 0) (1, 3)
OP '.' (1, 3) (1, 4)
NAME 'async' (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '1' (1, 12) (1, 13)
""")
        self.check_tokenize("async def foo(): pass", """\
NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NAME 'pass' (1, 17) (1, 21)
""")
        # NOTE(review): the source fragments in the multi-line literals below
        # look like they lost their internal indentation; the expected tables
        # still reflect the upstream indented fragments — confirm.
        self.check_tokenize('''\
async def foo():
def foo(await):
await = 1
if 1:
await
async += 1
''', """\
NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
INDENT ' ' (2, 0) (2, 2)
NAME 'def' (2, 2) (2, 5)
NAME 'foo' (2, 6) (2, 9)
OP '(' (2, 9) (2, 10)
NAME 'await' (2, 10) (2, 15)
OP ')' (2, 15) (2, 16)
OP ':' (2, 16) (2, 17)
NEWLINE '\\n' (2, 17) (2, 18)
INDENT ' ' (3, 0) (3, 4)
NAME 'await' (3, 4) (3, 9)
OP '=' (3, 10) (3, 11)
NUMBER '1' (3, 12) (3, 13)
NEWLINE '\\n' (3, 13) (3, 14)
DEDENT '' (4, 2) (4, 2)
NAME 'if' (4, 2) (4, 4)
NUMBER '1' (4, 5) (4, 6)
OP ':' (4, 6) (4, 7)
NEWLINE '\\n' (4, 7) (4, 8)
INDENT ' ' (5, 0) (5, 4)
NAME 'await' (5, 4) (5, 9)
NEWLINE '\\n' (5, 9) (5, 10)
DEDENT '' (6, 0) (6, 0)
DEDENT '' (6, 0) (6, 0)
NAME 'async' (6, 0) (6, 5)
OP '+=' (6, 6) (6, 8)
NUMBER '1' (6, 9) (6, 10)
NEWLINE '\\n' (6, 10) (6, 11)
""")
        self.check_tokenize('''\
async def foo():
async for i in 1: pass''', """\
NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
OP ')' (1, 14) (1, 15)
OP ':' (1, 15) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
INDENT ' ' (2, 0) (2, 2)
NAME 'async' (2, 2) (2, 7)
NAME 'for' (2, 8) (2, 11)
NAME 'i' (2, 12) (2, 13)
NAME 'in' (2, 14) (2, 16)
NUMBER '1' (2, 17) (2, 18)
OP ':' (2, 18) (2, 19)
NAME 'pass' (2, 20) (2, 24)
DEDENT '' (3, 0) (3, 0)
""")
        self.check_tokenize('''async def foo(async): await''', """\
NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'foo' (1, 10) (1, 13)
OP '(' (1, 13) (1, 14)
NAME 'async' (1, 14) (1, 19)
OP ')' (1, 19) (1, 20)
OP ':' (1, 20) (1, 21)
NAME 'await' (1, 22) (1, 27)
""")
        self.check_tokenize('''\
def f():
def baz(): pass
async def bar(): pass
await = 2''', """\
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
NL '\\n' (2, 0) (2, 1)
INDENT ' ' (3, 0) (3, 2)
NAME 'def' (3, 2) (3, 5)
NAME 'baz' (3, 6) (3, 9)
OP '(' (3, 9) (3, 10)
OP ')' (3, 10) (3, 11)
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
NAME 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
OP ')' (4, 16) (4, 17)
OP ':' (4, 17) (4, 18)
NAME 'pass' (4, 19) (4, 23)
NEWLINE '\\n' (4, 23) (4, 24)
NL '\\n' (5, 0) (5, 1)
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
""")
        self.check_tokenize('''\
async def f():
def baz(): pass
async def bar(): pass
await = 2''', """\
NAME 'async' (1, 0) (1, 5)
NAME 'def' (1, 6) (1, 9)
NAME 'f' (1, 10) (1, 11)
OP '(' (1, 11) (1, 12)
OP ')' (1, 12) (1, 13)
OP ':' (1, 13) (1, 14)
NEWLINE '\\n' (1, 14) (1, 15)
NL '\\n' (2, 0) (2, 1)
INDENT ' ' (3, 0) (3, 2)
NAME 'def' (3, 2) (3, 5)
NAME 'baz' (3, 6) (3, 9)
OP '(' (3, 9) (3, 10)
OP ')' (3, 10) (3, 11)
OP ':' (3, 11) (3, 12)
NAME 'pass' (3, 13) (3, 17)
NEWLINE '\\n' (3, 17) (3, 18)
NAME 'async' (4, 2) (4, 7)
NAME 'def' (4, 8) (4, 11)
NAME 'bar' (4, 12) (4, 15)
OP '(' (4, 15) (4, 16)
OP ')' (4, 16) (4, 17)
OP ':' (4, 17) (4, 18)
NAME 'pass' (4, 19) (4, 23)
NEWLINE '\\n' (4, 23) (4, 24)
NL '\\n' (5, 0) (5, 1)
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
""")
class GenerateTokensTest(TokenizeTest):
    """Re-run every TokenizeTest case through generate_tokens().

    generate_tokens() reads native strings rather than encoded bytes, so
    the stream matches tokenize() minus the leading ENCODING token.
    """

    def check_tokenize(self, s, expected):
        # Tabulate the tokens of s; the stringifier drops ENDMARKER and the
        # final implicit NEWLINE, and no ENCODING row is expected here.
        readline = StringIO(s).readline
        rows = stringify_tokens_from_source(generate_tokens(readline), s)
        self.assertEqual(rows, expected.rstrip().splitlines())
def decistmt(s):
    """Rewrite the statement string *s* so float literals become Decimals.

    Every NUMBER token containing a '.' is replaced by the four tokens
    ``Decimal ( '<literal>' )``; all other tokens pass through unchanged.
    Returns the untokenized result as a str.
    """
    rewritten = []
    for tok in tokenize(BytesIO(s.encode('utf-8')).readline):  # tokenize the string
        toknum, tokval = tok.type, tok.string
        if toknum == NUMBER and '.' in tokval:  # replace NUMBER tokens
            rewritten += [(NAME, 'Decimal'),
                          (OP, '('),
                          (STRING, repr(tokval)),
                          (OP, ')')]
        else:
            rewritten.append((toknum, tokval))
    return untokenize(rewritten).decode('utf-8')
class TestMisc(TestCase):

    def test_decistmt(self):
        """Check the docs example: rewriting float literals as Decimals."""
        # Substitute Decimals for floats in a string of statements.
        # This is an example from the docs.
        from decimal import Decimal
        stmt = '+21.3e-5*-.1234/81.7'
        rewritten = decistmt(stmt)
        self.assertEqual(rewritten,
                         "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')")
        # The format of the exponent is inherited from the platform C library.
        # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
        # we're only showing 11 digits, and the 12th isn't close to 5, the
        # rest of the output should be platform-independent.
        self.assertRegex(repr(eval(stmt)), '-3.2171603427[0-9]*e-0+7')
        # Output from calculations with Decimal should be identical across all
        # platforms.
        self.assertEqual(eval(rewritten),
                         Decimal('-3.217160342717258261933904529E-7'))
class TestTokenizerAdheresToPep0263(TestCase):
    """
    Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
    """

    def _testFile(self, filename):
        # Round-trip a data file that ships next to this test module.
        path = os.path.join(os.path.dirname(__file__), filename)
        # Close the file deterministically: the original passed the open file
        # object straight to check_roundtrip() and leaked it to the garbage
        # collector (ResourceWarning under -W error).
        with open(path, 'rb') as f:
            TestRoundtrip.check_roundtrip(self, f)

    def test_utf8_coding_cookie_and_no_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt'
        self._testFile(f)

    def test_latin1_coding_cookie_and_utf8_bom(self):
        """
        As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
        allowed encoding for the comment is 'utf-8'. The text file used in
        this test starts with a BOM signature, but specifies latin1 as the
        coding, so verify that a SyntaxError is raised, which matches the
        behaviour of the interpreter when it encounters a similar condition.
        """
        f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
        self.assertRaises(SyntaxError, self._testFile, f)

    def test_no_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
        self._testFile(f)

    def test_utf8_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
        self._testFile(f)

    def test_bad_coding_cookie(self):
        # Files declaring unusable encodings must be rejected with
        # SyntaxError during the round-trip.
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py')
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding2.py')
class Test_Tokenize(TestCase):
    """Direct checks of the private _tokenize() helper's decoding rules."""

    def test__tokenize_decodes_with_specified_encoding(self):
        literal = '"ЉЊЈЁЂ"'
        # Serve the encoded line exactly once, then EOF (b'') forever.
        pending = iter([literal.encode('utf-8')])

        def readline():
            return next(pending, b'')

        # skip the initial encoding token and the end tokens
        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-2]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "bytes not decoded with encoding")

    def test__tokenize_does_not_decode_with_encoding_none(self):
        literal = '"ЉЊЈЁЂ"'
        # Serve the text line exactly once, then EOF (b'') forever.
        pending = iter([literal])

        def readline():
            return next(pending, b'')

        # skip the end tokens
        tokens = list(_tokenize(readline, encoding=None))[:-2]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "string not tokenized when encoding is None")
class TestDetectEncoding(TestCase):
    """Tests for tokenize.detect_encoding(): BOMs, coding cookies and the
    interaction between the two, plus tokenize.open()."""

    def get_readline(self, lines):
        # Build a readline() that yields the given byte lines one at a time
        # and raises StopIteration when exhausted (EOF for detect_encoding).
        index = 0
        def readline():
            nonlocal index
            if index == len(lines):
                raise StopIteration
            line = lines[index]
            index += 1
            return line
        return readline

    def test_no_bom_no_encoding_cookie(self):
        lines = (
            b'# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8')
        # detect_encoding reads at most two lines looking for a cookie.
        self.assertEqual(consumed_lines, list(lines[:2]))

    def test_bom_no_cookie(self):
        lines = (
            b'\xef\xbb\xbf# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        # The BOM bytes are stripped from the consumed first line.
        self.assertEqual(consumed_lines,
                         [b'# something\n', b'print(something)\n'])

    def test_cookie_first_line_no_bom(self):
        lines = (
            b'# -*- coding: latin-1 -*-\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        # 'latin-1' is normalized to its canonical codec name.
        self.assertEqual(encoding, 'iso-8859-1')
        self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])

    def test_matched_bom_and_cookie_first_line(self):
        lines = (
            b'\xef\xbb\xbf# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
        # A UTF-8 BOM combined with a non-utf-8 cookie must be rejected.
        lines = (
            b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_cookie_second_line_no_bom(self):
        lines = (
            b'#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'ascii')
        expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n']
        self.assertEqual(consumed_lines, expected)

    def test_matched_bom_and_cookie_second_line(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'f# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines,
                         [b'#! something\n', b'f# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_cookie_second_line_noncommented_first_line(self):
        # A cookie on line two only counts if line one is a comment/blank.
        lines = (
            b"print('\xc2\xa3')\n",
            b'# vim: set fileencoding=iso8859-15 :\n',
            b"print('\xe2\x82\xac')\n"
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8')
        expected = [b"print('\xc2\xa3')\n"]
        self.assertEqual(consumed_lines, expected)

    def test_cookie_second_line_commented_first_line(self):
        lines = (
            b"#print('\xc2\xa3')\n",
            b'# vim: set fileencoding=iso8859-15 :\n',
            b"print('\xe2\x82\xac')\n"
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'iso8859-15')
        expected = [b"#print('\xc2\xa3')\n", b'# vim: set fileencoding=iso8859-15 :\n']
        self.assertEqual(consumed_lines, expected)

    def test_cookie_second_line_empty_first_line(self):
        lines = (
            b'\n',
            b'# vim: set fileencoding=iso8859-15 :\n',
            b"print('\xe2\x82\xac')\n"
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'iso8859-15')
        expected = [b'\n', b'# vim: set fileencoding=iso8859-15 :\n']
        self.assertEqual(consumed_lines, expected)

    def test_latin1_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
                     "iso-8859-1-unix", "iso-latin-1-mac")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"print(things)\n",
                         b"do_something += 4\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "iso-8859-1")

    def test_syntaxerror_latin1(self):
        # Issue 14629: need to raise SyntaxError if the first
        # line(s) have non-UTF-8 characters
        lines = (
            b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_utf8_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"1 + 3\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "utf-8")

    def test_short_files(self):
        # detect_encoding must cope with files of fewer than two lines.
        readline = self.get_readline((b'print(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        encoding, consumed_lines = detect_encoding(self.get_readline(()))
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        readline = self.get_readline((b'\xef\xbb\xbf',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'# coding: bad\n',))
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_false_encoding(self):
        # Issue 18873: "Encoding" detected in non-comment lines
        readline = self.get_readline((b'print("#coding=fake")',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])

    def test_open(self):
        filename = support.TESTFN + '.py'
        self.addCleanup(support.unlink, filename)

        # test coding cookie
        for encoding in ('iso-8859-15', 'utf-8'):
            with open(filename, 'w', encoding=encoding) as fp:
                print("# coding: %s" % encoding, file=fp)
                print("print('euro:\u20ac')", file=fp)
            with tokenize_open(filename) as fp:
                self.assertEqual(fp.encoding, encoding)
                self.assertEqual(fp.mode, 'r')

        # test BOM (no coding cookie)
        with open(filename, 'w', encoding='utf-8-sig') as fp:
            print("print('euro:\u20ac')", file=fp)
        with tokenize_open(filename) as fp:
            self.assertEqual(fp.encoding, 'utf-8-sig')
            self.assertEqual(fp.mode, 'r')

    def test_filename_in_exception(self):
        # When possible, include the file name in the exception.
        path = 'some_file_path'
        lines = (
            b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
        )
        class Bunk:
            # Minimal readline-providing object with an optional .name,
            # mimicking the part of a file object detect_encoding inspects.
            def __init__(self, lines, path):
                self.name = path
                self._lines = lines
                self._index = 0

            def readline(self):
                if self._index == len(lines):
                    raise StopIteration
                line = lines[self._index]
                self._index += 1
                return line

        with self.assertRaises(SyntaxError):
            ins = Bunk(lines, path)
            # Make sure lacking a name isn't an issue.
            del ins.name
            detect_encoding(ins.readline)
        with self.assertRaisesRegex(SyntaxError, '.*{}'.format(path)):
            ins = Bunk(lines, path)
            detect_encoding(ins.readline)

    def test_open_error(self):
        # Issue #23840: open() must close the binary file on error
        m = BytesIO(b'#coding:xxx')
        with mock.patch('tokenize._builtin_open', return_value=m):
            self.assertRaises(SyntaxError, tokenize_open, 'foobar')
        self.assertTrue(m.closed)
class TestTokenize(TestCase):
    def test_tokenize(self):
        # tokenize() must feed detect_encoding()'s consumed lines first,
        # then the remaining lines, into _tokenize() with the detected
        # encoding.  Both helpers are replaced with fakes for the check.
        import tokenize as tokenize_module
        encoding = object()
        encoding_used = None
        def mock_detect_encoding(readline):
            # Pretend two lines were consumed while sniffing the encoding.
            return encoding, [b'first', b'second']
        def mock__tokenize(readline, encoding):
            nonlocal encoding_used
            encoding_used = encoding
            out = []
            while True:
                next_line = readline()
                if next_line:
                    out.append(next_line)
                    continue
                return out
        counter = 0
        def mock_readline():
            nonlocal counter
            counter += 1
            if counter == 5:
                return b''
            return str(counter).encode()
        # Patch manually (not mock.patch) so restoration order is explicit.
        orig_detect_encoding = tokenize_module.detect_encoding
        orig__tokenize = tokenize_module._tokenize
        tokenize_module.detect_encoding = mock_detect_encoding
        tokenize_module._tokenize = mock__tokenize
        try:
            results = tokenize(mock_readline)
            self.assertEqual(list(results),
                             [b'first', b'second', b'1', b'2', b'3', b'4'])
        finally:
            tokenize_module.detect_encoding = orig_detect_encoding
            tokenize_module._tokenize = orig__tokenize
        self.assertEqual(encoding_used, encoding)
def test_oneline_defs(self):
buf = []
for i in range(500):
buf.append('def i{i}(): return {i}'.format(i=i))
buf.append('OK')
buf = '\n'.join(buf)
# Test that 500 consequent, one-line defs is OK
toks = list(tokenize(BytesIO(buf.encode('utf-8')).readline))
self.assertEqual(toks[-3].string, 'OK') # [-1] is always ENDMARKER
# [-2] is always NEWLINE
    def assertExactTypeEqual(self, opstr, *optypes):
        # Helper: tokenize `opstr` and verify the exact_type of each token
        # matches `optypes`, bracketed by the standard ENCODING token at the
        # front and NEWLINE + ENDMARKER at the back.
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        # ENCODING + operators + NEWLINE + ENDMARKER.
        self.assertEqual(len(tokens), 3 + num_optypes)
        self.assertEqual(tok_name[tokens[0].exact_type],
                         tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(tok_name[tokens[i + 1].exact_type],
                             tok_name[optypes[i]])
        self.assertEqual(tok_name[tokens[1 + num_optypes].exact_type],
                         tok_name[token.NEWLINE])
        self.assertEqual(tok_name[tokens[2 + num_optypes].exact_type],
                         tok_name[token.ENDMARKER])
def test_exact_type(self):
self.assertExactTypeEqual('()', token.LPAR, token.RPAR)
self.assertExactTypeEqual('[]', token.LSQB, token.RSQB)
self.assertExactTypeEqual(':', token.COLON)
self.assertExactTypeEqual(',', token.COMMA)
self.assertExactTypeEqual(';', token.SEMI)
self.assertExactTypeEqual('+', token.PLUS)
self.assertExactTypeEqual('-', token.MINUS)
self.assertExactTypeEqual('*', token.STAR)
self.assertExactTypeEqual('/', token.SLASH)
self.assertExactTypeEqual('|', token.VBAR)
self.assertExactTypeEqual('&', token.AMPER)
self.assertExactTypeEqual('<', token.LESS)
self.assertExactTypeEqual('>', token.GREATER)
self.assertExactTypeEqual('=', token.EQUAL)
self.assertExactTypeEqual('.', token.DOT)
self.assertExactTypeEqual('%', token.PERCENT)
self.assertExactTypeEqual('{}', token.LBRACE, token.RBRACE)
self.assertExactTypeEqual('==', token.EQEQUAL)
self.assertExactTypeEqual('!=', token.NOTEQUAL)
self.assertExactTypeEqual('<=', token.LESSEQUAL)
self.assertExactTypeEqual('>=', token.GREATEREQUAL)
self.assertExactTypeEqual('~', token.TILDE)
self.assertExactTypeEqual('^', token.CIRCUMFLEX)
self.assertExactTypeEqual('<<', token.LEFTSHIFT)
self.assertExactTypeEqual('>>', token.RIGHTSHIFT)
self.assertExactTypeEqual('**', token.DOUBLESTAR)
self.assertExactTypeEqual('+=', token.PLUSEQUAL)
self.assertExactTypeEqual('-=', token.MINEQUAL)
self.assertExactTypeEqual('*=', token.STAREQUAL)
self.assertExactTypeEqual('/=', token.SLASHEQUAL)
self.assertExactTypeEqual('%=', token.PERCENTEQUAL)
self.assertExactTypeEqual('&=', token.AMPEREQUAL)
self.assertExactTypeEqual('|=', token.VBAREQUAL)
self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL)
self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL)
self.assertExactTypeEqual('<<=', token.LEFTSHIFTEQUAL)
self.assertExactTypeEqual('>>=', token.RIGHTSHIFTEQUAL)
self.assertExactTypeEqual('**=', token.DOUBLESTAREQUAL)
self.assertExactTypeEqual('//', token.DOUBLESLASH)
self.assertExactTypeEqual('//=', token.DOUBLESLASHEQUAL)
self.assertExactTypeEqual(':=', token.COLONEQUAL)
self.assertExactTypeEqual('...', token.ELLIPSIS)
self.assertExactTypeEqual('->', token.RARROW)
self.assertExactTypeEqual('@', token.AT)
self.assertExactTypeEqual('@=', token.ATEQUAL)
self.assertExactTypeEqual('a**2+b**2==c**2',
NAME, token.DOUBLESTAR, NUMBER,
token.PLUS,
NAME, token.DOUBLESTAR, NUMBER,
token.EQEQUAL,
NAME, token.DOUBLESTAR, NUMBER)
self.assertExactTypeEqual('{1, 2, 3}',
token.LBRACE,
token.NUMBER, token.COMMA,
token.NUMBER, token.COMMA,
token.NUMBER,
token.RBRACE)
self.assertExactTypeEqual('^(x & 0x1)',
token.CIRCUMFLEX,
token.LPAR,
token.NAME, token.AMPER, token.NUMBER,
token.RPAR)
    def test_pathological_trailing_whitespace(self):
        # See http://bugs.python.org/issue16152
        # Trailing whitespace after an operator must not confuse exact_type.
        self.assertExactTypeEqual('@ ', token.AT)
class UntokenizeTest(TestCase):
    def test_bad_input_order(self):
        # Untokenizer.add_whitespace() must reject positions that go
        # backwards relative to the previously emitted token.
        # raise if previous row
        u = Untokenizer()
        u.prev_row = 2
        u.prev_col = 2
        with self.assertRaises(ValueError) as cm:
            u.add_whitespace((1,3))
        self.assertEqual(cm.exception.args[0],
                'start (1,3) precedes previous end (2,2)')
        # raise if previous column in row
        self.assertRaises(ValueError, u.add_whitespace, (2,1))
    def test_backslash_continuation(self):
        # The problem is that <whitespace>\<newline> leaves no token
        u = Untokenizer()
        u.prev_row = 1
        u.prev_col = 1
        u.tokens = []
        # Jumping to a later row must emit backslash-continuation markers.
        u.add_whitespace((2, 0))
        self.assertEqual(u.tokens, ['\\\n'])
        u.prev_row = 2
        u.add_whitespace((4, 4))
        self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
        # check_roundtrip is written to be callable unbound with any TestCase.
        TestRoundtrip.check_roundtrip(self, 'a\n  b\n    c\n  \\\n  c\n')
    def test_iter_compat(self):
        # untokenize() must accept plain (type, string) 2-tuples, with or
        # without a leading ENCODING token.  (The local name `token` here
        # deliberately shadows the module import; it is a tuple.)
        u = Untokenizer()
        token = (NAME, 'Hello')
        tokens = [(ENCODING, 'utf-8'), token]
        u.compat(token, iter([]))
        self.assertEqual(u.tokens, ["Hello "])
        u = Untokenizer()
        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
        u = Untokenizer()
        self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
        self.assertEqual(u.encoding, 'utf-8')
        # With an ENCODING token the module-level helper returns bytes.
        self.assertEqual(untokenize(iter(tokens)), b'Hello ')
class TestRoundtrip(TestCase):
    def check_roundtrip(self, f):
        """
        Test roundtrip for `untokenize`. `f` is an open file or a string.
        The source code in f is tokenized to both 5- and 2-tuples.
        Both sequences are converted back to source code via
        tokenize.untokenize(), and the latter tokenized again to 2-tuples.
        The test fails if the 3 pair tokenizations do not match.

        When untokenize bugs are fixed, untokenize with 5-tuples should
        reproduce code that does not contain a backslash continuation
        following spaces.  A proper test should test this.
        """
        # Get source code and original tokenizations
        if isinstance(f, str):
            code = f.encode('utf-8')
        else:
            code = f.read()
            f.close()
        readline = iter(code.splitlines(keepends=True)).__next__
        tokens5 = list(tokenize(readline))
        tokens2 = [tok[:2] for tok in tokens5]
        # Reproduce tokens2 from pairs
        bytes_from2 = untokenize(tokens2)
        readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__
        tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
        self.assertEqual(tokens2_from2, tokens2)
        # Reproduce tokens2 from 5-tuples
        bytes_from5 = untokenize(tokens5)
        readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__
        tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
        self.assertEqual(tokens2_from5, tokens2)
    def test_roundtrip(self):
        # There are some standard formatting practices that are easy to get right.
        self.check_roundtrip("if x == 1:\n"
                             "    print(x)\n")
        self.check_roundtrip("# This is a comment\n"
                             "# This also\n")
        # Some people use different formatting conventions, which makes
        # untokenize a little trickier. Note that this test involves trailing
        # whitespace after the colon. Note that we use hex escapes to make the
        # two trailing blanks apparent in the expected output.
        self.check_roundtrip("if x == 1 : \n"
                             "  print(x)\n")
        fn = support.findfile("tokenize_tests.txt")
        with open(fn, 'rb') as f:
            self.check_roundtrip(f)
        self.check_roundtrip("if x == 1:\n"
                             "    # A comment by itself.\n"
                             "    print(x) # Comment here, too.\n"
                             "    # Another comment.\n"
                             "after_if = True\n")
        self.check_roundtrip("if (x # The comments need to go in the right place\n"
                             "    == 1):\n"
                             "    print('x==1')\n")
        self.check_roundtrip("class Test: # A comment here\n"
                             "  # A comment with weird indent\n"
                             "  after_com = 5\n"
                             "  def x(m): return m*5 # a one liner\n"
                             "  def  y(m): # A whitespace after the colon\n"
                             "     return y*4 # 3-space indent\n")
        # Some error-handling code
        # (the ')' deliberately lands on the 'else:' line -- tokenize only
        # roundtrips tokens, it never compiles the snippet).
        self.check_roundtrip("try: import somemodule\n"
                             "except ImportError: # comment\n"
                             "    print('Can not import' # comment2\n)"
                             "else:   print('Loaded')\n")
    def test_continuation(self):
        # Balancing continuation
        # Open brackets implicitly continue lines; the roundtrip must keep
        # token pairs identical across those continuations.
        self.check_roundtrip("a = (3,4, \n"
                             "5,6)\n"
                             "y = [3, 4,\n"
                             "5]\n"
                             "z = {'a': 5,\n"
                             "'b':15, 'c':True}\n"
                             "x = len(y) + 5 - a[\n"
                             "3] - a[2]\n"
                             "+ len(z) - z[\n"
                             "'b']\n")
    def test_backslash_continuation(self):
        # Backslash means line continuation, except for comments
        self.check_roundtrip("x=1+\\\n"
                             "1\n"
                             "# This is a comment\\\n"
                             "# This also\n")
        # A trailing backslash inside a comment is literal text, and the
        # final line intentionally lacks a newline.
        self.check_roundtrip("# Comment \\\n"
                             "x = 0")
def test_string_concatenation(self):
# Two string literals on the same line
self.check_roundtrip("'' ''")
def test_random_files(self):
# Test roundtrip on random python modules.
# pass the '-ucpu' option to process the full directory.
import glob, random
fn = support.findfile("tokenize_tests.txt")
tempdir = os.path.dirname(fn) or os.curdir
testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
# Tokenize is broken on test_pep3131.py because regular expressions are
# broken on the obscure unicode identifiers in it. *sigh*
# With roundtrip extended to test the 5-tuple mode of untokenize,
# 7 more testfiles fail. Remove them also until the failure is diagnosed.
testfiles.remove(os.path.join(tempdir, "test_unicode_identifiers.py"))
for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'):
testfiles.remove(os.path.join(tempdir, "test_%s.py") % f)
if not support.is_resource_enabled("cpu"):
testfiles = random.sample(testfiles, 10)
for testfile in testfiles:
if support.verbose >= 2:
print('tokenize', testfile)
with open(testfile, 'rb') as f:
with self.subTest(file=testfile):
self.check_roundtrip(f)
def roundtrip(self, code):
if isinstance(code, str):
code = code.encode('utf-8')
return untokenize(tokenize(BytesIO(code).readline)).decode('utf-8')
    def test_indentation_semantics_retained(self):
        """
        Ensure that although whitespace might be mutated in a roundtrip,
        the semantic meaning of the indentation remains consistent.
        """
        code = "if False:\n\tx=3\n\tx=3\n"
        codelines = self.roundtrip(code).split('\n')
        # Both body lines had identical (tab) indentation; whatever the
        # roundtrip produced, they must still match each other.
        self.assertEqual(codelines[1], codelines[2])
        self.check_roundtrip(code)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
JioEducation/edx-platform | refs/heads/master | common/test/acceptance/pages/lms/pay_and_verify.py | 110 | """Payment and verification pages"""
import re
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise
from . import BASE_URL
from .dashboard import DashboardPage
class PaymentAndVerificationFlow(PageObject):
    """Interact with the split payment and verification flow.
    The flow can be accessed at the following URLs:
        `/verify_student/start-flow/{course}/`
        `/verify_student/upgrade/{course}/`
        `/verify_student/verify-now/{course}/`
        `/verify_student/verify-later/{course}/`
        `/verify_student/payment-confirmation/{course}/`
    Users can reach the flow when attempting to enroll in a course's verified
    mode, either directly from the track selection page, or by upgrading from
    the honor mode. Users can also reach the flow when attempting to complete
    a deferred verification, or when attempting to view a receipt corresponding
    to an earlier payment.
    """
    def __init__(self, browser, course_id, entry_point='start-flow'):
        """Initialize the page.
        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The course in which the user is enrolling.
        Keyword Arguments:
            entry_point (str): Where to begin the flow; must be one of 'start-flow',
                'upgrade', 'verify-now', verify-later', or 'payment-confirmation'.
        Raises:
            ValueError
        """
        super(PaymentAndVerificationFlow, self).__init__(browser)
        self._course_id = course_id
        # Validate up front so a typo fails fast instead of producing a 404.
        if entry_point not in ['start-flow', 'upgrade', 'verify-now', 'verify-later', 'payment-confirmation']:
            raise ValueError(
                "Entry point must be either 'start-flow', 'upgrade', 'verify-now', 'verify-later', or 'payment-confirmation'."
            )
        self._entry_point = entry_point
    @property
    def url(self):
        """Return the URL corresponding to the initial position in the flow."""
        url = "{base}/verify_student/{entry_point}/{course}/".format(
            base=BASE_URL,
            entry_point=self._entry_point,
            course=self._course_id
        )
        return url
    def is_browser_on_page(self):
        """Check if a step in the payment and verification flow has loaded."""
        # Any one of the flow's step containers being present counts as loaded.
        return (
            self.q(css="div .make-payment-step").is_present() or
            self.q(css="div .payment-confirmation-step").is_present() or
            self.q(css="div .face-photo-step").is_present() or
            self.q(css="div .id-photo-step").is_present() or
            self.q(css="div .review-photos-step").is_present() or
            self.q(css="div .enrollment-confirmation-step").is_present()
        )
    def indicate_contribution(self):
        """Interact with the radio buttons appearing on the first page of the upgrade flow."""
        self.q(css=".contribution-option > input").first.click()
    def proceed_to_payment(self):
        """Interact with the payment button."""
        self.q(css=".payment-button").click()
        FakePaymentPage(self.browser, self._course_id).wait_for_page()
    def immediate_verification(self):
        """Interact with the immediate verification button."""
        self.q(css="#verify_now_button").click()
        PaymentAndVerificationFlow(self.browser, self._course_id, entry_point='verify-now').wait_for_page()
    def defer_verification(self):
        """Interact with the link allowing the user to defer their verification."""
        self.q(css="#verify_later_button").click()
        DashboardPage(self.browser).wait_for_page()
    def webcam_capture(self):
        """Interact with a webcam capture button."""
        self.q(css="#webcam_capture_button").click()
        def _check_func():
            # Fulfilled once 'is-disabled' drops off the Next Step button.
            next_step_button_classes = self.q(css="#next_step_button").attrs('class')
            next_step_button_enabled = 'is-disabled' not in next_step_button_classes
            return (next_step_button_enabled, next_step_button_classes)
        # Check that the #next_step_button is enabled before returning control to the caller
        Promise(_check_func, "The 'Next Step' button is enabled.").fulfill()
    def next_verification_step(self, next_page_object):
        """Interact with the 'Next' step button found in the verification flow."""
        self.q(css="#next_step_button").click()
        next_page_object.wait_for_page()
    def go_to_dashboard(self):
        """Interact with the link to the dashboard appearing on the enrollment confirmation page."""
        if self.q(css="div .enrollment-confirmation-step").is_present():
            self.q(css=".action-primary").click()
        else:
            raise Exception("The dashboard can only be accessed from the enrollment confirmation.")
        DashboardPage(self.browser).wait_for_page()
class FakePaymentPage(PageObject):
    """Interact with the fake payment endpoint.
    This page is hidden behind the feature flag `ENABLE_PAYMENT_FAKE`,
    which is enabled in the Bok Choy env settings.
    Configuring this payment endpoint also requires configuring the Bok Choy
    auth settings with the following:
        "CC_PROCESSOR_NAME": "CyberSource2",
        "CC_PROCESSOR": {
            "CyberSource2": {
                "SECRET_KEY": <string>,
                "ACCESS_KEY": <string>,
                "PROFILE_ID": "edx",
                "PURCHASE_ENDPOINT": "/shoppingcart/payment_fake"
            }
        }
    """
    def __init__(self, browser, course_id):
        """Initialize the page.
        Arguments:
            browser (Browser): The browser instance.
            course_id (unicode): The course in which the user is enrolling.
        """
        super(FakePaymentPage, self).__init__(browser)
        self._course_id = course_id
    # Class-level page URL; the fake endpoint is course-independent.
    url = BASE_URL + "/shoppingcart/payment_fake/"
    def is_browser_on_page(self):
        """Check if a step in the payment and verification flow has loaded."""
        message = self.q(css='BODY').text[0]
        # The fake endpoint renders a literal 'Payment page' marker.
        match = re.search('Payment page', message)
        return True if match else False
    def submit_payment(self):
        """Interact with the payment submission button."""
        self.q(css="input[value='Submit']").click()
        return PaymentAndVerificationFlow(self.browser, self._course_id, entry_point='payment-confirmation').wait_for_page()
|
dscho/hg | refs/heads/default | mercurial/pure/osutil.py | 1 | # osutil.py - pure Python version of osutil.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import ctypes
import ctypes.util
import os
import socket
import stat as statmod
import sys
from . import policy
modulepolicy = policy.policy
policynocffi = policy.policynocffi
def _mode_to_kind(mode):
if statmod.S_ISREG(mode):
return statmod.S_IFREG
if statmod.S_ISDIR(mode):
return statmod.S_IFDIR
if statmod.S_ISLNK(mode):
return statmod.S_IFLNK
if statmod.S_ISBLK(mode):
return statmod.S_IFBLK
if statmod.S_ISCHR(mode):
return statmod.S_IFCHR
if statmod.S_ISFIFO(mode):
return statmod.S_IFIFO
if statmod.S_ISSOCK(mode):
return statmod.S_IFSOCK
return mode
def listdirpure(path, stat=False, skip=None):
    '''listdir(path, stat=False) -> list_of_tuples
    Return a sorted list containing information about the entries
    in the directory.
    If stat is True, each element is a 3-tuple:
      (name, type, stat object)
    Otherwise, each element is a 2-tuple:
      (name, type)
    '''
    prefix = path if path.endswith(os.sep) else path + os.sep
    entries = []
    for name in sorted(os.listdir(path)):
        st = os.lstat(prefix + name)
        # Encountering a directory named `skip` aborts the whole listing,
        # matching the behaviour of the native implementation.
        if name == skip and statmod.S_ISDIR(st.st_mode):
            return []
        kind = _mode_to_kind(st.st_mode)
        entries.append((name, kind, st) if stat else (name, kind))
    return entries
ffi = None
# Try the cffi-accelerated getattrlistbulk() path, but only on macOS and
# only when the module policy permits cffi.
if modulepolicy not in policynocffi and sys.platform == 'darwin':
    try:
        from _osutil_cffi import ffi, lib
    except ImportError:
        if modulepolicy == 'cffi': # strict cffi import
            raise
if sys.platform == 'darwin' and ffi is not None:
    listdir_batch_size = 4096
    # tweakable number, only affects performance, which chunks
    # of bytes do we get back from getattrlistbulk
    attrkinds = [None] * 20 # we need the max no for enum VXXX, 20 is plenty
    # Translate the kernel's vnode-type enum into stat S_IF* constants.
    attrkinds[lib.VREG] = statmod.S_IFREG
    attrkinds[lib.VDIR] = statmod.S_IFDIR
    attrkinds[lib.VLNK] = statmod.S_IFLNK
    attrkinds[lib.VBLK] = statmod.S_IFBLK
    attrkinds[lib.VCHR] = statmod.S_IFCHR
    attrkinds[lib.VFIFO] = statmod.S_IFIFO
    attrkinds[lib.VSOCK] = statmod.S_IFSOCK
    class stat_res(object):
        """Lightweight stand-in for os.stat results built from raw fields."""
        def __init__(self, st_mode, st_mtime, st_size):
            self.st_mode = st_mode
            self.st_mtime = st_mtime
            self.st_size = st_size
    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
    # Reused buffer for getattrlistbulk() result batches.
    buf = ffi.new("char[]", listdir_batch_size)
def listdirinternal(dfd, req, stat, skip):
ret = []
while True:
r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
if r == 0:
break
if r == -1:
raise OSError(ffi.errno, os.strerror(ffi.errno))
cur = ffi.cast("val_attrs_t*", buf)
for i in range(r):
lgt = cur.length
assert lgt == ffi.cast('uint32_t*', cur)[0]
ofs = cur.name_info.attr_dataoffset
str_lgt = cur.name_info.attr_length
base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
str_lgt - 1))
tp = attrkinds[cur.obj_type]
if name == "." or name == "..":
continue
if skip == name and tp == statmod.S_ISDIR:
return []
if stat:
mtime = cur.time.tv_sec
mode = (cur.accessmask & ~lib.S_IFMT)| tp
ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
st_size=cur.datalength)))
else:
ret.append((name, tp))
cur += lgt
return ret
    def listdir(path, stat=False, skip=None):
        # cffi-backed listdir(): same contract as listdirpure(), but reads
        # whole batches of entries per syscall via getattrlistbulk().
        req = ffi.new("struct attrlist*")
        req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
                          lib.ATTR_CMN_NAME |
                          lib.ATTR_CMN_OBJTYPE |
                          lib.ATTR_CMN_ACCESSMASK |
                          lib.ATTR_CMN_MODTIME)
        req.fileattr = lib.ATTR_FILE_DATALENGTH
        dfd = lib.open(path, lib.O_RDONLY, 0)
        if dfd == -1:
            raise OSError(ffi.errno, os.strerror(ffi.errno))
        try:
            ret = listdirinternal(dfd, req, stat, skip)
        finally:
            try:
                lib.close(dfd)
            except BaseException:
                pass # we ignore all the errors from closing, not
                     # much we can do about that
        return ret
else:
    # No cffi implementation available: fall back to the pure Python walk.
    listdir = listdirpure
if os.name != 'nt':
    # On POSIX the builtin open() already has the semantics we need.
    posixfile = open
    _SCM_RIGHTS = 0x01
    _socklen_t = ctypes.c_uint
    if sys.platform == 'linux2':
        # socket.h says "the type should be socklen_t but the definition of
        # the kernel is incompatible with this."
        _cmsg_len_t = ctypes.c_size_t
        _msg_controllen_t = ctypes.c_size_t
        _msg_iovlen_t = ctypes.c_size_t
    else:
        # Other POSIX platforms use the standard socklen_t-sized fields.
        _cmsg_len_t = _socklen_t
        _msg_controllen_t = _socklen_t
        _msg_iovlen_t = ctypes.c_int
    # ctypes mirror of `struct iovec` (scatter/gather I/O element).
    class _iovec(ctypes.Structure):
        _fields_ = [
            (u'iov_base', ctypes.c_void_p),
            (u'iov_len', ctypes.c_size_t),
        ]
    # ctypes mirror of `struct msghdr` as consumed by recvmsg(2).
    class _msghdr(ctypes.Structure):
        _fields_ = [
            (u'msg_name', ctypes.c_void_p),
            (u'msg_namelen', _socklen_t),
            (u'msg_iov', ctypes.POINTER(_iovec)),
            (u'msg_iovlen', _msg_iovlen_t),
            (u'msg_control', ctypes.c_void_p),
            (u'msg_controllen', _msg_controllen_t),
            (u'msg_flags', ctypes.c_int),
        ]
    # ctypes mirror of `struct cmsghdr`; cmsg_data is a zero-length array
    # placeholder marking where the ancillary payload begins.
    class _cmsghdr(ctypes.Structure):
        _fields_ = [
            (u'cmsg_len', _cmsg_len_t),
            (u'cmsg_level', ctypes.c_int),
            (u'cmsg_type', ctypes.c_int),
            (u'cmsg_data', ctypes.c_ubyte * 0),
        ]
    # Bind recvmsg() from libc; use_errno lets us read errno after failures.
    _libc = ctypes.CDLL(ctypes.util.find_library(u'c'), use_errno=True)
    _recvmsg = getattr(_libc, 'recvmsg', None)
    if _recvmsg:
        _recvmsg.restype = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
        _recvmsg.argtypes = (ctypes.c_int, ctypes.POINTER(_msghdr),
                             ctypes.c_int)
    else:
        # recvmsg isn't always provided by libc; such systems are unsupported
        def _recvmsg(sockfd, msg, flags):
            raise NotImplementedError('unsupported platform')
    def _CMSG_FIRSTHDR(msgh):
        # Python equivalent of the CMSG_FIRSTHDR macro: return the first
        # control message, or None when the control buffer cannot hold
        # even one header.
        if msgh.msg_controllen < ctypes.sizeof(_cmsghdr):
            return
        cmsgptr = ctypes.cast(msgh.msg_control, ctypes.POINTER(_cmsghdr))
        return cmsgptr.contents
# The pure version is less portable than the native version because the
# handling of socket ancillary data heavily depends on C preprocessor.
# Also, some length fields are wrongly typed in Linux kernel.
def recvfds(sockfd):
"""receive list of file descriptors via socket"""
dummy = (ctypes.c_ubyte * 1)()
iov = _iovec(ctypes.cast(dummy, ctypes.c_void_p), ctypes.sizeof(dummy))
cbuf = ctypes.create_string_buffer(256)
msgh = _msghdr(None, 0,
ctypes.pointer(iov), 1,
ctypes.cast(cbuf, ctypes.c_void_p), ctypes.sizeof(cbuf),
0)
r = _recvmsg(sockfd, ctypes.byref(msgh), 0)
if r < 0:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
# assumes that the first cmsg has fds because it isn't easy to write
# portable CMSG_NXTHDR() with ctypes.
cmsg = _CMSG_FIRSTHDR(msgh)
if not cmsg:
return []
if (cmsg.cmsg_level != socket.SOL_SOCKET or
cmsg.cmsg_type != _SCM_RIGHTS):
return []
rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) /
ctypes.sizeof(ctypes.c_int))
return [rfds[i] for i in xrange(rfdscount)]
else:
    import msvcrt
    _kernel32 = ctypes.windll.kernel32
    _DWORD = ctypes.c_ulong
    _LPCSTR = _LPSTR = ctypes.c_char_p
    _HANDLE = ctypes.c_void_p
    _INVALID_HANDLE_VALUE = _HANDLE(-1).value
    # CreateFile
    _FILE_SHARE_READ = 0x00000001
    _FILE_SHARE_WRITE = 0x00000002
    _FILE_SHARE_DELETE = 0x00000004
    _CREATE_ALWAYS = 2
    _OPEN_EXISTING = 3
    _OPEN_ALWAYS = 4
    _GENERIC_READ = 0x80000000
    _GENERIC_WRITE = 0x40000000
    _FILE_ATTRIBUTE_NORMAL = 0x80
    # open_osfhandle flags
    _O_RDONLY = 0x0000
    _O_RDWR = 0x0002
    _O_APPEND = 0x0008
    _O_TEXT = 0x4000
    _O_BINARY = 0x8000
    # types of parameters of C functions used (required by pypy)
    _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
        _DWORD, _DWORD, _HANDLE]
    _kernel32.CreateFileA.restype = _HANDLE
    def _raiseioerror(name):
        # Translate the calling thread's last Windows error into an IOError
        # that mentions the offending file name.
        err = ctypes.WinError()
        raise IOError(err.errno, '%s: %s' % (name, err.strerror))
    class posixfile(object):
        '''a file object aiming for POSIX-like semantics
        CPython's open() returns a file that was opened *without* setting the
        _FILE_SHARE_DELETE flag, which causes rename and unlink to abort.
        This even happens if any hardlinked copy of the file is in open state.
        We set _FILE_SHARE_DELETE here, so files opened with posixfile can be
        renamed and deleted while they are held open.
        Note that if a file opened with posixfile is unlinked, the file
        remains but cannot be opened again or be recreated under the same name,
        until all reading processes have closed the file.'''
        def __init__(self, name, mode='r', bufsize=-1):
            if 'b' in mode:
                flags = _O_BINARY
            else:
                flags = _O_TEXT
            m0 = mode[0]
            if m0 == 'r' and '+' not in mode:
                flags |= _O_RDONLY
                access = _GENERIC_READ
            else:
                # work around http://support.microsoft.com/kb/899149 and
                # set _O_RDWR for 'w' and 'a', even if mode has no '+'
                flags |= _O_RDWR
                access = _GENERIC_READ | _GENERIC_WRITE
            if m0 == 'r':
                creation = _OPEN_EXISTING
            elif m0 == 'w':
                creation = _CREATE_ALWAYS
            elif m0 == 'a':
                creation = _OPEN_ALWAYS
                flags |= _O_APPEND
            else:
                raise ValueError("invalid mode: %s" % mode)
            # Open with all three share flags so other handles may rename or
            # delete the file while we hold it open (the point of this class).
            fh = _kernel32.CreateFileA(name, access,
                _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
                None, creation, _FILE_ATTRIBUTE_NORMAL, None)
            if fh == _INVALID_HANDLE_VALUE:
                _raiseioerror(name)
            fd = msvcrt.open_osfhandle(fh, flags)
            if fd == -1:
                _kernel32.CloseHandle(fh)
                _raiseioerror(name)
            f = os.fdopen(fd, mode, bufsize)
            # unfortunately, f.name is '<fdopen>' at this point -- so we store
            # the name on this wrapper. We cannot just assign to f.name,
            # because that attribute is read-only.
            object.__setattr__(self, 'name', name)
            object.__setattr__(self, '_file', f)
        def __iter__(self):
            # The wrapped file object is itself an iterator over lines.
            return self._file
        def __getattr__(self, name):
            # Delegate everything else (read, write, close, ...) to the file.
            return getattr(self._file, name)
        def __setattr__(self, name, value):
            '''mimics the read-only attributes of Python file objects
            by raising 'TypeError: readonly attribute' if someone tries:
              f = posixfile('foo.txt')
              f.name = 'bla' '''
            return self._file.__setattr__(name, value)
        def __enter__(self):
            return self._file.__enter__()
        def __exit__(self, exc_type, exc_value, exc_tb):
            return self._file.__exit__(exc_type, exc_value, exc_tb)
|
jasonbot/django | refs/heads/master | django/contrib/auth/checks.py | 374 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.core import checks
def check_user_model(**kwargs):
    # System check validating the project's custom user model
    # (settings.AUTH_USER_MODEL); returns a list of Error/Warning objects.
    errors = []
    cls = apps.get_model(settings.AUTH_USER_MODEL)
    # Check that REQUIRED_FIELDS is a list
    if not isinstance(cls.REQUIRED_FIELDS, (list, tuple)):
        errors.append(
            checks.Error(
                "'REQUIRED_FIELDS' must be a list or tuple.",
                hint=None,
                obj=cls,
                id='auth.E001',
            )
        )
    # Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
    if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
        errors.append(
            checks.Error(
                ("The field named as the 'USERNAME_FIELD' "
                 "for a custom user model must not be included in 'REQUIRED_FIELDS'."),
                hint=None,
                obj=cls,
                id='auth.E002',
            )
        )
    # Check that the username field is unique
    if not cls._meta.get_field(cls.USERNAME_FIELD).unique:
        # Only a hard error with the default ModelBackend; custom backends
        # may legitimately support non-unique usernames, so warn instead.
        if (settings.AUTHENTICATION_BACKENDS ==
                ['django.contrib.auth.backends.ModelBackend']):
            errors.append(
                checks.Error(
                    "'%s.%s' must be unique because it is named as the 'USERNAME_FIELD'." % (
                        cls._meta.object_name, cls.USERNAME_FIELD
                    ),
                    hint=None,
                    obj=cls,
                    id='auth.E003',
                )
            )
        else:
            errors.append(
                checks.Warning(
                    "'%s.%s' is named as the 'USERNAME_FIELD', but it is not unique." % (
                        cls._meta.object_name, cls.USERNAME_FIELD
                    ),
                    hint=('Ensure that your authentication backend(s) can handle '
                          'non-unique usernames.'),
                    obj=cls,
                    id='auth.W004',
                )
            )
    return errors
|
pyramania/scipy | refs/heads/master | scipy/stats/setup.py | 41 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
    # Assemble the numpy.distutils build configuration for scipy.stats:
    # the Fortran statlib library plus three extension modules.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('stats', parent_package, top_path)
    config.add_data_dir('tests')
    statlib_src = [join('statlib', '*.f')]
    config.add_library('statlib', sources=statlib_src)
    # add statlib module
    config.add_extension('statlib',
                         sources=['statlib.pyf'],
                         f2py_options=['--no-wrap-functions'],
                         libraries=['statlib'],
                         depends=statlib_src
                         )
    # add _stats module
    config.add_extension('_stats',
                         sources=['_stats.c'],
                         )
    # add mvn module
    config.add_extension('mvn',
                         sources=['mvn.pyf','mvndst.f'],
                         )
    return config
if __name__ == '__main__':
    # Allow building/installing this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
slevenhagen/odoo-npg | refs/heads/8.0 | addons/website_event/controllers/__init__.py | 4497 | # -*- coding: utf-8 -*-
import main
|
sysalexis/kbengine | refs/heads/master | kbe/src/lib/python/Lib/idlelib/configHelpSourceEdit.py | 82 | "Dialog to specify or edit the parameters for a user configured help source."
import os
import sys
from tkinter import *
import tkinter.messagebox as tkMessageBox
import tkinter.filedialog as tkFileDialog
class GetHelpSourceDialog(Toplevel):
    def __init__(self, parent, title, menuItem='', filePath='', _htest=False):
        """Get menu entry and url/ local file location for Additional Help
        User selects a name for the Help resource and provides a web url
        or a local file as its source.  The user can enter a url or browse
        for the file.
        _htest - bool, change box location when running htest
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        # Modal dialog: tie to the parent and grab all input events.
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        # result is None until Ok() validates; then (menu_item, file_path).
        self.result = None
        self.CreateWidgets()
        self.menu.set(menuItem)
        self.path.set(filePath)
        self.withdraw() #hide while setting geometry
        #needs to be done here so that the winfo_reqwidth is valid
        self.update_idletasks()
        #centre dialog over parent. below parent if running htest.
        self.geometry(
                "+%d+%d" % (
                    parent.winfo_rootx() +
                    (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
                    parent.winfo_rooty() +
                    ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
                    if not _htest else 150)))
        self.deiconify() #geometry set, unhide
        self.bind('<Return>', self.Ok)
        # Block the caller until the dialog is dismissed.
        self.wait_window()
def CreateWidgets(self):
self.menu = StringVar(self)
self.path = StringVar(self)
self.fontSize = StringVar(self)
self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
text='Menu Item:')
self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
width=30)
self.entryMenu.focus_set()
labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
text='Help File Path: Enter URL or browse for file')
self.entryPath = Entry(self.frameMain, textvariable=self.path,
width=40)
self.entryMenu.focus_set()
labelMenu.pack(anchor=W, padx=5, pady=3)
self.entryMenu.pack(anchor=W, padx=5, pady=3)
labelPath.pack(anchor=W, padx=5, pady=3)
self.entryPath.pack(anchor=W, padx=5, pady=3)
browseButton = Button(self.frameMain, text='Browse', width=8,
command=self.browseFile)
browseButton.pack(pady=3)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
self.buttonOk = Button(frameButtons, text='OK',
width=8, default=ACTIVE, command=self.Ok)
self.buttonOk.grid(row=0, column=0, padx=5,pady=5)
self.buttonCancel = Button(frameButtons, text='Cancel',
width=8, command=self.Cancel)
self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
    def browseFile(self):
        # Open a file-selection dialog seeded from the current path; on
        # Windows, default to the Doc directory next to the interpreter.
        filetypes = [
            ("HTML Files", "*.htm *.html", "TEXT"),
            ("PDF Files", "*.pdf", "TEXT"),
            ("Windows Help Files", "*.chm"),
            ("Text Files", "*.txt", "TEXT"),
            ("All Files", "*")]
        path = self.path.get()
        if path:
            dir, base = os.path.split(path)
        else:
            base = None
            if sys.platform[:3] == 'win':
                dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
                if not os.path.isdir(dir):
                    dir = os.getcwd()
            else:
                dir = os.getcwd()
        opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
        file = opendialog.show(initialdir=dir, initialfile=base)
        if file:
            self.path.set(file)
def MenuOk(self):
"Simple validity check for a sensible menu item name"
menuOk = True
menu = self.menu.get()
menu.strip()
if not menu:
tkMessageBox.showerror(title='Menu Item Error',
message='No menu item specified',
parent=self)
self.entryMenu.focus_set()
menuOk = False
elif len(menu) > 30:
tkMessageBox.showerror(title='Menu Item Error',
message='Menu item too long:'
'\nLimit 30 characters.',
parent=self)
self.entryMenu.focus_set()
menuOk = False
return menuOk
def PathOk(self):
"Simple validity check for menu file path"
pathOk = True
path = self.path.get()
path.strip()
if not path: #no path specified
tkMessageBox.showerror(title='File Path Error',
message='No help file path specified.',
parent=self)
self.entryPath.focus_set()
pathOk = False
elif path.startswith(('www.', 'http')):
pass
else:
if path[:5] == 'file:':
path = path[5:]
if not os.path.exists(path):
tkMessageBox.showerror(title='File Path Error',
message='Help file path does not exist.',
parent=self)
self.entryPath.focus_set()
pathOk = False
return pathOk
    def Ok(self, event=None):
        # Validate both fields; on success store (menu, path) in self.result
        # and close the dialog.  When validation fails the dialog stays open
        # (MenuOk/PathOk already showed an error and refocused the entry).
        if self.MenuOk() and self.PathOk():
            self.result = (self.menu.get().strip(),
                           self.path.get().strip())
            if sys.platform == 'darwin':
                path = self.result[1]
                if path.startswith(('www', 'file:', 'http:')):
                    pass
                else:
                    # Mac Safari insists on using the URI form for local files
                    # NOTE(review): this rebinds self.result to a LIST, so the
                    # result type differs between platforms -- confirm callers
                    # do not rely on it being a tuple.
                    self.result = list(self.result)
                    self.result[1] = "file://" + path
            self.destroy()
    def Cancel(self, event=None):
        # Signal "no selection" to the caller and close the dialog.
        self.result = None
        self.destroy()
if __name__ == '__main__':
    # Manual/visual test: launch the dialog through IDLE's htest helper.
    from idlelib.idle_test.htest import run
    run(GetHelpSourceDialog)
|
pterk/django-tinymce | refs/heads/master | docs/conf.py | 13 | # -*- coding: utf-8 -*-
#
# django-tinymce documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('..'))
# Load project meta-data.
import metadata
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = metadata.name
copyright = "2008, %s" % metadata.authors
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.release
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % metadata.name
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
(master_doc, '%s.tex' % metadata.name, '%s Documentation' % metadata.name,
metadata.authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
vdrey/Toolbox | refs/heads/master | Python/AutomateBoringStuff/automate_online-materials/pyperclip.py | 4 | """
Pyperclip
A cross-platform clipboard module for Python. (only handles plain text for now)
By Al Sweigart al@inventwithpython.com
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
On Windows, no additional modules are needed.
On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
On Linux, this module makes use of the xclip or xsel commands, which should come with the os. Otherwise run "sudo apt-get install xclip" or "sudo apt-get install xsel"
Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
The gtk module is not available for Python 3, and this module does not work with PyGObject yet.
"""
__version__ = '1.5.6'
import platform, os
from subprocess import call, Popen, PIPE
def _pasteWindows():
    """Return the Windows clipboard contents as a unicode string."""
    CF_UNICODETEXT = 13
    user32 = ctypes.windll.user32
    user32.OpenClipboard(None)
    handle = user32.GetClipboardData(CF_UNICODETEXT)
    text = ctypes.c_wchar_p(handle).value
    user32.CloseClipboard()
    return text
def _copyWindows(text):
    """Place *text* on the Windows clipboard as CF_UNICODETEXT via ctypes."""
    GMEM_DDESHARE = 0x2000
    CF_UNICODETEXT = 13
    d = ctypes.windll # cdll expects 4 more bytes in user32.OpenClipboard(None)
    # Normalize to the platform's unicode string type: on Python 2 the name
    # `unicode` exists; on Python 3 it raises NameError and `str` is checked.
    try: # Python 2
        if not isinstance(text, unicode):
            text = text.decode('mbcs')
    except NameError:
        if not isinstance(text, str):
            text = text.decode('mbcs')
    d.user32.OpenClipboard(None)
    d.user32.EmptyClipboard()
    # Allocate a movable global buffer sized for the UTF-16 payload plus a
    # two-byte NUL terminator, then copy the wide string into it.
    hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE, len(text.encode('utf-16-le')) + 2)
    pchData = d.kernel32.GlobalLock(hCd)
    ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
    d.kernel32.GlobalUnlock(hCd)
    # NOTE(review): per Win32 convention the clipboard takes ownership of
    # hCd after SetClipboardData succeeds -- confirm no free is needed here.
    d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
    d.user32.CloseClipboard()
def _pasteCygwin():
    """Return the clipboard contents as unicode text (Cygwin/ctypes.cdll)."""
    CF_UNICODETEXT = 13
    user32 = ctypes.cdll.user32
    user32.OpenClipboard(None)
    handle = user32.GetClipboardData(CF_UNICODETEXT)
    text = ctypes.c_wchar_p(handle).value
    user32.CloseClipboard()
    return text
def _copyCygwin(text):
    """Place *text* on the clipboard as CF_UNICODETEXT (Cygwin variant).

    Identical to _copyWindows except that the DLLs are reached through
    ctypes.cdll rather than ctypes.windll.
    """
    GMEM_DDESHARE = 0x2000
    CF_UNICODETEXT = 13
    d = ctypes.cdll
    # Python 2 has `unicode`; on Python 3 the lookup raises NameError.
    try: # Python 2
        if not isinstance(text, unicode):
            text = text.decode('mbcs')
    except NameError:
        if not isinstance(text, str):
            text = text.decode('mbcs')
    d.user32.OpenClipboard(None)
    d.user32.EmptyClipboard()
    # Buffer sized for the UTF-16 payload plus a two-byte NUL terminator.
    hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE, len(text.encode('utf-16-le')) + 2)
    pchData = d.kernel32.GlobalLock(hCd)
    ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
    d.kernel32.GlobalUnlock(hCd)
    d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
    d.user32.CloseClipboard()
def _copyOSX(text):
    """Copy *text* to the OS X clipboard by piping it into pbcopy.

    The original passed a stray 'w' argv element to pbcopy; pbcopy takes no
    such operand (it always reads stdin), so it has been dropped.
    """
    text = str(text)
    p = Popen(['pbcopy'], stdin=PIPE)
    try:
        # works on Python 3 (bytes() requires an encoding)
        p.communicate(input=bytes(text, 'utf-8'))
    except TypeError:
        # works on Python 2 (bytes() only takes one argument)
        p.communicate(input=bytes(text))
def _pasteOSX():
    """Return the OS X clipboard contents read from pbpaste's stdout.

    The original passed a stray 'r' argv element to pbpaste; pbpaste takes
    no such operand, so it has been dropped.
    """
    p = Popen(['pbpaste'], stdout=PIPE)
    stdout, stderr = p.communicate()
    return bytes.decode(stdout)
def _pasteGtk():
    # Blocks until the clipboard owner delivers its text (or returns None).
    return gtk.Clipboard().wait_for_text()
def _copyGtk(text):
    # Keep a module-level reference to the clipboard object so it is not
    # garbage collected before other applications can fetch the text.
    global cb
    text = str(text)
    cb = gtk.Clipboard()
    cb.set_text(text)
    # store() persists the contents beyond this process's lifetime.
    cb.store()
def _pasteQt():
    # `cb` is the QApplication clipboard created during platform detection.
    return str(cb.text())
def _copyQt(text):
    # `cb` is the QApplication clipboard created during platform detection.
    text = str(text)
    cb.setText(text)
def _copyXclip(text):
    """Copy *text* to the X CLIPBOARD selection using the xclip utility."""
    proc = Popen(['xclip', '-selection', 'c'], stdin=PIPE)
    # Python 3's bytes() needs an encoding; Python 2's raises TypeError when
    # given one, in which case the single-argument form is used instead.
    try:
        payload = bytes(text, 'utf-8')
    except TypeError:
        payload = bytes(text)
    proc.communicate(input=payload)
def _pasteXclip():
    """Return the X CLIPBOARD selection contents using the xclip utility."""
    proc = Popen(['xclip', '-selection', 'c', '-o'], stdout=PIPE)
    out, _err = proc.communicate()
    return bytes.decode(out)
def _copyXsel(text):
    """Copy *text* to the clipboard using the xsel utility.

    Fix: pass -b so xsel targets the CLIPBOARD selection, matching the
    xclip implementation above.  Plain `xsel -i` writes the PRIMARY
    selection, so text copied here was invisible to clipboard-based
    tools (and to the xclip code path).
    """
    p = Popen(['xsel', '-b', '-i'], stdin=PIPE)
    try:
        # works on Python 3 (bytes() requires an encoding)
        p.communicate(input=bytes(text, 'utf-8'))
    except TypeError:
        # works on Python 2 (bytes() only takes one argument)
        p.communicate(input=bytes(text))

def _pasteXsel():
    """Return the clipboard contents using xsel (-b = CLIPBOARD selection)."""
    p = Popen(['xsel', '-b', '-o'], stdout=PIPE)
    stdout, stderr = p.communicate()
    return bytes.decode(stdout)
# Determine the OS/platform and set the copy() and paste() functions accordingly.
if 'cygwin' in platform.system().lower():
    _functions = 'Cygwin' # for debugging
    import ctypes
    paste = _pasteCygwin
    copy = _copyCygwin
elif os.name == 'nt' or platform.system() == 'Windows':
    _functions = 'Windows' # for debugging
    import ctypes
    paste = _pasteWindows
    copy = _copyWindows
elif os.name == 'mac' or platform.system() == 'Darwin':
    _functions = 'OS X pbcopy/pbpaste' # for debugging
    paste = _pasteOSX
    copy = _copyOSX
elif os.name == 'posix' or platform.system() == 'Linux':
    # Determine which command/module is installed, if any.
    # `which` exits 0 when the command exists on PATH.
    xclipExists = call(['which', 'xclip'],
                       stdout=PIPE, stderr=PIPE) == 0
    xselExists = call(['which', 'xsel'],
                      stdout=PIPE, stderr=PIPE) == 0
    gtkInstalled = False
    try:
        # Check if gtk is installed.
        import gtk
        gtkInstalled = True
    except ImportError:
        pass
    if not gtkInstalled:
        # Check if PyQt4 is installed.
        # (PyQt4Installed is only defined on this branch; that is safe
        # because the elif chain below tests gtkInstalled first.)
        PyQt4Installed = False
        try:
            import PyQt4.QtCore
            import PyQt4.QtGui
            PyQt4Installed = True
        except ImportError:
            pass
    # Set one of the copy & paste functions, preferring external commands
    # over GUI-toolkit bindings.
    if xclipExists:
        _functions = 'xclip command' # for debugging
        paste = _pasteXclip
        copy = _copyXclip
    elif gtkInstalled:
        _functions = 'gtk module' # for debugging
        paste = _pasteGtk
        copy = _copyGtk
    elif PyQt4Installed:
        _functions = 'PyQt4 module' # for debugging
        # A QApplication must exist before the clipboard can be used.
        app = PyQt4.QtGui.QApplication([])
        cb = PyQt4.QtGui.QApplication.clipboard()
        paste = _pasteQt
        copy = _copyQt
    elif xselExists:
        # TODO: xsel doesn't seem to work on Raspberry Pi (my test Linux environment). Putting this as the last method tried.
        _functions = 'xsel command' # for debugging
        paste = _pasteXsel
        copy = _copyXsel
    else:
        raise Exception('Pyperclip requires the xclip or xsel application, or the gtk or PyQt4 module.')
else:
    raise RuntimeError('pyperclip does not support your system.')
|
pantheon-systems/libcloud | refs/heads/trunk | libcloud/common/rackspace.py | 43 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common settings for Rackspace Cloud Servers and Cloud Files
"""
# Names exported by `from libcloud.common.rackspace import *`.
__all__ = [
    'AUTH_URL'
]

# Base URL of the Rackspace Cloud Identity (authentication) service.
AUTH_URL = 'https://identity.api.rackspacecloud.com'
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.2/Lib/encodings/ptcp154.py | 647 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless PTCP154 codec: thin wrappers over the C-level charmap
    helpers, using this module's encoding_map/decoding_map tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no cross-call state, so `final` is ignored
        # and only the encoded bytes (index 0) are returned.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding keeps no cross-call state, so `final` is ignored
        # and only the decoded text (index 0) is returned.
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: encode() comes from Codec, buffering from StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: decode() comes from Codec, buffering from StreamReader."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for the 'ptcp154' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='ptcp154',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
|
shrimo/PyQt4 | refs/heads/master | examples/demos/qtdemo/dockitem.py | 9 | #############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore
from PyQt4 import QtGui

from colors import Colors
from demoitem import DemoItem
class DockItem(DemoItem):
    """Gradient overlay item used by the Qt demo to fade scene content out
    toward a screen edge.

    NOTE(review): boundingRect() uses QtCore.QRectF, but this module only
    imported QtGui -- QtCore must be imported at the top of the file or
    boundingRect() raises NameError at runtime.
    """

    # Edge the dock is attached to.
    UP, DOWN, LEFT, RIGHT = range(4)

    def __init__(self, orien, x, y, width, length, scene=None, parent=None):
        super(DockItem, self).__init__(scene, parent)
        self.orientation = orien
        self.width = width
        self.length = length
        self.setPos(x, y)
        self.setZValue(40)  # draw above ordinary scene items
        self.setupPixmap()

    def setupPixmap(self):
        # Pre-render the gradient once into a pixmap; paint() just blits it.
        self.pixmap = QtGui.QPixmap(int(self.boundingRect().width()),
                int(self.boundingRect().height()))
        self.pixmap.fill(QtGui.QColor(0, 0, 0, 0))  # fully transparent
        painter = QtGui.QPainter(self.pixmap)
        # Create brush.
        # NOTE(review): setAlpha() below mutates the shared Colors.sceneBg1
        # object rather than a copy -- confirm no other user depends on its
        # alpha afterwards.
        background = Colors.sceneBg1
        brush = QtGui.QLinearGradient(0, 0, 0, self.boundingRect().height())
        brush.setSpread(QtGui.QGradient.PadSpread)
        # Fade from opaque background at the attached edge to transparent at
        # the opposite edge.  Only UP/DOWN are handled; LEFT/RIGHT fall
        # through with no color stops set.
        if self.orientation == DockItem.DOWN:
            brush.setColorAt(0.0, background)
            brush.setColorAt(0.2, background)
            background.setAlpha(0)
            brush.setColorAt(1.0, background)
        elif self.orientation == DockItem.UP:
            brush.setColorAt(1.0, background)
            brush.setColorAt(0.8, background)
            background.setAlpha(0)
            brush.setColorAt(0.0, background)
        painter.fillRect(0, 0, int(self.boundingRect().width()),
            int(self.boundingRect().height()), brush)

    def boundingRect(self):
        # Horizontal docks extend along `length`; vertical ones along `width`.
        if self.orientation in (DockItem.UP, DockItem.DOWN):
            return QtCore.QRectF(0, 0, self.length, self.width)
        else:
            return QtCore.QRectF(0, 0, self.width, self.length)

    def paint(self, painter, option, widget):
        painter.drawPixmap(0, 0, self.pixmap)
|
indictranstech/tele-erpnext | refs/heads/develop | erpnext/projects/doctype/activity_cost/test_activity_cost.py | 22 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.projects.doctype.activity_cost.activity_cost import DuplicationError
class TestActivityCost(unittest.TestCase):
    """Tests for the Activity Cost doctype."""

    def test_duplication(self):
        """Inserting a second Activity Cost for the same employee/activity
        pair must raise DuplicationError."""
        # Start from a clean table so the first insert cannot itself collide.
        frappe.db.sql("delete from `tabActivity Cost`")
        activity_cost1 = frappe.new_doc('Activity Cost')
        activity_cost1.update({
            "employee": "_T-Employee-0001",
            "employee_name": "_Test Employee",
            "activity_type": "_Test Activity Type",
            "billing_rate": 100,
            "costing_rate": 50
        })
        activity_cost1.insert()
        # An identical copy should be rejected as a duplicate on insert.
        activity_cost2 = frappe.copy_doc(activity_cost1)
        self.assertRaises(DuplicationError, activity_cost2.insert)
|
captainpete/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/win/gyptest-lib-ltcg.py | 269 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure LTCG setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # LTCG is an MSVC librarian/linker feature, so the test only runs on
    # Windows generators.
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'lib-flags'
    test.run_gyp('ltcg.gyp', chdir=CHDIR)
    test.build('ltcg.gyp', test.ALL, chdir=CHDIR)
    # If LTCG was applied to the lib correctly, the linker never reports
    # that it had to restart the link with /LTCG.
    test.must_not_contain_any_line(test.stdout(), ['restarting link with /LTCG'])
    test.pass_test()
|
bpramod/azure-linux-extensions | refs/heads/master | VMEncryption/main/oscrypto/rhel_68/encryptstates/UnmountOldrootState.py | 7 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import re
import sys
from time import sleep
from OSEncryptionState import *
class UnmountOldrootState(OSEncryptionState):
    """State machine step that stops every user of /oldroot so the original
    root filesystem can be unmounted before in-place encryption.

    The command sequence here is strictly order-dependent (service restarts,
    swapoff, unmounts, process kills, udev restart) -- do not reorder.
    """

    def __init__(self, context):
        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)

    def should_enter(self):
        """Enter only when base-state checks pass and /oldroot exists and is
        currently mounted."""
        self.context.logger.log("Verifying if machine should enter unmount_oldroot state")
        if not super(UnmountOldrootState, self).should_enter():
            return False
        self.context.logger.log("Performing enter checks for unmount_oldroot state")
        # Fail hard (raise) if /oldroot is missing altogether.
        self.command_executor.ExecuteInBash('[ -e "/oldroot" ]', True)
        # Not a mountpoint -> nothing to unmount.
        if self.command_executor.Execute('mountpoint /oldroot') != 0:
            return False
        return True

    def enter(self):
        if not self.should_enter():
            return
        self.context.logger.log("Entering unmount_oldroot state")
        # sshd needs its privilege-separation directory on the new root.
        self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)
        self.command_executor.Execute('service sshd restart')
        self.command_executor.Execute('dhclient')
        # Restart every running service (except the agent and ssh) so their
        # open file handles move off /oldroot.
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="/sbin/service --status-all",
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        for line in proc_comm.stdout.split('\n'):
            if not "running" in line:
                continue
            if "waagent" in line or "ssh" in line:
                continue
            splitted = line.split()
            if len(splitted):
                service = splitted[0]
                self.command_executor.Execute('service {0} restart'.format(service))
        self.command_executor.Execute('swapoff -a', True)
        # (sic: "passhprase" is the helper's actual name in this codebase.)
        self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)
        # Unmount nested mounts first, best effort (failures are tolerated).
        if os.path.exists("/oldroot/mnt/resource"):
            self.command_executor.Execute('umount /oldroot/mnt/resource')
        if os.path.exists("/oldroot/mnt"):
            self.command_executor.Execute('umount /oldroot/mnt')
        if os.path.exists("/oldroot/mnt/azure_bek_disk"):
            self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')
        if os.path.exists("/mnt"):
            self.command_executor.Execute('umount /mnt')
        if os.path.exists("/mnt/azure_bek_disk"):
            self.command_executor.Execute('umount /mnt/azure_bek_disk')
        self.command_executor.Execute('umount /oldroot/mnt/resource')
        self.command_executor.Execute('umount /oldroot/boot')
        self.command_executor.Execute('umount /oldroot/misc')
        self.command_executor.Execute('umount /oldroot/net')
        # Re-exec init so PID 1 releases handles into /oldroot.
        self.command_executor.Execute('telinit u', True)
        self.command_executor.Execute('kill 1', True)
        # Find and kill every remaining process holding /oldroot open,
        # highest PID first.
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="fuser -vm /oldroot",
                                      raise_exception_on_failure=False,
                                      communicator=proc_comm)
        self.context.logger.log("Processes using oldroot:\n{0}".format(proc_comm.stdout))
        procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())
        procs_to_kill = reversed(sorted(procs_to_kill))
        for victim in procs_to_kill:
            if int(victim) == os.getpid():
                # This very process holds /oldroot: schedule the lock removal
                # and the agent restart via at(1), then kill our own daemon.
                self.context.logger.log("Restarting WALA before committing suicide")
                self.context.logger.log("Current executable path: " + sys.executable)
                self.context.logger.log("Current executable arguments: " + " ".join(sys.argv))
                # Kill any other daemons that are blocked and would be executed after this process commits
                # suicide
                self.command_executor.Execute('service atd restart')
                os.chdir('/')
                with open("/delete-lock.sh", "w") as f:
                    f.write("rm -f /var/lib/azure_disk_encryption_config/daemon_lock_file.lck\n")
                self.command_executor.Execute('at -f /delete-lock.sh now + 1 minutes', True)
                self.command_executor.Execute('at -f /restart-wala.sh now + 2 minutes', True)
                self.command_executor.ExecuteInBash('pkill -f .*ForLinux.*handle.py.*daemon.*', True)
            if int(victim) == 1:
                self.context.logger.log("Skipping init")
                continue
            self.command_executor.Execute('kill -9 {0}'.format(victim))
            sleep(3)
        # Unmount every remaining mount under /oldroot, deepest path first.
        self.command_executor.ExecuteInBash('for mp in `grep /oldroot /proc/mounts | cut -f2 -d\' \' | sort -r`; do umount $mp; done', True)
        sleep(3)
        # Restart udev until the root block device node reappears (up to 10
        # attempts), then fsck it before encryption.
        attempt = 1
        while True:
            if attempt > 10:
                raise Exception("Block device {0} did not appear in 10 restart attempts".format(self.rootfs_block_device))
            self.context.logger.log("Attempt #{0} for reloading udev rules".format(attempt))
            self.command_executor.ExecuteInBash('pkill -f .*udev.*')
            self.command_executor.ExecuteInBash('udevd &')
            self.command_executor.ExecuteInBash('udevadm control --reload-rules && sleep 3')
            sleep(10)
            if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:
                break
            attempt += 1
        self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)

    def should_exit(self):
        """Exit only once /oldroot is really gone (its /bin no longer visible)."""
        self.context.logger.log("Verifying if machine should exit unmount_oldroot state")
        if os.path.exists('/oldroot/bin'):
            self.context.logger.log("/oldroot was not unmounted")
            return False
        return super(UnmountOldrootState, self).should_exit()
|
hugs/django | refs/heads/master | django/core/serializers/xml_serializer.py | 16 | """
XML serializer.
"""
from django.conf import settings
from django.core.serializers import base
from django.db import models
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_unicode
from xml.dom import pulldom
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent', None) is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)
    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        # The 'encoding' option overrides the project-wide default charset.
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version" : "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
self.xml.startElement("object", {
"pk" : smart_unicode(obj._get_pk_val()),
"model" : smart_unicode(obj._meta),
})
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"type" : field.get_internal_type()
})
# Get a "string version" of the object's data (this is handled by the
# serializer base class).
if getattr(obj, field.name) is not None:
value = self.get_string_value(obj, field)
self.xml.characters(smart_unicode(value))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related = getattr(obj, field.name)
if related is not None:
if field.rel.field_name == related._meta.pk.name:
# Related to remote object via primary key
related = related._get_pk_val()
else:
# Related to remote object via other field
related = getattr(related, field.rel.field_name)
self.xml.characters(smart_unicode(related))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.creates_table:
self._start_relational_field(field)
for relobj in getattr(obj, field.name).iterator():
self.xml.addQuickElement("object", attrs={"pk" : smart_unicode(relobj._get_pk_val())})
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"rel" : field.rel.__class__.__name__,
"to" : smart_unicode(field.rel.to._meta),
})
class Deserializer(base.Deserializer):
    """
    Deserialize XML.

    Iterates over <object> elements in the stream, yielding one
    DeserializedObject per element (Python 2 iterator protocol).
    """
    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # pulldom streams the document, so large fixture files do not have
        # to be fully loaded into memory.
        self.event_stream = pulldom.parse(self.stream)
    def next(self):
        # Advance to the next <object> element, materialize its subtree and
        # convert it; exhausting the stream ends iteration.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration
    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.

        Raises DeserializationError for a missing pk attribute, a missing
        field name attribute or an unresolvable model identifier.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")
        # Start building a data dictionary from the object. If the node is
        # missing the pk attribute, bail.
        pk = node.getAttribute("pk")
        if not pk:
            raise base.DeserializationError("<object> node is missing the 'pk' attribute")
        data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}
        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")
            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly.
            field = Model._meta.get_field(field_name)
            # As is usually the case, relation fields get the special treatment.
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child marks a NULL value; otherwise the node's
                # inner text is converted via the field's to_python().
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value
        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(Model(**data), m2m_data)
    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            # Convert the text using the *target* field's to_python(), since
            # the FK may point at a non-pk field on the related model.
            return field.rel.to._meta.get_field(field.rel.field_name).to_python(
                getInnerText(node).strip())
    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.

        Returns the list of related primary keys referenced by the
        <object pk="..."/> children.
        """
        return [field.rel.to._meta.pk.to_python(
            c.getAttribute("pk"))
            for c in node.getElementsByTagName("object")]
    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute" \
                % (node.nodeName, attr))
        try:
            # get_model raises TypeError when the identifier does not split
            # into the expected (app_label, model_name) pair.
            Model = models.get_model(*model_identifier.split("."))
        except TypeError:
            Model = None
        if Model is None:
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'" % \
                (node.nodeName, model_identifier))
        return Model
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).

    Text and CDATA children contribute their character data; element
    children are descended into; every other node type (comments,
    processing instructions, ...) is ignored.
    """
    # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    pieces = []
    text_like = (node.TEXT_NODE, node.CDATA_SECTION_NODE)
    for child in node.childNodes:
        if child.nodeType in text_like:
            pieces.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            pieces.append(getInnerText(child))
    return u"".join(pieces)
|
JonasThomas/free-cad | refs/heads/master | src/Mod/Arch/Arch.py | 1 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD Arch API"
__author__ = "Yorik van Havre"
__url__ = "http://free-cad.sourceforge.net"
import FreeCADGui
# NOTE(review): updateLocale() is called at import time, presumably to load
# the GUI translations before the Arch commands register themselves -- confirm
# it is safe to call this early.
FreeCADGui.updateLocale()
# Aggregate the public API of all Arch sub-modules: a single "import Arch"
# exposes every Arch tool, command and document object type.
from ArchWall import *
from ArchCell import *
from ArchFloor import *
from ArchSite import *
from ArchBuilding import *
from ArchStructure import *
from ArchCommands import *
from ArchSectionPlane import *
from ArchWindow import *
from ArchAxis import *
from ArchRoof import *
|
pemontto/engine-python | refs/heads/master | pollAlerts.py | 1 | #!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
Subscribe to the Prelert Engine API Alerts long poll end point for
alerts.
The script is invoked with 1 positional argument -the id of the
job to alert on. Optional parameters set the threshold arguments
at which to alert on. One of --anomalyScore or --normalizedProbability
should be set.
The script runs in an infinite loop re-subscribing to new alerts after
the request either times out or an alert is returned.
Run the script with '--help' to see the options.
'''
import argparse
import sys
import json
import logging
import time
from prelert.engineApiClient import EngineApiClient
# defaults
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v1'
def setupLogging():
    """Configure the root logger to emit INFO-level records to the console."""
    log_format = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)
def parseArguments():
    """Build the command line parser and return the parsed arguments.

    Defaults for --host/--port come from the module-level HOST and PORT
    constants; the job id is the single required positional argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default=HOST,
                        help="The Prelert Engine API host, defaults to " + HOST)
    parser.add_argument("--port", default=PORT,
                        help="The Prelert Engine API port, defaults to " + str(PORT))
    parser.add_argument("--anomalyScore", type=float, default=None,
                        help="Alert on buckets with anomaly score >= this value")
    parser.add_argument("--normalizedProbability", type=float, default=None,
                        help="Alert on records with a normalized probablilty >= this")
    parser.add_argument("--timeout", type=int, default=None,
                        help="The long poll timeout period")
    parser.add_argument("jobid", help="The job to alert on")
    return parser.parse_args()
def printHeader():
    """Print the comma separated column header for the alert output."""
    print("Timestamp, Anomaly Score, Normalized Probablilty, URI, Results")
def printAlert(alert):
    """Print one alert as a comma separated line.

    An alert carries either a whole 'bucket' dict or a list of 'records';
    whichever is present is appended as the final column.
    """
    data = alert['bucket'] if 'bucket' in alert else alert['records']
    line = "{0}, {1}, {2}, {3}. {4}".format(alert['timestamp'],
        alert['anomalyScore'], alert['maxNormalizedProbability'],
        alert['uri'], data)
    print(line)
def main():
    """Subscribe to the Engine API alert long poll and print each alert.

    Loops forever, re-subscribing after each timeout or delivered alert,
    until the server returns a non-200 status or the user hits Ctrl-C.
    """
    setupLogging()
    args = parseArguments()
    job_id = args.jobid
    # Create the REST API client
    engine_client = EngineApiClient(args.host, BASE_URL, args.port)
    logging.info("Subscribing to job '" + job_id + "' for alerts")
    printHeader()
    while True:
        try:
            (http_status_code, response) = engine_client.alerts_longpoll(job_id,
                normalized_probability_threshold=args.normalizedProbability,
                anomaly_score_threshold=args.anomalyScore, timeout=args.timeout)
            if http_status_code != 200:
                print((http_status_code, json.dumps(response)))
                break
            # A timed-out long poll returns timeout == True with no alert
            # payload; only genuine alerts are printed.
            if not response['timeout']:
                printAlert(response)
        except KeyboardInterrupt:
            print("Exiting script...")
            # BUG FIX: previously the loop kept running (and re-subscribing)
            # after Ctrl-C despite announcing the exit.
            break
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
BeyondTheClouds/enoslib | refs/heads/master | docs/tutorials/network_emulation/tuto_svc_netem_s.py | 1 | import logging
from pathlib import Path

import enoslib as en

logging.basicConfig(level=logging.DEBUG)

CLUSTER = "paravance"
SITE = en.g5k_api_utils.get_cluster_site(CLUSTER)

# BUG FIX: job_name was computed but never used -- from_settings() received
# the full __file__ path instead of just the script's file name.
job_name = Path(__file__).name

# One shared production network plus a private kavlan on which the latency
# constraints will be applied.
prod = en.G5kNetworkConf(id="n1", type="prod", roles=["my_network"], site=SITE)
private = en.G5kNetworkConf(id="n2", type="kavlan-global", roles=["private"], site=SITE)

conf = (
    en.G5kConf.from_settings(job_name=job_name, walltime="00:30:00")
    .add_network_conf(prod)
    .add_network_conf(private)
    .add_machine(
        roles=["paris"],
        cluster=CLUSTER,
        nodes=1,
        primary_network=prod,
        secondary_networks=[private],
    )
    .add_machine(
        roles=["londres"],
        cluster=CLUSTER,
        nodes=1,
        primary_network=prod,
        secondary_networks=[private],
    )
    .add_machine(
        roles=["berlin"],
        cluster=CLUSTER,
        nodes=1,
        primary_network=prod,
        secondary_networks=[private],
    )
    .finalize()
)

provider = en.G5k(conf)
roles, networks = provider.init()

# Enrich the host descriptions with the discovered network information so the
# Netem service can address the private network interfaces.
roles = en.sync_info(roles, networks)

# Emulate a different symmetric delay towards each site on the private vlan.
# (NOTE: "symetric" is the parameter name as spelled by the enoslib API.)
netem = en.Netem()
(
    netem
    .add_constraints("delay 10ms", roles["paris"], networks=networks["private"], symetric=True)
    .add_constraints("delay 20ms", roles["londres"], networks=networks["private"], symetric=True)
    .add_constraints("delay 30ms", roles["berlin"], networks=networks["private"], symetric=True)
)
netem.deploy()
netem.validate()

for role, hosts in roles.items():
    print(role)
    for host in hosts:
        print(f"-- {host.alias}")
|
rodrigocaldas/trabalho_programacao_web | refs/heads/master | languages/nl.py | 159 | # coding: utf8
{
'!langcode!': 'nl',
'!langname!': 'Nederlands',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%(nrows)s records found': '%(nrows)s records gevonden',
'%d days ago': '%d dagen geleden',
'%d weeks ago': '%d weken geleden',
'%s %%{row} deleted': '%s rijen verwijderd',
'%s %%{row} updated': '%s rijen geupdate',
'%s selected': '%s geselecteerd',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(zoiets als "nl-nl")',
'1 day ago': '1 dag geleden',
'1 week ago': '1 week geleden',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'A new version of web2py is available': 'Een nieuwe versie van web2py is beschikbaar',
'A new version of web2py is available: %s': 'Een nieuwe versie van web2py is beschikbaar: %s',
'About': 'Over',
'about': 'over',
'About application': 'Over applicatie',
'Access Control': 'Toegangscontrole',
'Add': 'Toevoegen',
'additional code for your application': 'additionele code voor je applicatie',
'admin disabled because no admin password': 'admin is uitgezet omdat er geen admin wachtwoord is',
'admin disabled because not supported on google app engine': 'admin is uitgezet omdat dit niet ondersteund wordt op google app engine',
'admin disabled because unable to access password file': 'admin is uitgezet omdat het wachtwoordbestand niet geopend kan worden',
'Admin is disabled because insecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Admin is disabled because unsecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Administration': 'Administratie',
'Administrative Interface': 'Administratieve Interface',
'Administrator Password:': 'Administrator Wachtwoord',
'Ajax Recipes': 'Ajax Recepten',
'And': 'En',
'and rename it (required):': 'en hernoem deze (vereist)',
'and rename it:': 'en hernoem:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is uitgezet vanwege een onveilig kanaal',
'application "%s" uninstalled': 'applicatie "%s" gedeïnstalleerd',
'application compiled': 'applicatie gecompileerd',
'application is compiled and cannot be designed': 'applicatie is gecompileerd en kan niet worden ontworpen',
'Are you sure you want to delete file "%s"?': 'Weet je zeker dat je bestand "%s" wilt verwijderen?',
'Are you sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Are you sure you want to uninstall application "%s"?': 'Weet je zeker dat je applicatie "%s" wilt deïnstalleren?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'LET OP: Login vereist een beveiligde (HTTPS) connectie of moet draaien op localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'LET OP: TESTEN IS NIET THREAD SAFE, PROBEER NIET GELIJKTIJDIG MEERDERE TESTS TE DOEN.',
'ATTENTION: you cannot edit the running application!': 'LET OP: je kan de applicatie die nu draait niet editen!',
'Authentication': 'Authenticatie',
'Available Databases and Tables': 'Beschikbare databases en tabellen',
'Back': 'Terug',
'Buy this book': 'Koop dit boek',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors en sessies geleegd',
'Cannot be empty': 'Mag niet leeg zijn',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Kan niet compileren: er bevinden zich fouten in je app. Debug, corrigeer de fouten en probeer opnieuw.',
'cannot create file': 'kan bestand niet maken',
'cannot upload file "%(filename)s"': 'kan bestand "%(filename)s" niet uploaden',
'Change Password': 'Wijzig wachtwoord',
'Change password': 'Wijzig Wachtwoord',
'change password': 'wijzig wachtwoord',
'check all': 'vink alles aan',
'Check to delete': 'Vink aan om te verwijderen',
'clean': 'leeg',
'Clear': 'Leeg',
'Clear CACHE?': 'Leeg CACHE?',
'Clear DISK': 'Leeg DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Klik om voor upgrades te controleren',
'Client IP': 'Client IP',
'Community': 'Community',
'compile': 'compileren',
'compiled application removed': 'gecompileerde applicatie verwijderd',
'Components and Plugins': 'Components en Plugins',
'contains': 'bevat',
'Controller': 'Controller',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Copyright': 'Copyright',
'create file with filename:': 'maak bestand met de naam:',
'Create new application': 'Maak nieuwe applicatie:',
'create new application:': 'maak nieuwe applicatie',
'Created By': 'Gemaakt Door',
'Created On': 'Gemaakt Op',
'crontab': 'crontab',
'Current request': 'Huidige request',
'Current response': 'Huidige response',
'Current session': 'Huidige sessie',
'currently saved or': 'op het moment opgeslagen of',
'customize me!': 'pas me aan!',
'data uploaded': 'data geupload',
'Database': 'Database',
'Database %s select': 'Database %s select',
'database administration': 'database administratie',
'Date and Time': 'Datum en Tijd',
'db': 'db',
'DB Model': 'DB Model',
'defines tables': 'definieer tabellen',
'Delete': 'Verwijder',
'delete': 'verwijder',
'delete all checked': 'verwijder alle aangevinkten',
'Delete:': 'Verwijder:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Deploy op Google App Engine',
'Deployment Recipes': 'Deployment Recepten',
'Description': 'Beschrijving',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design voor',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Geleegd',
'Documentation': 'Documentatie',
"Don't know what to do?": 'Weet je niet wat je moet doen?',
'done!': 'gereed!',
'Download': 'Download',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail ongeldig',
'edit': 'bewerk',
'EDIT': 'BEWERK',
'Edit': 'Bewerk',
'Edit application': 'Bewerk applicatie',
'edit controller': 'bewerk controller',
'Edit current record': 'Bewerk huidig record',
'Edit Profile': 'Bewerk Profiel',
'edit profile': 'bewerk profiel',
'Edit This App': 'Bewerk Deze App',
'Editing file': 'Bewerk bestand',
'Editing file "%s"': 'Bewerk bestand "%s"',
'Email and SMS': 'E-mail en SMS',
'enter a number between %(min)g and %(max)g': 'geef een getal tussen %(min)g en %(max)g',
'enter an integer between %(min)g and %(max)g': 'geef een integer tussen %(min)g en %(max)g',
'Error logs for "%(app)s"': 'Error logs voor "%(app)s"',
'errors': 'errors',
'Errors': 'Errors',
'Export': 'Export',
'export as csv file': 'exporteer als csv-bestand',
'exposes': 'stelt bloot',
'extends': 'extends',
'failed to reload module': 'niet gelukt om module te herladen',
'False': 'Onwaar',
'FAQ': 'FAQ',
'file "%(filename)s" created': 'bestand "%(filename)s" gemaakt',
'file "%(filename)s" deleted': 'bestand "%(filename)s" verwijderd',
'file "%(filename)s" uploaded': 'bestand "%(filename)s" geupload',
'file "%(filename)s" was not deleted': 'bestand "%(filename)s" was niet verwijderd',
'file "%s" of %s restored': 'bestand "%s" van %s hersteld',
'file changed on disk': 'bestand aangepast op schijf',
'file does not exist': 'bestand bestaat niet',
'file saved on %(time)s': 'bestand bewaard op %(time)s',
'file saved on %s': 'bestand bewaard op %s',
'First name': 'Voornaam',
'Forbidden': 'Verboden',
'Forms and Validators': 'Formulieren en Validators',
'Free Applications': 'Gratis Applicaties',
'Functions with no doctests will result in [passed] tests.': 'Functies zonder doctests zullen resulteren in [passed] tests.',
'Group %(group_id)s created': 'Groep %(group_id)s gemaakt',
'Group ID': 'Groep ID',
'Group uniquely assigned to user %(id)s': 'Groep is uniek toegekend aan gebruiker %(id)s',
'Groups': 'Groepen',
'Hello World': 'Hallo Wereld',
'help': 'help',
'Home': 'Home',
'How did you get here?': 'Hoe ben je hier gekomen?',
'htmledit': 'Bewerk HTML',
'import': 'import',
'Import/Export': 'Import/Export',
'includes': 'includes',
'Index': 'Index',
'insert new': 'voeg nieuwe',
'insert new %s': 'voeg nieuwe %s',
'Installed applications': 'Geïnstalleerde applicaties',
'internal error': 'interne error',
'Internal State': 'Interne State',
'Introduction': 'Introductie',
'Invalid action': 'Ongeldige actie',
'Invalid email': 'Ongeldig emailadres',
'invalid password': 'ongeldig wachtwoord',
'Invalid password': 'Ongeldig wachtwoord',
'Invalid Query': 'Ongeldige Query',
'invalid request': 'ongeldige request',
'invalid ticket': 'ongeldige ticket',
'Is Active': 'Is Actief',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'taalbestand "%(filename)s" gemaakt/geupdate',
'Language files (static strings) updated': 'Taalbestanden (statische strings) geupdate',
'languages': 'talen',
'Languages': 'Talen',
'languages updated': 'talen geupdate',
'Last name': 'Achternaam',
'Last saved on:': 'Laatst bewaard op:',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': 'Licentie voor',
'Live Chat': 'Live Chat',
'loading...': 'laden...',
'Logged in': 'Ingelogd',
'Logged out': 'Uitgelogd',
'Login': 'Login',
'login': 'login',
'Login to the Administrative Interface': 'Inloggen op de Administratieve Interface',
'logout': 'logout',
'Logout': 'Logout',
'Lost Password': 'Wachtwoord Kwijt',
'Lost password?': 'Wachtwoord kwijt?',
'Main Menu': 'Hoofdmenu',
'Manage Cache': 'Beheer Cache',
'Menu Model': 'Menu Model',
'merge': 'samenvoegen',
'Models': 'Modellen',
'models': 'modellen',
'Modified By': 'Aangepast Door',
'Modified On': 'Aangepast Op',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'Mijn Sites',
'Name': 'Naam',
'New': 'Nieuw',
'new application "%s" created': 'nieuwe applicatie "%s" gemaakt',
'New password': 'Nieuw wachtwoord',
'New Record': 'Nieuw Record',
'new record inserted': 'nieuw record ingevoegd',
'next 100 rows': 'volgende 100 rijen',
'NO': 'NEE',
'No databases in this application': 'Geen database in deze applicatie',
'Object or table name': 'Object of tabelnaam',
'Old password': 'Oude wachtwoord',
'Online examples': 'Online voorbeelden',
'Or': 'Of',
'or import from csv file': 'of importeer van csv-bestand',
'or provide application url:': 'of geef een applicatie url:',
'Origin': 'Bron',
'Original/Translation': 'Oorspronkelijk/Vertaling',
'Other Plugins': 'Andere Plugins',
'Other Recipes': 'Andere Recepten',
'Overview': 'Overzicht',
'pack all': 'pack all',
'pack compiled': 'pack compiled',
'Password': 'Wachtwoord',
"Password fields don't match": 'Wachtwoordvelden komen niet overeen',
'Peeking at file': 'Naar bestand aan het gluren',
'please input your password again': 'geef alstublieft nogmaals uw wachtwoord',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Inleiding',
'previous 100 rows': 'vorige 100 rijen',
'Profile': 'Profiel',
'Python': 'Python',
'Query': 'Query',
'Query:': 'Query:',
'Quick Examples': 'Snelle Voorbeelden',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Geleegd',
'Recipes': 'Recepten',
'Record': 'Record',
'record does not exist': 'record bestaat niet',
'Record ID': 'Record ID',
'Record id': 'Record id',
'register': 'registreer',
'Register': 'Registreer',
'Registration identifier': 'Registratie identifier',
'Registration key': 'Registratie sleutel',
'Registration successful': 'Registratie succesvol',
'Remember me (for 30 days)': 'Onthoudt mij (voor 30 dagen)',
'remove compiled': 'verwijder gecompileerde',
'Request reset password': 'Vraag een wachtwoord reset aan',
'Reset Password key': 'Reset Wachtwoord sleutel',
'Resolve Conflict file': 'Los Conflictbestand op',
'restore': 'herstel',
'revert': 'herstel',
'Role': 'Rol',
'Rows in Table': 'Rijen in tabel',
'Rows selected': 'Rijen geselecteerd',
'save': 'bewaar',
'Save profile': 'Bewaar profiel',
'Saved file hash:': 'Opgeslagen file hash:',
'Search': 'Zoek',
'Semantic': 'Semantisch',
'Services': 'Services',
'session expired': 'sessie verlopen',
'shell': 'shell',
'site': 'site',
'Size of cache:': 'Grootte van cache:',
'some files could not be removed': 'sommige bestanden konden niet worden verwijderd',
'starts with': 'begint met',
'state': 'state',
'static': 'statisch',
'Static files': 'Statische bestanden',
'Statistics': 'Statistieken',
'Stylesheet': 'Stylesheet',
'Submit': 'Submit',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Table': 'Tabel',
'Table name': 'Tabelnaam',
'test': 'test',
'Testing application': 'Applicatie testen',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'De "query" is een conditie zoals "db.tabel1.veld1==\'waarde\'". Zoiets als "db.tabel1.veld1==db.tabel2.veld2" resulteert in een SQL JOIN.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'the applicatie logica, elk URL pad is gemapped in een blootgestelde functie in de controller',
'The Core': 'De Core',
'the data representation, define database tables and sets': 'de data representatie, definieert database tabellen en sets',
'The output of the file is a dictionary that was rendered by the view %s': 'De output van het bestand is een dictionary die gerenderd werd door de view %s',
'the presentations layer, views are also known as templates': 'de presentatie laag, views zijn ook bekend als templates',
'The Views': 'De Views',
'There are no controllers': 'Er zijn geen controllers',
'There are no models': 'Er zijn geen modellen',
'There are no modules': 'Er zijn geen modules',
'There are no static files': 'Er zijn geen statische bestanden',
'There are no translators, only default language is supported': 'Er zijn geen vertalingen, alleen de standaard taal wordt ondersteund.',
'There are no views': 'Er zijn geen views',
'these files are served without processing, your images go here': 'Deze bestanden worden geserveerd zonder verdere verwerking, je afbeeldingen horen hier',
'This App': 'Deze App',
'This is a copy of the scaffolding application': 'Dit is een kopie van de steiger-applicatie',
'This is the %(filename)s template': 'Dit is de %(filename)s template',
'Ticket': 'Ticket',
'Time in Cache (h:m:s)': 'Tijd in Cache (h:m:s)',
'Timestamp': 'Timestamp (timestamp)',
'to previous version.': 'naar vorige versie.',
'too short': 'te kort',
'translation strings for the application': 'vertaalstrings voor de applicatie',
'True': 'Waar',
'try': 'probeer',
'try something like': 'probeer zoiets als',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Niet mogelijk om te controleren voor upgrades',
'unable to create application "%s"': 'niet mogelijk om applicatie "%s" te maken',
'unable to delete file "%(filename)s"': 'niet mogelijk om bestand "%(filename)s" te verwijderen',
'Unable to download': 'Niet mogelijk om te downloaden',
'Unable to download app': 'Niet mogelijk om app te downloaden',
'unable to parse csv file': 'niet mogelijk om csv-bestand te parsen',
'unable to uninstall "%s"': 'niet mogelijk om "%s" te deïnstalleren',
'uncheck all': 'vink alles uit',
'uninstall': ' deïnstalleer',
'update': 'update',
'update all languages': 'update alle talen',
'Update:': 'Update:',
'upload application:': 'upload applicatie:',
'Upload existing application': 'Upload bestaande applicatie',
'upload file:': 'upload bestand',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Gebruik (...)&(...) voor AND, (...)|(...) voor OR, en ~(...) voor NOT om meer complexe queries te maken.',
'User %(id)s Logged-in': 'Gebruiker %(id)s Logged-in',
'User %(id)s Logged-out': 'Gebruiker %(id)s Logged-out',
'User %(id)s Password changed': 'Wachtwoord van gebruiker %(id)s is veranderd',
'User %(id)s Password reset': 'Wachtwoord van gebruiker %(id)s is gereset',
'User %(id)s Profile updated': 'Profiel van Gebruiker %(id)s geupdate',
'User %(id)s Registered': 'Gebruiker %(id)s Geregistreerd',
'User ID': 'User ID',
'value already in database or empty': 'waarde al in database of leeg',
'Verify Password': 'Verifieer Wachtwoord',
'versioning': 'versionering',
'Videos': 'Videos',
'View': 'View',
'view': 'view',
'Views': 'Views',
'views': 'views',
'web2py is up to date': 'web2py is up to date',
'web2py Recent Tweets': 'web2py Recente Tweets',
'Welcome': 'Welkom',
'Welcome %s': 'Welkom %s',
'Welcome to web2py': 'Welkom bij web2py',
'Welcome to web2py!': 'Welkom bij web2py!',
'Which called the function %s located in the file %s': 'Die functie %s aanriep en zich bevindt in het bestand %s',
'YES': 'JA',
'You are successfully running web2py': 'Je draait web2py succesvol',
'You can modify this application and adapt it to your needs': 'Je kan deze applicatie aanpassen naar je eigen behoeften',
'You visited the url %s': 'Je bezocht de url %s',
}
|
Teamxrtc/webrtc-streaming-node | refs/heads/master | third_party/webrtc/src/chromium/src/third_party/webpagereplay/sslproxy.py | 31 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extends BaseHTTPRequestHandler with SSL certificate generation."""
import logging
import socket
import certutils
def _SetUpUsingDummyCert(handler):
    """Sets up connection providing the certificate to the client.

    This method handles Server Name Indication (SNI) using dummy certs.

    Args:
        handler: an instance of BaseHTTPServer.BaseHTTPRequestHandler that is
            used by some instance of BaseHTTPServer.HTTPServer.
    """
    # One of: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or TLSv1_METHOD
    context = certutils.get_ssl_context()
    def handle_servername(connection):
        """A SNI callback that happens during do_handshake()."""
        try:
            host = connection.get_servername()
            if host:
                # Fetch (or generate) a certificate for the requested host and
                # swap in a fresh context that presents it for this connection.
                cert_str = (
                    handler.server.get_certificate(host))
                new_context = certutils.get_ssl_context()
                cert = certutils.load_cert(cert_str)
                new_context.use_certificate(cert)
                new_context.use_privatekey_file(handler.server.ca_cert_path)
                connection.set_context(new_context)
                return new_context
            # else: fail with 'no shared cipher'
        except Exception, e:
            # Do not leak any exceptions or else openssl crashes.
            logging.error('Exception in SNI handler: %s', e)
    context.set_tlsext_servername_callback(handle_servername)
    # Replace the plain socket with an SSL connection in server (accept) mode.
    handler.connection = certutils.get_ssl_connection(context, handler.connection)
    handler.connection.set_accept_state()
    try:
        handler.connection.do_handshake()
    except certutils.Error, v:
        host = handler.connection.get_servername()
        if not host:
            # Without SNI there is no hostname to pick a certificate for;
            # drop the request instead of raising.
            logging.error('Dropping request without SNI')
            return ''
        raise certutils.Error('SSL handshake error %s: %s' % (host, str(v)))
    # Re-wrap the read/write streams with our new connection.
    handler.rfile = socket._fileobject(handler.connection, 'rb', handler.rbufsize,
                                       close=False)
    handler.wfile = socket._fileobject(handler.connection, 'wb', handler.wbufsize,
                                       close=False)
def wrap_handler(handler_class):
    """Wraps a BaseHTTPHandler with SSL MITM certificates."""
    import_error = certutils.openssl_import_error
    if import_error:
        # pylint: disable=raising-bad-type
        raise import_error
    class WrappedHandler(handler_class):
        def setup(self):
            # Run the normal socket setup first, then wrap the connection in
            # an SSL layer that presents a per-host dummy certificate.
            handler_class.setup(self)
            _SetUpUsingDummyCert(self)
        def finish(self):
            # Let the base class flush its buffers before tearing down the
            # SSL connection.
            handler_class.finish(self)
            self.connection.shutdown()
            self.connection.close()
    return WrappedHandler
|
flotre/sickbeard-vfvo | refs/heads/master | lib/guessit/textutils.py | 48 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2008-2012 Nicolas Wack <wackou@gmail.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import s
from guessit.patterns import sep
import functools
import unicodedata
import re
# string-related functions
def normalize_unicode(s):
    """Return *s* normalized to Unicode canonical composed form (NFC)."""
    return unicodedata.normalize('NFC', s)
def strip_brackets(s):
    """Remove one pair of matching enclosing brackets from *s*, if present."""
    if not s:
        return s
    # Only strip when first and last characters form a matching pair.
    if (s[0], s[-1]) in (('[', ']'), ('(', ')'), ('{', '}')):
        return s[1:-1]
    return s
def clean_string(s):
    """Replace separator characters with spaces and trim outer separators.

    Separators inside the string become single spaces, except dashes which
    are kept; separators of any kind (including dashes) are stripped from
    both ends of the result.
    """
    for c in sep[:-2]:  # do not remove dashes ('-')
        s = s.replace(c, ' ')
    # collapse runs of whitespace into single spaces
    result = ' '.join(s.split())
    # strip any remaining separator chars (e.g. dashes) from both ends
    while result and result[0] in sep:
        result = result[1:]
    while result and result[-1] in sep:
        result = result[:-1]
    return result
# Raw string: '\w' in a plain literal is an invalid escape sequence
# (DeprecationWarning since Python 3.6, a SyntaxError in the future).
_words_rexp = re.compile(r'\w+', re.UNICODE)


def find_words(s):
    """Return the list of words in *s*; underscores count as separators."""
    return _words_rexp.findall(s.replace('_', ' '))
def reorder_title(title):
    """Move a trailing ',The' or ', The' article to the front of the title."""
    lower = title.lower()
    if lower.endswith(',the'):
        return title[-3:] + ' ' + title[:-4]
    if lower.endswith(', the'):
        return title[-3:] + ' ' + title[:-5]
    return title
def str_replace(string, pos, c):
    """Return *string* with the character at index *pos* replaced by *c*."""
    before, after = string[:pos], string[pos + 1:]
    return before + c + after
def str_fill(string, region, c):
    """Return *string* with the half-open [start, end) *region* overwritten by *c*."""
    start, end = region
    filler = c * (end - start)
    return string[:start] + filler + string[end:]
def levenshtein(a, b):
    """Return the Levenshtein (edit) distance between strings *a* and *b*.

    The distance is the minimum number of single-character insertions,
    deletions and substitutions needed to transform *a* into *b*.
    """
    if not a:
        return len(b)
    if not b:
        return len(a)
    # Classic dynamic programming, but keeping only the previous row:
    # O(len(b)) memory instead of the full O(len(a) * len(b)) matrix.
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            cost = 0 if ca == cb else 1
            current.append(min(previous[j] + 1,          # deletion
                               current[j - 1] + 1,       # insertion
                               previous[j - 1] + cost))  # substitution
        previous = current
    return previous[-1]
# group-related functions
def find_first_level_groups_span(string, enclosing):
    """Return a list of (start, end) spans for groups delimited by the given
    enclosing characters.

    Nested groups are not returned: '(ab(c)(d))' yields a single span
    covering the whole string.

    >>> find_first_level_groups_span('abcd', '()')
    []
    >>> find_first_level_groups_span('abc(de)fgh', '()')
    [(3, 7)]
    >>> find_first_level_groups_span('(ab(c)(d))', '()')
    [(0, 10)]
    >>> find_first_level_groups_span('ab[c]de[f]gh(i)', '[]')
    [(2, 5), (7, 10)]
    """
    opening, closing = enclosing
    spans = []
    open_stack = []  # indices of currently unmatched opening characters
    for pos, char in enumerate(string):
        if char == opening:
            open_stack.append(pos)
        elif char == closing:
            if not open_stack:
                # stray closing char with no matching opener: ignore it
                continue
            start = open_stack.pop()
            if not open_stack:
                # back at depth 0, so this closes a first-level group
                spans.append((start, pos + 1))
    return spans
def split_on_groups(string, groups):
    """Split *string* using the (start, end) groups as boundaries.

    >>> s(split_on_groups('0123456789', [ (2, 4) ]))
    ['01', '23', '456789']
    >>> s(split_on_groups('0123456789', [ (2, 4), (4, 6) ]))
    ['01', '23', '45', '6789']
    >>> s(split_on_groups('0123456789', [ (5, 7), (2, 4) ]))
    ['01', '23', '4', '56', '789']
    """
    if not groups:
        return [string]
    # collect every distinct group edge, in order
    cuts = sorted({bound for span in groups for bound in span})
    # make sure the full string is covered from 0 to len(string)
    if cuts[0] != 0:
        cuts.insert(0, 0)
    if cuts[-1] != len(string):
        cuts.append(len(string))
    pieces = (string[a:b] for a, b in zip(cuts[:-1], cuts[1:]))
    return [piece for piece in pieces if piece]  # only non-empty pieces
def find_first_level_groups(string, enclosing, blank_sep=None):
    """Return the list of groups obtained by splitting on explicit grouping.

    Groups are delimited by the given enclosing characters. When *blank_sep*
    is given, each delimiter character is replaced by it in the returned
    groups; None leaves the delimiters untouched.

    Nested groups are not split: '(ab(c)(d))' is returned as a single group.

    >>> s(find_first_level_groups('', '()'))
    ['']
    >>> s(find_first_level_groups('abcd', '()'))
    ['abcd']
    >>> s(find_first_level_groups('abc(de)fgh', '()'))
    ['abc', '(de)', 'fgh']
    >>> s(find_first_level_groups('(ab(c)(d))', '()', blank_sep = '_'))
    ['_ab(c)(d)_']
    >>> s(find_first_level_groups('ab[c]de[f]gh(i)', '[]'))
    ['ab', '[c]', 'de', '[f]', 'gh(i)']
    >>> s(find_first_level_groups('()[]()', '()', blank_sep = '-'))
    ['--', '[]', '--']
    """
    spans = find_first_level_groups_span(string, enclosing)
    if blank_sep:
        for start, end in spans:
            # blank both the opening and the closing delimiter of each group
            string = str_replace(string, start, blank_sep)
            string = str_replace(string, end - 1, blank_sep)
    return split_on_groups(string, spans)
|
billy1380/appengine-pipelines | refs/heads/master | python/src/pipeline/status_ui.py | 17 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status UI for Google App Engine Pipeline API."""
import logging
import os
import pkgutil
import traceback
from google.appengine.api import users
from google.appengine.ext import webapp
try:
import json
except ImportError:
import simplejson as json
# Relative imports
import util
class _StatusUiHandler(webapp.RequestHandler):
  """Render the status UI."""

  # Maps a URL suffix to (path relative to this package, MIME type).
  _RESOURCE_MAP = {
    '/status': ('ui/status.html', 'text/html'),
    '/status.css': ('ui/status.css', 'text/css'),
    '/status.js': ('ui/status.js', 'text/javascript'),
    '/list': ('ui/root_list.html', 'text/html'),
    '/list.css': ('ui/root_list.css', 'text/css'),
    '/list.js': ('ui/root_list.js', 'text/javascript'),
    '/common.js': ('ui/common.js', 'text/javascript'),
    '/common.css': ('ui/common.css', 'text/css'),
    '/jquery-1.4.2.min.js': ('ui/jquery-1.4.2.min.js', 'text/javascript'),
    '/jquery.treeview.min.js': ('ui/jquery.treeview.min.js', 'text/javascript'),
    '/jquery.cookie.js': ('ui/jquery.cookie.js', 'text/javascript'),
    '/jquery.timeago.js': ('ui/jquery.timeago.js', 'text/javascript'),
    '/jquery.ba-hashchange.min.js': (
        'ui/jquery.ba-hashchange.min.js', 'text/javascript'),
    '/jquery.json.min.js': ('ui/jquery.json.min.js', 'text/javascript'),
    '/jquery.treeview.css': ('ui/jquery.treeview.css', 'text/css'),
    '/treeview-default.gif': ('ui/images/treeview-default.gif', 'image/gif'),
    '/treeview-default-line.gif': (
        'ui/images/treeview-default-line.gif', 'image/gif'),
    '/treeview-black.gif': ('ui/images/treeview-black.gif', 'image/gif'),
    '/treeview-black-line.gif': (
        'ui/images/treeview-black-line.gif', 'image/gif'),
    '/images/treeview-default.gif': (
        'ui/images/treeview-default.gif', 'image/gif'),
    '/images/treeview-default-line.gif': (
        'ui/images/treeview-default-line.gif', 'image/gif'),
    '/images/treeview-black.gif': (
        'ui/images/treeview-black.gif', 'image/gif'),
    '/images/treeview-black-line.gif': (
        'ui/images/treeview-black-line.gif', 'image/gif'),
  }

  def get(self, resource=''):
    """Serves a single static UI resource.

    Args:
      resource: URL suffix identifying the resource; must be a key of
          _RESOURCE_MAP, otherwise a 404 is returned.
    """
    import pipeline  # Break circular dependency
    if pipeline._ENFORCE_AUTH:
      # Require a logged-in admin user when auth enforcement is enabled.
      if users.get_current_user() is None:
        logging.debug('User is not logged in')
        self.redirect(users.create_login_url(self.request.url))
        return

      if not users.is_current_user_admin():
        logging.debug('User is not admin: %r', users.get_current_user())
        self.response.out.write('Forbidden')
        self.response.set_status(403)
        return

    if resource not in self._RESOURCE_MAP:
      logging.debug('Could not find: %s', resource)
      self.response.set_status(404)
      self.response.out.write("Resource not found.")
      self.response.headers['Content-Type'] = 'text/plain'
      return

    relative_path, content_type = self._RESOURCE_MAP[resource]
    path = os.path.join(os.path.dirname(__file__), relative_path)

    # Allow clients to cache static resources unless running in debug mode.
    if not pipeline._DEBUG:
      self.response.headers["Cache-Control"] = "public, max-age=300"
    self.response.headers["Content-Type"] = content_type
    try:
      data = pkgutil.get_data(__name__, relative_path)
    except AttributeError:  # Python < 2.6.
      data = None
    if not data:
      # pkgutil is unavailable or returned nothing; read straight from the
      # filesystem, closing the file promptly (the original expression
      # leaked the file handle).
      with open(path, 'rb') as resource_file:
        data = resource_file.read()
    self.response.out.write(data)
class _BaseRpcHandler(webapp.RequestHandler):
  """Base handler for JSON-RPC responses.

  Sub-classes should fill in the 'json_response' property. All exceptions will
  be returned.
  """

  def get(self):
    import pipeline  # Break circular dependency
    if pipeline._ENFORCE_AUTH:
      # Only admins may call the RPC endpoints when auth is enforced.
      if not users.is_current_user_admin():
        logging.debug('User is not admin: %r', users.get_current_user())
        self.response.out.write('Forbidden')
        self.response.set_status(403)
        return

    # XSRF protection: browsers do not attach this header to cross-site
    # form/image requests, so requiring it blocks simple CSRF attempts.
    if (not pipeline._DEBUG and
        self.request.headers.get('X-Requested-With') != 'XMLHttpRequest'):
      logging.debug('Request missing X-Requested-With header')
      self.response.out.write('Request missing X-Requested-With header')
      self.response.set_status(403)
      return

    self.json_response = {}
    try:
      self.handle()
      output = json.dumps(self.json_response, cls=util.JsonEncoder)
    except Exception, e:
      # Errors are serialized into the JSON payload (with HTTP 200) rather
      # than surfaced as HTTP errors, so the status UI can render them.
      self.json_response.clear()
      self.json_response['error_class'] = e.__class__.__name__
      self.json_response['error_message'] = str(e)
      self.json_response['error_traceback'] = traceback.format_exc()
      output = json.dumps(self.json_response, cls=util.JsonEncoder)

    self.response.set_status(200)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.headers['Cache-Control'] = 'no-cache'
    self.response.out.write(output)

  def handle(self):
    """Fills in self.json_response; to be implemented by sub-classes."""
    raise NotImplementedError('To be implemented by sub-classes.')
class _TreeStatusHandler(_BaseRpcHandler):
  """RPC handler for getting the status of all children of root pipeline."""

  def handle(self):
    import pipeline  # Break circular dependency
    root_id = self.request.get('root_pipeline_id')
    self.json_response.update(pipeline.get_status_tree(root_id))
class _ClassPathListHandler(_BaseRpcHandler):
  """RPC handler for getting the list of all Pipeline classes defined."""

  def handle(self):
    import pipeline  # Break circular dependency
    class_paths = pipeline.get_pipeline_names()
    self.json_response['classPaths'] = class_paths
class _RootListHandler(_BaseRpcHandler):
  """RPC handler for getting the status of all root pipelines."""

  def handle(self):
    import pipeline  # Break circular dependency
    listing = pipeline.get_root_list(
        class_path=self.request.get('class_path'),
        cursor=self.request.get('cursor'))
    self.json_response.update(listing)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.