repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Cinntax/home-assistant | refs/heads/dev | homeassistant/components/wake_on_lan/__init__.py | 2 | """Support for sending Wake-On-LAN magic packets."""
from functools import partial
import logging
import voluptuous as vol
from homeassistant.const import CONF_MAC
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Integration domain and the name of the single service this component registers.
DOMAIN = "wake_on_lan"
CONF_BROADCAST_ADDRESS = "broadcast_address"
SERVICE_SEND_MAGIC_PACKET = "send_magic_packet"

# Service call schema: the target MAC address is required; the broadcast
# address (useful on routed / non-local networks) is optional.
WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA = vol.Schema(
    {vol.Required(CONF_MAC): cv.string, vol.Optional(CONF_BROADCAST_ADDRESS): cv.string}
)
async def async_setup(hass, config):
    """Set up the wake on LAN component."""
    # Imported lazily so the dependency is only loaded when the
    # component is actually configured.
    import wakeonlan

    async def send_magic_packet(call):
        """Send magic packet to wake up a device."""
        mac_address = call.data.get(CONF_MAC)
        broadcast_address = call.data.get(CONF_BROADCAST_ADDRESS)
        _LOGGER.info(
            "Send magic packet to mac %s (broadcast: %s)",
            mac_address,
            broadcast_address,
        )
        # Forward the broadcast address only when the caller supplied one;
        # otherwise let the library use its default.
        send_kwargs = {}
        if broadcast_address is not None:
            send_kwargs["ip_address"] = broadcast_address
        await hass.async_add_job(
            partial(wakeonlan.send_magic_packet, mac_address, **send_kwargs)
        )

    hass.services.async_register(
        DOMAIN,
        SERVICE_SEND_MAGIC_PACKET,
        send_magic_packet,
        schema=WAKE_ON_LAN_SEND_MAGIC_PACKET_SCHEMA,
    )
    return True
|
funkotron/django-lean | refs/heads/master | src/django_lean/experiments/tests/views.py | 7 | # -*- coding: utf-8 -*-
import logging
l = logging.getLogger(__name__)
from django.template import Template, RequestContext
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
# Template source for exercising the server-side {% experiment %} tag.
# "%%" escapes the percent sign so that only %(experiment_name)s is
# substituted by the later "%"-formatting step, before Django parses it.
EXPERIMENT_TEMPLATE = """
{%% load experiments %%}
{%% experiment %(experiment_name)s test %%}TEST{%% endexperiment %%}
{%% experiment %(experiment_name)s control %%}CONTROL{%% endexperiment %%}
"""

# Same idea for the client-side experiment tag; also renders the
# per-request client_side_experiments mapping entry for the experiment.
CLIENTSIDEEXPERIMENT_TEMPLATE = """
{%% load experiments %%}
{%% clientsideexperiment %(experiment_name)s %%}
{{ client_side_experiments.%(experiment_name)s }}
"""
@never_cache
def experiment_test(request, experiment_name):
    """Render the server-side experiment template for *experiment_name*."""
    source = EXPERIMENT_TEMPLATE % {'experiment_name': experiment_name}
    template = Template(source)
    return HttpResponse(template.render(RequestContext(request)))
@never_cache
def clientsideexperiment_test(request, experiment_name):
    """Render the client-side experiment template for *experiment_name*."""
    source = CLIENTSIDEEXPERIMENT_TEMPLATE % {'experiment_name': experiment_name}
    template = Template(source)
    return HttpResponse(template.render(RequestContext(request)))
def dummy404(request):
    """Return a plain-text 404 response (stand-in 'not found' handler for tests)."""
    return HttpResponse(content="Not found", status=404, content_type="text/plain")
|
mfalesni/cfme_tests | refs/heads/master | cfme/storage/object_store_container.py | 3 | # -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic_manageiq import (
Accordion,
BaseEntitiesView,
BreadCrumb,
ItemsToolBarViewSelector,
ManageIQTree,
SummaryTable,
)
from widgetastic_patternfly import Button, Dropdown
from widgetastic.widget import View, Text, NoSuchElementException
from cfme.base.ui import BaseLoggedInPage
from cfme.common import TagPageView, Taggable
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from cfme.modeling.base import BaseCollection, BaseEntity
class ObjectStoreContainerToolbar(View):
    """The toolbar on the Object Store Containers page"""
    # Top-of-page dropdown menus.
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    download = Dropdown('Download')
    # Grid / tile / list display switcher.
    view_selector = View.nested(ItemsToolBarViewSelector)
class ObjectStoreContainerDetailsToolbar(View):
    """The toolbar on the Object Store Containers detail page"""
    policy = Dropdown('Policy')
    # Single button (not a dropdown) on the details page.
    download = Button(title='Download summary in PDF format')
class ObjectStoreContainerDetailsEntities(View):
    """The entities on the Object Store Containers detail page"""
    breadcrumb = BreadCrumb()
    # Summary tables keyed by their on-page section titles.
    properties = SummaryTable('Properties')
    relationships = SummaryTable('Relationships')
    smart_management = SummaryTable('Smart Management')
class ObjectStoreContainerDetailSidebar(View):
    """The accordion on the Object Store Containers details page"""

    @View.nested
    class properties(Accordion):  # noqa
        tree = ManageIQTree()

    @View.nested
    class relationships(Accordion):  # noqa
        tree = ManageIQTree()
class ObjectStoreContainerView(BaseLoggedInPage):
    """A base view for all the Object Store Containers pages"""
    # Page title header; the container div id differs between appliance layouts.
    title = Text('.//div[@id="center_div" or @id="main-content"]//h1')

    @property
    def in_container(self):
        # True when logged in and the Object Store Containers entry of the
        # Storage menu is the currently selected navigation path.
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Storage', 'Object Storage',
                                                   'Object Store Containers'])
class ObjectStoreContainerAllView(ObjectStoreContainerView):
    """The all Object Store Containers page"""
    toolbar = View.nested(ObjectStoreContainerToolbar)
    # Pulls in the standard entity list/grid widgets from BaseEntitiesView.
    including_entities = View.include(BaseEntitiesView, use_parent=True)

    @property
    def is_displayed(self):
        # Displayed when we are in the right navigation section and the
        # page title matches the list page's heading.
        return (
            self.in_container and
            self.title.text == 'Cloud Object Store Containers')
class ObjectStoreContainerDetailsView(ObjectStoreContainerView):
    """The detail Object Store containers page"""

    @property
    def is_displayed(self):
        # The details page title and breadcrumb both carry the container key.
        expected_title = '{} (Summary)'.format(self.context['object'].key)
        return (
            self.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)

    toolbar = View.nested(ObjectStoreContainerDetailsToolbar)
    sidebar = View.nested(ObjectStoreContainerDetailSidebar)
    entities = View.nested(ObjectStoreContainerDetailsEntities)
@attr.s
class ObjectStoreContainer(BaseEntity, Taggable):
    """Model of a Storage Object Store Container in CFME.

    Args:
        key: key (name) of the container.
        provider: the cloud provider the container belongs to.
    """
    key = attr.ib()
    provider = attr.ib()

    # TODO add create method after BZ 1490320 fix
@attr.s
class ObjectStoreContainerCollection(BaseCollection):
    """Collection object for :py:class:'cfme.storage.object_store_container.ObjectStoreContainer'
    """
    ENTITY = ObjectStoreContainer

    def all(self):
        """Return all container entities belonging to this collection's provider."""
        view = navigate_to(self, 'All')
        provider = self.filters.get('provider')
        # ToDo: use all_entity_names method as JS API issue (#2898) resolve.
        return [
            self.instantiate(key=row['Key'], provider=provider)
            for row in view.entities.elements.read()
            if provider.name in row['Cloud Provider']
        ]
@navigator.register(ObjectStoreContainerCollection, 'All')
class All(CFMENavigateStep):
    """Navigate to the list of all Object Store Containers."""
    VIEW = ObjectStoreContainerAllView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    # Signatures take *args/**kwargs for consistency with the other
    # navigation steps in this module (see Details.step), so extra
    # arguments passed by the navigation framework are tolerated.
    def step(self, *args, **kwargs):
        self.prerequisite_view.navigation.select(
            'Storage', 'Object Storage', 'Object Store Containers')

    def resetter(self, *args, **kwargs):
        # Always land on the list view so entity rows are readable as a table.
        self.view.toolbar.view_selector.select("List View")
@navigator.register(ObjectStoreContainer, 'Details')
class Details(CFMENavigateStep):
    """Navigate from the container list to a single container's details page."""
    VIEW = ObjectStoreContainerDetailsView
    # 'All' is registered on the collection, hence NavigateToAttribute on parent.
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self, *args, **kwargs):
        try:
            # ToDo: use get_entity method as JS API issue (#2898) resolve.
            row = self.prerequisite_view.entities.paginator.find_row_on_pages(
                self.prerequisite_view.entities.elements, key=self.obj.key)
            row.click()
        except NoSuchElementException:
            # Translate the low-level widget error into the project's own
            # lookup exception for callers to catch.
            raise ItemNotFound('Could not locate container {}'.format(self.obj.key))
@navigator.register(ObjectStoreContainer, 'EditTagsFromDetails')
class ObjectDetailEditTag(CFMENavigateStep):
    """Navigation destination used by the Taggable mixin: opens the
    Edit Tags page from a container's details view."""
    VIEW = TagPageView
    prerequisite = NavigateToSibling('Details')

    def step(self, *args, **kwargs):
        self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
|
dxxb/micropython | refs/heads/upstream-tracking | tests/basics/equal.py | 118 | # test equality
# Exercises == and != across mismatched builtin types; the expected output
# of this script is compared against CPython's, so the exact statement
# sequence is significant — do not reorder.

# Singletons and booleans.
print(None == None)
print(False == None)
print(False == False)
print(False == True)
# Empty containers against each other, None, False, and a function object.
print(() == ())
print(() == [])
print([] == [])
print(() == {})
print({} == ())
print(() == None)
print(() == False)
print(() == print)
print([] == None)
print([] == False)
print([] == print)
print({} == None)
print({} == False)
print({} == print)
# Numbers and strings against various types.
print(1 == 1)
print(1 == 2)
print(1 == ())
print(1 == [])
print(1 == {})
print(1 == 'a')
print('a' == 'a')
print('a' == 'ab')
print('a' == 1)
print('a' == ())
# same as above but with !=
print(None != None)
print(False != None)
print(False != False)
print(False != True)
print(() != ())
print(() != [])
print([] != [])
print(() != {})
print({} != ())
print(() != None)
print(() != False)
print(() != print)
print([] != None)
print([] != False)
print([] != print)
print({} != None)
print({} != False)
print({} != print)
print(1 != 1)
print(1 != 2)
print(1 != ())
print(1 != [])
print(1 != {})
print(1 != 'a')
print('a' != 'a')
print('a' != 'ab')
print('a' != 1)
print('a' != ())
|
varlib1/servermall | refs/heads/master | teamcity/check.py | 15 | # (C) Datadog, Inc. 2015-2016
# (C) Paul Kirby <pkirby@matrix-solutions.com> 2014
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import time
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
class TeamCityCheck(AgentCheck):
    """Agent check that watches TeamCity build configurations and emits a
    Datadog event (build or deployment) each time a new successful build
    appears since the last observed build id."""

    HEADERS = {'Accept': 'application/json'}
    DEFAULT_TIMEOUT = 10
    # REST locators; the guestAuth variants need no credentials, the httpAuth
    # variants are used when basic_http_authentication is enabled.
    NEW_BUILD_URL = "http://{server}/guestAuth/app/rest/builds/?locator=buildType:{build_conf},sinceBuild:id:{since_build},status:SUCCESS"
    LAST_BUILD_URL = "http://{server}/guestAuth/app/rest/builds/?locator=buildType:{build_conf},count:1"
    NEW_BUILD_URL_AUTHENTICATED = "http://{server}/httpAuth/app/rest/builds/?locator=buildType:{build_conf},sinceBuild:id:{since_build},status:SUCCESS"
    LAST_BUILD_URL_AUTHENTICATED = "http://{server}/httpAuth/app/rest/builds/?locator=buildType:{build_conf},count:1"

    def __init__(self, name, init_config, agentConfig, instances=None):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)
        # Keep track of last build IDs per instance, so each run only
        # alerts on builds newer than the one last seen.
        self.last_build_ids = {}

    def _initialize_if_required(self, instance_name, server, build_conf, ssl_validation, basic_http_authentication):
        """Record the current latest build id for a new instance so the first
        check run does not re-announce historical builds.

        Raises on HTTP or parsing failures so the check surfaces the error.
        """
        # Already initialized
        if instance_name in self.last_build_ids:
            return

        self.log.debug("Initializing {0}".format(instance_name))
        if basic_http_authentication:
            build_url = self.LAST_BUILD_URL_AUTHENTICATED.format(
                server=server,
                build_conf=build_conf
            )
        else:
            build_url = self.LAST_BUILD_URL.format(
                server=server,
                build_conf=build_conf
            )
        try:
            resp = requests.get(build_url, timeout=self.DEFAULT_TIMEOUT, headers=self.HEADERS, verify=ssl_validation)
            resp.raise_for_status()
            last_build_id = resp.json().get('build')[0].get('id')
        except requests.exceptions.HTTPError:
            # raise_for_status() is the only raiser of HTTPError here, so
            # resp is guaranteed to be bound in this branch.
            if resp.status_code == 401:
                self.log.error("Access denied. You must enable guest authentication")
            self.log.error(
                "Failed to retrieve last build ID with code {0} for instance '{1}'"
                .format(resp.status_code, instance_name)
            )
            raise
        except Exception:
            self.log.exception(
                "Unhandled exception to get last build ID for instance '{0}'"
                .format(instance_name)
            )
            raise
        self.log.debug(
            "Last build id for instance {0} is {1}."
            .format(instance_name, last_build_id)
        )
        self.last_build_ids[instance_name] = last_build_id

    def _build_and_send_event(self, new_build, instance_name, is_deployment, host, tags):
        """Emit a Datadog event for *new_build* and remember its id."""
        self.log.debug("Found new build with id {0}, saving and alerting.".format(new_build["id"]))
        self.last_build_ids[instance_name] = new_build["id"]

        event_dict = {
            'timestamp': int(time.time()),
            'source_type_name': 'teamcity',
            'host': host,
            'tags': [],
        }
        if is_deployment:
            event_dict['event_type'] = 'teamcity_deployment'
            event_dict['msg_title'] = "{0} deployed to {1}".format(instance_name, host)
            event_dict['msg_text'] = "Build Number: {0}\n\nMore Info: {1}".format(new_build["number"],
                                                                                 new_build["webUrl"])
            event_dict['tags'].append('deployment')
        else:
            event_dict['event_type'] = "build"
            event_dict['msg_title'] = "Build for {0} successful".format(instance_name)
            event_dict['msg_text'] = "Build Number: {0}\nDeployed To: {1}\n\nMore Info: {2}".format(new_build["number"],
                                                                                                   host, new_build["webUrl"])
            event_dict['tags'].append('build')
        if tags:
            event_dict["tags"].extend(tags)

        self.event(event_dict)

    def check(self, instance):
        """Poll TeamCity for new successful builds of the configured instance."""
        instance_name = instance.get('name')
        if instance_name is None:
            raise Exception("Each instance must have a unique name")

        ssl_validation = _is_affirmative(instance.get('ssl_validation', True))

        server = instance.get('server')
        # BUG FIX: this used to read "if 'server' is None:", which compares the
        # *string literal* 'server' to None and is always False, so a missing
        # server setting was never reported.
        if server is None:
            raise Exception("Each instance must have a server")

        build_conf = instance.get('build_configuration')
        if build_conf is None:
            raise Exception("Each instance must have a build configuration")

        host = instance.get('host_affected') or self.hostname
        tags = instance.get('tags')
        is_deployment = _is_affirmative(instance.get('is_deployment', False))
        basic_http_authentication = _is_affirmative(instance.get('basic_http_authentication', False))

        self._initialize_if_required(instance_name, server, build_conf, ssl_validation, basic_http_authentication)

        # Look for new successful builds
        if basic_http_authentication:
            new_build_url = self.NEW_BUILD_URL_AUTHENTICATED.format(
                server=server,
                build_conf=build_conf,
                since_build=self.last_build_ids[instance_name]
            )
        else:
            new_build_url = self.NEW_BUILD_URL.format(
                server=server,
                build_conf=build_conf,
                since_build=self.last_build_ids[instance_name]
            )

        try:
            resp = requests.get(new_build_url, timeout=self.DEFAULT_TIMEOUT, headers=self.HEADERS, verify=ssl_validation)
            resp.raise_for_status()

            new_builds = resp.json()

            if new_builds["count"] == 0:
                self.log.debug("No new builds found.")
            else:
                self._build_and_send_event(new_builds["build"][0], instance_name, is_deployment, host, tags)
        except requests.exceptions.HTTPError:
            self.log.exception(
                "Couldn't fetch last build, got code {0}"
                .format(resp.status_code)
            )
            raise
        except Exception:
            self.log.exception(
                "Couldn't fetch last build, unhandled exception"
            )
            raise
|
peterfpeterson/mantid | refs/heads/master | Framework/PythonInterface/plugins/algorithms/RetrieveRunInfo.py | 3 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name,no-init
from mantid.api import PythonAlgorithm, AlgorithmFactory, ITableWorkspaceProperty
from mantid.simpleapi import *
from mantid.kernel import StringMandatoryValidator, Direction
from mantid import config
import itertools
import os
from itertools import filterfalse
class Intervals(object):
    """A set of integers stored as non-overlapping inclusive (start, end)
    intervals, e.g. Intervals((0, 3), (6, 8)).

    NOTE(review): relies on the module-level helpers _intervalsToList,
    _listToIntervals and _parseIntervalToken, which are not visible in this
    chunk — confirm they are defined elsewhere in the file.
    """

    # Having "*intervals" as a parameter instead of "intervals" allows us
    # to type "Intervals( (0,3), (6, 8) )" instead of "Intervals( ( (0,3), (6, 8) ) )"
    def __init__(self, *intervals):
        # Convert into a list, then back into intervals, to make
        # sure we have no overlapping intervals (which would result in
        # duplicate values).
        values = _intervalsToList(intervals)
        self._intervals = _listToIntervals(values)

    # Factory.
    @classmethod
    def fromString(cls, mystring):
        """Build an Intervals from a string such as "1-3,5,7-9"."""
        # Tokenise on commas.
        intervalTokens = mystring.split(",")
        # Call parseRange on each tokenised range.
        numbers = [_parseIntervalToken(intervalToken) for intervalToken in intervalTokens]
        # Chain the result (a list of lists) together to make one single list of unique values.
        # BUG FIX: itertools was used here but never imported (the module only
        # did "from itertools import filterfalse"), raising NameError at call
        # time; "import itertools" has been added to the file's imports.
        result = list(set(itertools.chain.from_iterable(numbers)))
        # Construct a new Intervals object, populate its intervals, and return.
        newObj = cls()
        newObj._intervals = _listToIntervals(result)
        return newObj

    # Factory.
    @classmethod
    def fromList(cls, values):
        """Build an Intervals from an iterable of integers (duplicates removed)."""
        result = list(set(values))
        # Construct a new Intervals object, populate its intervals, and return.
        newObj = cls()
        newObj._intervals = _listToIntervals(result)
        return newObj

    # Returns an array of all the values represented by this "Intervals" instance.
    def getValues(self):
        return [value for interval in self._intervals for value in range(interval[0], interval[1] + 1)]

    # Returns the raw intervals.
    def getIntervals(self):
        return self._intervals

    # So that "2 in Intervals( (0, 3) )" returns True.
    def __contains__(self, ids):
        for interval in self._intervals:
            if interval[0] <= ids <= interval[1]:
                return True
        return False

    # So that we can type "groups = Intervals( (0, 3) ) + Intervals( (6, 10) )"
    def __add__(self, other):
        # NOTE(review): the two interval lists are concatenated without being
        # re-normalised, so overlapping operands can yield duplicate values —
        # confirm whether callers rely on this.
        newObj = Intervals()
        newObj._intervals = self._intervals + other._intervals
        return newObj

    # TODO: At the moment this is just a generator. Implement a proper iterator.
    # So that we can type "for i in Intervals( (0, 2), (4, 5) ):"
    def __iter__(self):
        for interval in self._intervals:
            for value in range(interval[0], interval[1] + 1):
                yield value

    # So we can type "interval = Intervals( (3, 5), (10, 12) )" and then "interval[3]" returns 10.
    def __getitem__(self, index):
        return self.getValues()[index]

    # Mainly for debugging.
    def __str__(self):
        strings = ["(" + str(interval[0]) + ", " + str(interval[1]) + ")" for interval in self._intervals]
        return ", ".join(strings)

    def __len__(self):
        return len(self.getValues())
# Given a list of workspaces, will sum them together into a single new workspace, with the given name.
# If no name is given, then one is constructed from the names of the given workspaces.
def sumWsList(wsList, summedWsName = None):
    """Sum a list of workspaces into one workspace named *summedWsName*.

    A single-workspace list is cloned (when a name is given) or returned
    as-is.  Returns the summed workspace fetched from the Mantid ADS.
    """
    if len(wsList) == 1:
        if summedWsName is not None:
            CloneWorkspace(InputWorkspace=wsList[0].name(), OutputWorkspace=summedWsName)
            return mtd[summedWsName]
        return wsList[0]

    sumws = wsList[0] + wsList[1]
    if len(wsList) > 2:
        # BUG FIX: the upper bound used to be len(wsList) - 1, which silently
        # dropped the final workspace from the sum (and for a 3-element list
        # added nothing beyond the first two at all).
        for i in range(2, len(wsList)):
            sumws += wsList[i]

    if summedWsName is None:
        summedWsName = "_PLUS_".join([ws.name() for ws in wsList])

    RenameWorkspace(InputWorkspace=sumws.name(), OutputWorkspace=summedWsName)
    return mtd[summedWsName]
#pylint: disable=too-few-public-methods
class FileBackedWsIterator(object):
    ''' An iterator to iterate over workspaces. Each filename in the list
    provided is loaded into a workspace, yielded, and then deleted from
    memory, so only one workspace is held at a time. '''

    def __init__(self, filenames):
        ''' Constructor, takes in the list of filenames to load, whose
        workspaces will be iterated over. '''
        # Validate.
        if not isinstance(filenames, list):
            raise TypeError("Expected a list.")
        if not all([self._is_string(s) for s in filenames]):
            raise TypeError("Expected a list of strings.")
        if len(filenames) < 1:
            raise ValueError("Expected at least one filename.")

        # In the general case, we may or may not have checked for the existence
        # of the files previously, so before we even start iterating throw if
        # any are missing.
        missing_files = list(filterfalse(os.path.exists, filenames))
        if len(missing_files) > 0:
            raise ValueError("One or more files are missing: " + str(missing_files))

        self._filenames = filenames
        # Handle to the currently-loaded workspace, deleted between yields.
        self._loaded_ws = None

    def __iter__(self):
        ''' Method that allows this object to treated as an iterator. '''
        for filename in self._filenames:
            # Delete the previously loaded ws, if one exists, then load the
            # new ws.
            self._delete_loaded_ws()
            try:
                self._load_into_ws(filename)
            except RuntimeError:
                raise RuntimeError("Problem loading file \"" + filename + "\"")
            # Yield the newly loaded ws.
            yield self._loaded_ws
        # Final tidy-up.
        self._delete_loaded_ws()

    def _is_string(self, obj):
        ''' Convenience method to test if an object is a string or not. '''
        return isinstance(obj, str)

    def _load_into_ws(self, filename):
        ''' Load the given filename into a hidden ("__"-prefixed) workspace.
        Use LoadRaw for raw files, so we can turn LoadLogFiles off. '''
        wsName = "__temp_" + filename
        dummy_base, ext = os.path.splitext(filename)
        if ext == ".raw":
            # Loading log files is extremely slow on archive
            LoadRaw(Filename = filename,
                    OutputWorkspace = wsName,
                    LoadLogFiles = False)
        else:
            Load(Filename = filename,
                 OutputWorkspace = wsName)
        self._loaded_ws = mtd[wsName]

    def _delete_loaded_ws(self):
        ''' If there has been a file loaded into a ws, delete it. '''
        if self._loaded_ws:
            DeleteWorkspace(Workspace=self._loaded_ws)
class RetrieveRunInfo(PythonAlgorithm):
    """Algorithm that, for a range of run numbers, loads each run in turn and
    compiles selected log values (instrument, run number, user, title,
    duration) into a single output TableWorkspace."""

    def category(self):
        """Mantid algorithm-browser category."""
        return 'DataHandling\\Catalog'

    def summary(self):
        """One-line summary shown in the algorithm dialog."""
        return "Given a range of run numbers and an output workspace name, will compile a table of info for "+\
               "each run of the instrument you have set as default."

    def seeAlso(self):
        """Related algorithms linked from the docs."""
        return [ "CreateLogPropertyTable" ]

    def PyInit(self):
        """Declare the Runs string and the output table workspace."""
        # Declare algorithm properties.
        self.declareProperty(
            'Runs',
            '',
            StringMandatoryValidator(),
            doc='The range of runs to retrieve the run info for. E.g. "100-105".')
        self.declareProperty(ITableWorkspaceProperty("OutputWorkspace", "", Direction.Output),
                             doc= """The name of the TableWorkspace that will be created.
                             '''You must specify a name that does not already exist.''' """)

    def PyExec(self):
        """Load each requested run, extract the log properties, and append
        one row per run to the output table."""
        # Log names to pull out of each run file.
        PROP_NAMES = ["inst_abrv", "run_number", "user_name", "run_title",
                      "hd_dur"]

        # Not all ISIS run files have the relevant prop_names, but we may as
        # well limit to ISIS only runs at this stage.
        if config['default.facility'] != 'ISIS':
            raise ValueError("Only ISIS runs are supported by this alg.")

        # Ensure workspace does not already exist.
        output_ws_name = self.getPropertyValue("OutputWorkspace")
        if mtd.doesExist(output_ws_name):
            raise ValueError("Workspace \"" + output_ws_name + "\" already "
                             "exists. Either delete it, or choose another workspace name.")

        # Check that all run files are available.
        run_string = self.getPropertyValue("Runs")
        try:
            # FileFinder comes in via the simpleapi star-import.
            filenames = list(FileFinder.findRuns(run_string))
        except RuntimeError as re:
            # Re-raise as ValueError so the GUI reports a validation problem.
            raise ValueError(str(re))

        # Set up the output ws table.
        CreateEmptyTableWorkspace(OutputWorkspace=output_ws_name)
        output_ws = mtd[output_ws_name]
        for prop_name in PROP_NAMES:
            output_ws.addColumn(name=prop_name, type='str')

        # Set up (and iterate over) a "file backed" iterator, which takes care
        # of loading files and then deleting the resulting workspaces in turn
        # when we are finished with them.
        ws_iter = FileBackedWsIterator(filenames)
        for ws in ws_iter:
            # Create a single row table for each file.
            temp_table_name = ws.name() + "_INFO"
            CreateLogPropertyTable(
                InputWorkspaces=ws.name(),
                LogPropertyNames=', '.join(PROP_NAMES),
                GroupPolicy="First",  # Include only the 1st child of any groups.
                OutputWorkspace=temp_table_name)
            # Add its contents to the output before deleting it.
            temp_table = mtd[temp_table_name]
            output_ws.addRow(temp_table.row(0))
            DeleteWorkspace(Workspace=temp_table_name)

        self.setPropertyValue('OutputWorkspace', output_ws_name)
# Register algorthm with Mantid.
AlgorithmFactory.subscribe(RetrieveRunInfo)
|
Agent007/deepchem | refs/heads/master | deepchem/dock/pose_generation.py | 1 | """
Generates protein-ligand docked poses using Autodock Vina.
"""
from __future__ import division
from __future__ import unicode_literals
from deepchem.utils import mol_xyz_util
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import logging
import numpy as np
import os
import tempfile
from subprocess import call
from deepchem.feat import hydrogenate_and_compute_partial_charges
from deepchem.dock.binding_pocket import RFConvexHullPocketFinder
from deepchem.utils import rdkit_util
logger = logging.getLogger(__name__)
class PoseGenerator(object):
  """Abstract superclass for all pose-generation routines."""

  def generate_poses(self, protein_file, ligand_file, out_dir=None):
    """Generates the docked complex and outputs files for docked complex.

    Subclasses must implement this; it should write the docked structures
    under out_dir and return paths to the generated files.
    """
    raise NotImplementedError
def write_conf(receptor_filename,
               ligand_filename,
               centroid,
               box_dims,
               conf_filename,
               exhaustiveness=None):
  """Writes Vina configuration file to disk.

  The file names the receptor/ligand inputs, the docking-box centre
  (centroid) and size (box_dims), and optionally the exhaustiveness level.
  """
  lines = [
      "receptor = %s\n" % receptor_filename,
      "ligand = %s\n\n" % ligand_filename,
      "center_x = %f\n" % centroid[0],
      "center_y = %f\n" % centroid[1],
      "center_z = %f\n\n" % centroid[2],
      "size_x = %f\n" % box_dims[0],
      "size_y = %f\n" % box_dims[1],
      "size_z = %f\n\n" % box_dims[2],
  ]
  if exhaustiveness is not None:
    lines.append("exhaustiveness = %d\n" % exhaustiveness)
  with open(conf_filename, "w") as f:
    f.writelines(lines)
class VinaPoseGenerator(PoseGenerator):
  """Uses Autodock Vina to generate binding poses.

  On construction, if the Linux Vina 1.1.2 binary is not already present
  next to this module, it is downloaded and unpacked via external ``wget``
  and ``tar`` commands (requires network access and a Linux shell).
  """

  def __init__(self, exhaustiveness=10, detect_pockets=True):
    """Initializes Vina Pose generation

    exhaustiveness: value written to Vina's "exhaustiveness" option.
    detect_pockets: if True, use RFConvexHullPocketFinder to propose a
      binding pocket; otherwise the box is derived from the whole protein.
    """
    current_dir = os.path.dirname(os.path.realpath(__file__))
    self.vina_dir = os.path.join(current_dir, "autodock_vina_1_1_2_linux_x86")
    self.exhaustiveness = exhaustiveness
    self.detect_pockets = detect_pockets
    if self.detect_pockets:
      self.pocket_finder = RFConvexHullPocketFinder()
    if not os.path.exists(self.vina_dir):
      logger.info("Vina not available. Downloading")
      # TODO(rbharath): May want to move this file to S3 so we can ensure it's
      # always available.
      wget_cmd = "wget -nv -c http://vina.scripps.edu/download/autodock_vina_1_1_2_linux_x86.tgz"
      call(wget_cmd.split())
      logger.info("Downloaded Vina. Extracting")
      download_cmd = "tar xzvf autodock_vina_1_1_2_linux_x86.tgz"
      call(download_cmd.split())
      logger.info("Moving to final location")
      mv_cmd = "mv autodock_vina_1_1_2_linux_x86 %s" % current_dir
      call(mv_cmd.split())
      logger.info("Cleanup: removing downloaded vina tar.gz")
      rm_cmd = "rm autodock_vina_1_1_2_linux_x86.tgz"
      call(rm_cmd.split())
    self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")

  def generate_poses(self,
                     protein_file,
                     ligand_file,
                     centroid=None,
                     box_dims=None,
                     dry_run=False,
                     out_dir=None):
    """Generates the docked complex and outputs files for docked complex.

    Returns a (hydrogenated protein pdb path, docked-poses pdbqt path)
    tuple.  When dry_run is True everything is prepared but Vina itself is
    not invoked.
    """
    if out_dir is None:
      out_dir = tempfile.mkdtemp()

    # Prepare receptor
    receptor_name = os.path.basename(protein_file).split(".")[0]
    protein_hyd = os.path.join(out_dir, "%s.pdb" % receptor_name)
    protein_pdbqt = os.path.join(out_dir, "%s.pdbqt" % receptor_name)
    hydrogenate_and_compute_partial_charges(
        protein_file,
        "pdb",
        hyd_output=protein_hyd,
        pdbqt_output=protein_pdbqt,
        protein=True)

    # Get protein centroid and range
    # TODO(rbharath): Need to add some way to identify binding pocket, or this is
    # going to be extremely slow!
    # NOTE(review): if only one of centroid/box_dims is supplied, both are
    # recomputed below — confirm that is the intended behaviour.
    if centroid is not None and box_dims is not None:
      protein_centroid = centroid
    else:
      if not self.detect_pockets:
        receptor_mol = rdkit_util.load_molecule(
            protein_hyd, calc_charges=False, add_hydrogens=False)
        protein_centroid = mol_xyz_util.get_molecule_centroid(receptor_mol[0])
        protein_range = mol_xyz_util.get_molecule_range(receptor_mol[0])
        # Pad the box by 5 Angstrom-units on each axis.
        box_dims = protein_range + 5.0
      else:
        logger.info("About to find putative binding pockets")
        pockets, pocket_atoms_maps, pocket_coords = self.pocket_finder.find_pockets(
            protein_file, ligand_file)
        # TODO(rbharath): Handle multiple pockets instead of arbitrarily selecting
        # first pocket.
        logger.info("Computing centroid and size of proposed pocket.")
        pocket_coord = pocket_coords[0]
        # NOTE(review): axis=1 assumes the coordinate array is laid out
        # (3, n_atoms) — confirm against find_pockets' return convention.
        protein_centroid = np.mean(pocket_coord, axis=1)
        pocket = pockets[0]
        (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket
        x_box = (x_max - x_min) / 2.
        y_box = (y_max - y_min) / 2.
        z_box = (z_max - z_min) / 2.
        box_dims = (x_box, y_box, z_box)

    # Prepare ligand
    ligand_name = os.path.basename(ligand_file).split(".")[0]
    ligand_hyd = os.path.join(out_dir, "%s.pdb" % ligand_name)
    ligand_pdbqt = os.path.join(out_dir, "%s.pdbqt" % ligand_name)

    # TODO(rbharath): Generalize this so can support mol2 files as well.
    hydrogenate_and_compute_partial_charges(
        ligand_file,
        "sdf",
        hyd_output=ligand_hyd,
        pdbqt_output=ligand_pdbqt,
        protein=False)

    # Write Vina conf file
    conf_file = os.path.join(out_dir, "conf.txt")
    write_conf(
        protein_pdbqt,
        ligand_pdbqt,
        protein_centroid,
        box_dims,
        conf_file,
        exhaustiveness=self.exhaustiveness)

    # Define locations of log and output files
    log_file = os.path.join(out_dir, "%s_log.txt" % ligand_name)
    out_pdbqt = os.path.join(out_dir, "%s_docked.pdbqt" % ligand_name)
    # TODO(rbharath): Let user specify the number of poses required.
    if not dry_run:
      logger.info("About to call Vina")
      call(
          "%s --config %s --log %s --out %s" % (self.vina_cmd, conf_file,
                                                log_file, out_pdbqt),
          shell=True)

    # TODO(rbharath): Convert the output pdbqt to a pdb file.
    # Return docked files
    return protein_hyd, out_pdbqt
|
eldioschalm/site-eventos | refs/heads/master | portal/core/models.py | 10644 | from django.db import models
# Create your models here.
|
olapaola/olapaola-android-scripting | refs/heads/master | python/src/Lib/test/test_iter.py | 80 | # Test iterators.
import unittest
from test.test_support import run_unittest, TESTFN, unlink, have_unicode
# Test result of triple loop (too big to inline)
# All (i, j, k) combinations from three nested loops over range(3), in
# lexicographic order; used as the expected value by several tests below.
TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
            (0, 1, 0), (0, 1, 1), (0, 1, 2),
            (0, 2, 0), (0, 2, 1), (0, 2, 2),
            (1, 0, 0), (1, 0, 1), (1, 0, 2),
            (1, 1, 0), (1, 1, 1), (1, 1, 2),
            (1, 2, 0), (1, 2, 1), (1, 2, 2),
            (2, 0, 0), (2, 0, 1), (2, 0, 2),
            (2, 1, 0), (2, 1, 1), (2, 1, 2),
            (2, 2, 0), (2, 2, 1), (2, 2, 2)]
# Helper classes
class BasicIterClass:
    """Minimal Python 2 style iterator: yields 0 .. n-1 via next(), then
    raises StopIteration."""

    def __init__(self, n):
        # Exclusive upper bound and current position.
        self.n = n
        self.i = 0

    def next(self):
        if self.i >= self.n:
            raise StopIteration
        value = self.i
        self.i += 1
        return value
class IteratingSequenceClass:
    """An iterable (not a sequence): each __iter__ call hands back a fresh,
    independent iterator, so nested loops do not interfere."""

    def __init__(self, n):
        # Exclusive upper bound passed on to every iterator created.
        self.n = n

    def __iter__(self):
        return BasicIterClass(self.n)
class SequenceClass:
    """Old-protocol sequence: iterable purely through __getitem__ raising
    IndexError past the end (no __iter__ defined)."""

    def __init__(self, n):
        # Length of the pseudo-sequence 0 .. n-1.
        self.n = n

    def __getitem__(self, i):
        if not (0 <= i < self.n):
            raise IndexError
        return i
# Main test suite
class TestCase(unittest.TestCase):
# Helper to check that an iterator returns a given sequence
def check_iterator(self, it, seq):
res = []
while 1:
try:
val = it.next()
except StopIteration:
break
res.append(val)
self.assertEqual(res, seq)
# Helper to check that a for loop generates a given sequence
def check_for_loop(self, expr, seq):
res = []
for val in expr:
res.append(val)
self.assertEqual(res, seq)
# Test basic use of iter() function
def test_iter_basic(self):
self.check_iterator(iter(range(10)), range(10))
# Test that iter(iter(x)) is the same as iter(x)
def test_iter_idempotency(self):
seq = range(10)
it = iter(seq)
it2 = iter(it)
self.assert_(it is it2)
# Test that for loops over iterators work
def test_iter_for_loop(self):
self.check_for_loop(iter(range(10)), range(10))
# Test several independent iterators over the same list
def test_iter_independence(self):
seq = range(3)
res = []
for i in iter(seq):
for j in iter(seq):
for k in iter(seq):
res.append((i, j, k))
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension using iterators
def test_nested_comprehensions_iter(self):
seq = range(3)
res = [(i, j, k)
for i in iter(seq) for j in iter(seq) for k in iter(seq)]
self.assertEqual(res, TRIPLETS)
# Test triple list comprehension without iterators
def test_nested_comprehensions_for(self):
seq = range(3)
res = [(i, j, k) for i in seq for j in seq for k in seq]
self.assertEqual(res, TRIPLETS)
# Test a class with __iter__ in a for loop
def test_iter_class_for(self):
self.check_for_loop(IteratingSequenceClass(10), range(10))
# Test a class with __iter__ with explicit iter()
def test_iter_class_iter(self):
self.check_iterator(iter(IteratingSequenceClass(10)), range(10))
# Test for loop on a sequence class without __iter__
def test_seq_class_for(self):
self.check_for_loop(SequenceClass(10), range(10))
# Test iter() on a sequence class without __iter__
def test_seq_class_iter(self):
self.check_iterator(iter(SequenceClass(10)), range(10))
# Test two-argument iter() with callable instance
def test_iter_callable(self):
class C:
def __init__(self):
self.i = 0
def __call__(self):
i = self.i
self.i = i + 1
if i > 100:
raise IndexError # Emergency stop
return i
self.check_iterator(iter(C(), 10), range(10))
# Test two-argument iter() with function
def test_iter_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
return i
self.check_iterator(iter(spam, 10), range(10))
# Test two-argument iter() with function that raises StopIteration
def test_iter_function_stop(self):
def spam(state=[0]):
i = state[0]
if i == 10:
raise StopIteration
state[0] = i+1
return i
self.check_iterator(iter(spam, 20), range(10))
# Test exception propagation through function iterator
def test_exception_function(self):
def spam(state=[0]):
i = state[0]
state[0] = i+1
if i == 10:
raise RuntimeError
return i
res = []
try:
for x in iter(spam, 20):
res.append(x)
except RuntimeError:
self.assertEqual(res, range(10))
else:
self.fail("should have raised RuntimeError")
# Test exception propagation through sequence iterator
def test_exception_sequence(self):
    """Exceptions other than IndexError/StopIteration escape a __getitem__ loop."""
    class MySequenceClass(SequenceClass):
        def __getitem__(self, i):
            if i == 10:
                raise RuntimeError
            return SequenceClass.__getitem__(self, i)
    res = []
    try:
        for x in MySequenceClass(20):
            res.append(x)
    except RuntimeError:
        # Exactly the first ten values were produced before the error.
        self.assertEqual(res, range(10))
    else:
        self.fail("should have raised RuntimeError")
# Test for StopIteration from __getitem__
def test_stop_sequence(self):
    """StopIteration raised inside __getitem__ cleanly ends a for loop."""
    class MySequenceClass(SequenceClass):
        def __getitem__(self, i):
            if i == 10:
                raise StopIteration
            return SequenceClass.__getitem__(self, i)
    # The nominal length is 20, but iteration stops at index 10.
    self.check_for_loop(MySequenceClass(20), range(10))
# Test a big range
def test_iter_big_range(self):
    """A large list iterator yields every element."""
    self.check_for_loop(iter(range(10000)), range(10000))
# Test an empty list
def test_iter_empty(self):
    """An iterator over an empty list is immediately exhausted."""
    it = iter([])
    self.check_for_loop(it, [])
# Test a tuple
def test_iter_tuple(self):
    """A tuple iterator yields items in order."""
    data = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
    self.check_for_loop(iter(data), range(10))
# Test an xrange
def test_iter_xrange(self):
    """An xrange object iterates like the equivalent list."""
    self.check_for_loop(iter(xrange(10)), range(10))
# Test a string
def test_iter_string(self):
    """A string iterates one character at a time."""
    text = "abcde"
    self.check_for_loop(iter(text), list(text))
# Test a Unicode string (only defined when the build has Unicode support)
if have_unicode:
    def test_iter_unicode(self):
        """A unicode string iterates one unicode character at a time."""
        self.check_for_loop(iter(unicode("abcde")),
                            [unicode("a"), unicode("b"), unicode("c"),
                             unicode("d"), unicode("e")])
# Test a dictionary
def test_iter_dict(self):
    """Iterating a dict yields its keys, in the same order as dict.keys()."""
    dict = {}    # NOTE: shadows the builtin name `dict` (kept as-is)
    for i in range(10):
        dict[i] = None
    self.check_for_loop(dict, dict.keys())
# Test a file
def test_iter_file(self):
    """A file object iterates over its lines; a second pass at EOF yields nothing."""
    f = open(TESTFN, "w")
    try:
        for i in range(5):
            f.write("%d\n" % i)
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"])
        # The file is now at EOF, so iterating again produces nothing.
        self.check_for_loop(f, [])
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test list()'s use of iterators.
def test_builtin_list(self):
    """list() must accept any iterable (sequences, dicts, files) and
    reject non-iterables with TypeError."""
    self.assertEqual(list(SequenceClass(5)), range(5))
    self.assertEqual(list(SequenceClass(0)), [])
    self.assertEqual(list(()), [])
    self.assertEqual(list(range(10, -1, -1)), range(10, -1, -1))
    d = {"one": 1, "two": 2, "three": 3}
    # Iterating a dict yields its keys.
    self.assertEqual(list(d), d.keys())
    self.assertRaises(TypeError, list, list)
    self.assertRaises(TypeError, list, 42)
    f = open(TESTFN, "w")
    try:
        for i in range(5):
            f.write("%d\n" % i)
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        self.assertEqual(list(f), ["0\n", "1\n", "2\n", "3\n", "4\n"])
        # Rewinding makes a fresh pass see the same lines again.
        f.seek(0, 0)
        self.assertEqual(list(f),
                         ["0\n", "1\n", "2\n", "3\n", "4\n"])
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test tuple()'s use of iterators.
def test_builtin_tuple(self):
    """tuple() must accept any iterable (sequences, dicts, files) and
    reject non-iterables with TypeError."""
    self.assertEqual(tuple(SequenceClass(5)), (0, 1, 2, 3, 4))
    self.assertEqual(tuple(SequenceClass(0)), ())
    self.assertEqual(tuple([]), ())
    self.assertEqual(tuple(()), ())
    self.assertEqual(tuple("abc"), ("a", "b", "c"))
    d = {"one": 1, "two": 2, "three": 3}
    # Iterating a dict yields its keys.
    self.assertEqual(tuple(d), tuple(d.keys()))
    self.assertRaises(TypeError, tuple, list)
    self.assertRaises(TypeError, tuple, 42)
    f = open(TESTFN, "w")
    try:
        for i in range(5):
            f.write("%d\n" % i)
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        self.assertEqual(tuple(f), ("0\n", "1\n", "2\n", "3\n", "4\n"))
        # Rewinding makes a fresh pass see the same lines again.
        f.seek(0, 0)
        self.assertEqual(tuple(f),
                         ("0\n", "1\n", "2\n", "3\n", "4\n"))
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test filter()'s use of iterators.
def test_builtin_filter(self):
    """filter() accepts arbitrary iterables, preserves the input type for
    tuple/str, and tests truthiness via __nonzero__."""
    self.assertEqual(filter(None, SequenceClass(5)), range(1, 5))
    self.assertEqual(filter(None, SequenceClass(0)), [])
    self.assertEqual(filter(None, ()), ())
    self.assertEqual(filter(None, "abc"), "abc")
    d = {"one": 1, "two": 2, "three": 3}
    # Filtering a dict yields its (truthy) keys as a list.
    self.assertEqual(filter(None, d), d.keys())
    self.assertRaises(TypeError, filter, None, list)
    self.assertRaises(TypeError, filter, None, 42)
    # Truth value comes from __nonzero__, not from object identity.
    class Boolean:
        def __init__(self, truth):
            self.truth = truth
        def __nonzero__(self):
            return self.truth
    bTrue = Boolean(1)
    bFalse = Boolean(0)
    # A minimal iterable whose __iter__ returns a hand-rolled iterator.
    class Seq:
        def __init__(self, *args):
            self.vals = args
        def __iter__(self):
            class SeqIter:
                def __init__(self, vals):
                    self.vals = vals
                    self.i = 0
                def __iter__(self):
                    return self
                def next(self):
                    i = self.i
                    self.i = i + 1
                    if i < len(self.vals):
                        return self.vals[i]
                    else:
                        raise StopIteration
            return SeqIter(self.vals)
    seq = Seq(*([bTrue, bFalse] * 25))
    self.assertEqual(filter(lambda x: not x, seq), [bFalse]*25)
    self.assertEqual(filter(lambda x: not x, iter(seq)), [bFalse]*25)
# Test max() and min()'s use of iterators.
def test_builtin_max_min(self):
    """max()/min() consume arbitrary iterators, including file objects."""
    self.assertEqual(max(SequenceClass(5)), 4)
    self.assertEqual(min(SequenceClass(5)), 0)
    self.assertEqual(max(8, -1), 8)
    self.assertEqual(min(8, -1), -1)
    d = {"one": 1, "two": 2, "three": 3}
    # Over a dict these compare the string *keys*.
    self.assertEqual(max(d), "two")
    self.assertEqual(min(d), "one")
    self.assertEqual(max(d.itervalues()), 3)
    self.assertEqual(min(iter(d.itervalues())), 1)
    f = open(TESTFN, "w")
    try:
        f.write("medium line\n")
        f.write("xtra large line\n")
        f.write("itty-bitty line\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        # Lines compare lexicographically.
        self.assertEqual(min(f), "itty-bitty line\n")
        f.seek(0, 0)
        self.assertEqual(max(f), "xtra large line\n")
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test map()'s use of iterators.
def test_builtin_map(self):
    """map() consumes iterators; with func=None it zips, padding
    exhausted inputs with None."""
    self.assertEqual(map(None, SequenceClass(5)), range(5))
    self.assertEqual(map(lambda x: x+1, SequenceClass(5)), range(1, 6))
    d = {"one": 1, "two": 2, "three": 3}
    self.assertEqual(map(None, d), d.keys())
    self.assertEqual(map(lambda k, d=d: (k, d[k]), d), d.items())
    dkeys = d.keys()
    # The 3-key dict is shorter than SequenceClass(5); map(None, ...)
    # pads the exhausted inputs with None for the last two rows.
    expected = [(i < len(d) and dkeys[i] or None,
                 i,
                 i < len(d) and dkeys[i] or None)
                for i in range(5)]
    self.assertEqual(map(None, d,
                         SequenceClass(5),
                         iter(d.iterkeys())),
                     expected)
    f = open(TESTFN, "w")
    try:
        for i in range(10):
            f.write("xy" * i + "\n") # line i has len 2*i+1
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        self.assertEqual(map(len, f), range(1, 21, 2))
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test zip()'s use of iterators.
def test_builtin_zip(self):
    """zip() works with iterators, infinite generators, files, and
    sequences whose __len__ lies about their true length."""
    self.assertEqual(zip(), [])
    self.assertEqual(zip(*[]), [])
    self.assertEqual(zip(*[(1, 2), 'ab']), [(1, 'a'), (2, 'b')])
    self.assertRaises(TypeError, zip, None)
    self.assertRaises(TypeError, zip, range(10), 42)
    self.assertRaises(TypeError, zip, range(10), zip)
    self.assertEqual(zip(IteratingSequenceClass(3)),
                     [(0,), (1,), (2,)])
    self.assertEqual(zip(SequenceClass(3)),
                     [(0,), (1,), (2,)])
    d = {"one": 1, "two": 2, "three": 3}
    self.assertEqual(d.items(), zip(d, d.itervalues()))
    # Generate all ints starting at constructor arg.  This iterator is
    # *infinite*: zip must stop when the shortest input (the file) ends.
    class IntsFrom:
        def __init__(self, start):
            self.i = start
        def __iter__(self):
            return self
        def next(self):
            i = self.i
            self.i = i+1
            return i
    f = open(TESTFN, "w")
    try:
        f.write("a\n" "bbb\n" "cc\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        self.assertEqual(zip(IntsFrom(0), f, IntsFrom(-100)),
                         [(0, "a\n", -100),
                          (1, "bbb\n", -99),
                          (2, "cc\n", -98)])
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
    self.assertEqual(zip(xrange(5)), [(i,) for i in range(5)])
    # Classes that lie about their lengths.  zip() may use __len__ only
    # as a size hint; where iteration stops is decided by IndexError.
    class NoGuessLen5:
        def __getitem__(self, i):
            if i >= 5:
                raise IndexError
            return i
    class Guess3Len5(NoGuessLen5):
        def __len__(self):
            return 3
    class Guess30Len5(NoGuessLen5):
        def __len__(self):
            return 30
    self.assertEqual(len(Guess3Len5()), 3)
    self.assertEqual(len(Guess30Len5()), 30)
    self.assertEqual(zip(NoGuessLen5()), zip(range(5)))
    self.assertEqual(zip(Guess3Len5()), zip(range(5)))
    self.assertEqual(zip(Guess30Len5()), zip(range(5)))
    expected = [(i, i) for i in range(5)]
    for x in NoGuessLen5(), Guess3Len5(), Guess30Len5():
        for y in NoGuessLen5(), Guess3Len5(), Guess30Len5():
            self.assertEqual(zip(x, y), expected)
# Test reduce()'s use of iterators.
def test_builtin_reduce(self):
    """reduce() accepts iterators; an empty input needs an initializer."""
    from operator import add
    self.assertEqual(reduce(add, SequenceClass(5)), 10)
    self.assertEqual(reduce(add, SequenceClass(5), 42), 52)
    # Empty sequence with no initial value is an error.
    self.assertRaises(TypeError, reduce, add, SequenceClass(0))
    self.assertEqual(reduce(add, SequenceClass(0), 42), 42)
    self.assertEqual(reduce(add, SequenceClass(1)), 0)
    self.assertEqual(reduce(add, SequenceClass(1), 42), 42)
    d = {"one": 1, "two": 2, "three": 3}
    # Reducing a dict concatenates its string keys.
    self.assertEqual(reduce(add, d), "".join(d.keys()))
# This test case will be removed if we don't have Unicode
def test_unicode_join_endcase(self):
    """str.join must remember already-seen str items and hand them to
    unicode.join when a unicode element appears mid-iteration."""
    # This class inserts a Unicode object into its argument's natural
    # iteration, in the 3rd position.
    class OhPhooey:
        def __init__(self, seq):
            self.it = iter(seq)
            self.i = 0
        def __iter__(self):
            return self
        def next(self):
            i = self.i
            self.i = i+1
            if i == 2:
                return unicode("fooled you!")
            return self.it.next()
    f = open(TESTFN, "w")
    try:
        f.write("a\n" + "b\n" + "c\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    # Nasty: string.join(s) can't know whether unicode.join() is needed
    # until it's seen all of s's elements. But in this case, f's
    # iterator cannot be restarted. So what we're testing here is
    # whether string.join() can manage to remember everything it's seen
    # and pass that on to unicode.join().
    try:
        got = " - ".join(OhPhooey(f))
        self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n"))
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Without Unicode support the end case cannot occur: override the test
# with a no-op.
if not have_unicode:
    def test_unicode_join_endcase(self): pass
# Test iterators with 'x in y' and 'x not in y'.
def test_in_and_not_in(self):
    """'in'/'not in' fall back to iteration for every iterable flavor
    and raise TypeError for non-iterable right-hand sides."""
    for sc5 in IteratingSequenceClass(5), SequenceClass(5):
        for i in range(5):
            self.assert_(i in sc5)
        for i in "abc", -1, 5, 42.42, (3, 4), [], {1: 1}, 3-12j, sc5:
            self.assert_(i not in sc5)
    # Non-iterable right-hand sides raise TypeError.
    self.assertRaises(TypeError, lambda: 3 in 12)
    self.assertRaises(TypeError, lambda: 3 not in map)
    d = {"one": 1, "two": 2, "three": 3, 1j: 2j}
    for k in d:
        self.assert_(k in d)
        self.assert_(k not in d.itervalues())
    for v in d.values():
        self.assert_(v in d.itervalues())
        self.assert_(v not in d)
    for k, v in d.iteritems():
        self.assert_((k, v) in d.iteritems())
        self.assert_((v, k) not in d.iteritems())
    f = open(TESTFN, "w")
    try:
        f.write("a\n" "b\n" "c\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        for chunk in "abc":
            # Membership in a file is per *line*, so the bare character
            # misses but the character plus newline hits.
            f.seek(0, 0)
            self.assert_(chunk not in f)
            f.seek(0, 0)
            self.assert_((chunk + "\n") in f)
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test iterators with operator.countOf (PySequence_Count).
def test_countOf(self):
    """operator.countOf works on sequences, dicts, dict iterators and files."""
    from operator import countOf
    self.assertEqual(countOf([1,2,2,3,2,5], 2), 3)
    self.assertEqual(countOf((1,2,2,3,2,5), 2), 3)
    self.assertEqual(countOf("122325", "2"), 3)
    self.assertEqual(countOf("122325", "6"), 0)
    self.assertRaises(TypeError, countOf, 42, 1)
    self.assertRaises(TypeError, countOf, countOf, countOf)
    d = {"one": 3, "two": 3, "three": 3, 1j: 2j}
    for k in d:
        # Keys are unique, so each occurs exactly once.
        self.assertEqual(countOf(d, k), 1)
    self.assertEqual(countOf(d.itervalues(), 3), 3)
    self.assertEqual(countOf(d.itervalues(), 2j), 1)
    self.assertEqual(countOf(d.itervalues(), 1j), 0)
    f = open(TESTFN, "w")
    try:
        f.write("a\n" "b\n" "c\n" "b\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        for letter, count in ("a", 1), ("b", 2), ("c", 1), ("d", 0):
            # Rewind before each scan so the whole file is counted.
            f.seek(0, 0)
            self.assertEqual(countOf(f, letter + "\n"), count)
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test iterators with operator.indexOf (PySequence_Index).
def test_indexOf(self):
    """operator.indexOf returns the first position; on a shared iterator
    each search resumes where the previous one stopped."""
    from operator import indexOf
    self.assertEqual(indexOf([1,2,2,3,2,5], 1), 0)
    self.assertEqual(indexOf((1,2,2,3,2,5), 2), 1)
    self.assertEqual(indexOf((1,2,2,3,2,5), 3), 3)
    self.assertEqual(indexOf((1,2,2,3,2,5), 5), 5)
    self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 0)
    self.assertRaises(ValueError, indexOf, (1,2,2,3,2,5), 6)
    self.assertEqual(indexOf("122325", "2"), 1)
    self.assertEqual(indexOf("122325", "5"), 5)
    self.assertRaises(ValueError, indexOf, "122325", "6")
    self.assertRaises(TypeError, indexOf, 42, 1)
    self.assertRaises(TypeError, indexOf, indexOf, indexOf)
    f = open(TESTFN, "w")
    try:
        f.write("a\n" "b\n" "c\n" "d\n" "e\n")
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        fiter = iter(f)
        # Indices are relative to what fiter has already consumed, and
        # "a\n" is gone by the final search, hence the ValueError.
        self.assertEqual(indexOf(fiter, "b\n"), 1)
        self.assertEqual(indexOf(fiter, "d\n"), 1)
        self.assertEqual(indexOf(fiter, "e\n"), 0)
        self.assertRaises(ValueError, indexOf, fiter, "a\n")
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
    iclass = IteratingSequenceClass(3)
    for i in range(3):
        # Each call gets a fresh iterator from __iter__.
        self.assertEqual(indexOf(iclass, i), i)
    self.assertRaises(ValueError, indexOf, iclass, -1)
# Test iterators with file.writelines().
def test_writelines(self):
    """file.writelines accepts any iterable of strings, including large
    lazily-generated streams, and rejects non-iterables."""
    f = file(TESTFN, "w")
    try:
        self.assertRaises(TypeError, f.writelines, None)
        self.assertRaises(TypeError, f.writelines, 42)
        f.writelines(["1\n", "2\n"])
        f.writelines(("3\n", "4\n"))
        # Iterating a dict yields its keys.
        f.writelines({'5\n': None})
        f.writelines({})
        # Try a big chunk too.
        class Iterator:
            def __init__(self, start, finish):
                self.start = start
                self.finish = finish
                self.i = self.start
            def next(self):
                if self.i >= self.finish:
                    raise StopIteration
                result = str(self.i) + '\n'
                self.i += 1
                return result
            def __iter__(self):
                return self
        class Whatever:
            def __init__(self, start, finish):
                self.start = start
                self.finish = finish
            def __iter__(self):
                return Iterator(self.start, self.finish)
        f.writelines(Whatever(6, 6+2000))
        f.close()
        f = file(TESTFN)
        expected = [str(i) + "\n" for i in range(1, 2006)]
        self.assertEqual(list(f), expected)
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
# Test iterators on RHS of unpacking assignments.
def test_unpack_iter(self):
    """Tuple unpacking consumes arbitrary iterators, raises ValueError on
    length mismatch, TypeError on non-iterables, and must not leak
    references when an unpack fails partway through."""
    a, b = 1, 2
    self.assertEqual((a, b), (1, 2))
    a, b, c = IteratingSequenceClass(3)
    self.assertEqual((a, b, c), (0, 1, 2))
    try: # too many values
        a, b = IteratingSequenceClass(3)
    except ValueError:
        pass
    else:
        self.fail("should have raised ValueError")
    try: # not enough values
        a, b, c = IteratingSequenceClass(2)
    except ValueError:
        pass
    else:
        self.fail("should have raised ValueError")
    try: # not iterable
        a, b, c = len
    except TypeError:
        pass
    else:
        self.fail("should have raised TypeError")
    a, b, c = {1: 42, 2: 42, 3: 42}.itervalues()
    self.assertEqual((a, b, c), (42, 42, 42))
    f = open(TESTFN, "w")
    lines = ("a\n", "bb\n", "ccc\n")
    try:
        for line in lines:
            f.write(line)
    finally:
        f.close()
    f = open(TESTFN, "r")
    try:
        # A file with exactly three lines unpacks into three names.
        a, b, c = f
        self.assertEqual((a, b, c), lines)
    finally:
        f.close()
    try:
        unlink(TESTFN)
    except OSError:
        pass
    # Nested unpacking with mixed iterables.
    (a, b), (c,) = IteratingSequenceClass(2), {42: 24}
    self.assertEqual((a, b, c), (0, 1, 42))
    # Test reference count behavior: C tracks live instances via
    # __new__/__del__; a failed unpack must still release what it pulled.
    class C(object):
        count = 0
        def __new__(cls):
            cls.count += 1
            return object.__new__(cls)
        def __del__(self):
            cls = self.__class__
            assert cls.count > 0
            cls.count -= 1
    x = C()
    self.assertEqual(C.count, 1)
    del x
    self.assertEqual(C.count, 0)
    l = [C(), C(), C()]
    self.assertEqual(C.count, 3)
    try: # too many values
        a, b = iter(l)
    except ValueError:
        pass
    del l
    # All three instances must have been released.
    self.assertEqual(C.count, 0)
# Make sure StopIteration is a "sink state".
# This tests various things that weren't sink states in Python 2.2.1,
# plus various things that always were fine.
def test_sinkstate_list(self):
    """An exhausted list iterator stays exhausted, even if the list grows."""
    # This used to fail
    a = range(5)
    b = iter(a)
    self.assertEqual(list(b), range(5))
    # Growing the list must NOT revive the exhausted iterator.
    a.extend(range(5, 10))
    self.assertEqual(list(b), [])
def test_sinkstate_tuple(self):
    """An exhausted tuple iterator stays exhausted."""
    source = (0, 1, 2, 3, 4)
    it = iter(source)
    self.assertEqual(list(it), range(5))
    # A second pass must yield nothing: StopIteration is a sink state.
    self.assertEqual(list(it), [])
def test_sinkstate_string(self):
    """An exhausted string iterator stays exhausted."""
    text = "abcde"
    it = iter(text)
    self.assertEqual(list(it), list(text))
    # A second pass must yield nothing: StopIteration is a sink state.
    self.assertEqual(list(it), [])
def test_sinkstate_sequence(self):
    """An exhausted __getitem__-based iterator stays exhausted, even if
    the underlying sequence grows."""
    # This used to fail
    a = SequenceClass(5)
    b = iter(a)
    self.assertEqual(list(b), range(5))
    # Lengthening the sequence must NOT revive the exhausted iterator.
    a.n = 10
    self.assertEqual(list(b), [])
def test_sinkstate_callable(self):
    """After the sentinel is seen, iter(func, sentinel) stays exhausted
    and must not keep calling func."""
    # This used to fail
    def spam(state=[0]):
        i = state[0]
        state[0] = i+1
        if i == 10:
            # Guard: reaching i == 10 would mean the iterator kept
            # calling spam after exhaustion.
            raise AssertionError, "shouldn't have gotten this far"
        return i
    b = iter(spam, 5)
    self.assertEqual(list(b), range(5))
    self.assertEqual(list(b), [])
def test_sinkstate_dict(self):
    """Exhausted dict iterators of every flavor must stay exhausted."""
    # XXX For a more thorough test, see towards the end of:
    # http://mail.python.org/pipermail/python-dev/2002-July/026512.html
    a = {1:1, 2:2, 0:0, 4:4, 3:3}
    for b in iter(a), a.iterkeys(), a.iteritems(), a.itervalues():
        # BUG FIX: the original rebound b to iter(a) here, so the
        # iterkeys/iteritems/itervalues variants were never exercised.
        self.assertEqual(len(list(b)), 5)
        self.assertEqual(list(b), [])
def test_sinkstate_yield(self):
    """An exhausted generator stays exhausted."""
    def gen():
        i = 0
        while i < 5:
            yield i
            i += 1
    g = gen()
    self.assertEqual(list(g), range(5))
    # A second pass must yield nothing: StopIteration is a sink state.
    self.assertEqual(list(g), [])
def test_sinkstate_range(self):
    """An exhausted xrange iterator stays exhausted."""
    a = xrange(5)
    b = iter(a)
    self.assertEqual(list(b), range(5))
    self.assertEqual(list(b), [])
def test_sinkstate_enumerate(self):
    """An exhausted enumerate iterator stays exhausted."""
    a = range(5)
    e = enumerate(a)
    b = iter(e)
    self.assertEqual(list(b), zip(range(5), range(5)))
    self.assertEqual(list(b), [])
def test_main():
    """Run this module's iterator test suite via the shared test helper."""
    run_unittest(TestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
|
elainenaomi/sciwonc-dataflow-examples | refs/heads/master | dissertation2017/Experiment 2/instances/8_1_wikiflow_1sh_1s_annot_sec_wm1/outputs/ConfigDB_SessionCompute_0.py | 1 | HOST = "wfSciwoncWiki:enw1989@172.31.25.253:27001,172.31.25.251:27001,172.31.2.76:27001/?authSource=admin"
# Connection fields are left blank here; the full credentialed replica-set
# URI (hosts, user, password, authSource) is carried in HOST above.
PORT = ""
USER = ""
PASSWORD = ""
# Target database for this workflow step.
DATABASE = "wiki"
# Reads are directed to replica-set secondary members.
READ_PREFERENCE = "secondary"
# Collection read by this step and collection its results are written to.
COLLECTION_INPUT = "sessions"
COLLECTION_OUTPUT = "user_sessions"
# Prefix applied to output column names -- presumably "w_" for "wiki";
# TODO confirm against the consumer of this config.
PREFIX_COLUMN = "w_"
# Fields extracted from each input document, sorted by timestamp.
ATTRIBUTES = ["timestamp", "contributor_username"]
SORT = ["timestamp"]
# Operation: group input documents by the contributor-username column.
OPERATION_TYPE = "GROUP_BY_COLUMN"
COLUMN = "contributor_username"
VALUE = [u"!", u"! !!", u"!!! Professional Analism Account! ATTAAAAAAAACK!!!!", u"!aunbos", u"!!Aaapplesauce", u"!!Archie!!", u"!!aabgh!!", u"!!Wawawiwa!!", u"!VFT", u"!!", u"!!8uyvasdaw", u"!BOB+2!07", u"!arrizoG", u"!!House Down!!", u"!Darkfire!6'28'14", u"!Gangsta G home dogg", u"!!@!!", u"!!!!Wikipedia is Girls Aloud free web hosting!!!", u"!*peter*!", u"!MNc99", u"!Xathopeg", u"! the black spot !", u"!babininja", u"!Hazel!", u"!Dragon", u"!!gnikmrA", u"!---slappdash---!", u"!Spencer!New", u"!monkey!", u"!xoshizzle", u"\"Anyone\" Can Edit?...WELL, ALRIGHT!!", u"!jim", u"!llu$ion^", u"!melquiades", u"!spoodem", u"!vandos!", u"!xo-Bling", u"!musiclova!", u"!seakingdood", u"\"\"", u"!blardi", u"\"Cardinalen\"", u"!ndream", u"!notrub!", u"!xo Derek", u"!x0bile", u"!seakingman", u"!jrb", u"!ronstone", u"!yesmelissa!", u"!paradigm!", u"\"D\"", u"\"Good\" Editer", u"$ London Apartments", u"$$$Marlon$$$30", u"$$yuka con huevo$$", u"\"John\" 7hjg8", u"\"Sagan\"-Zane", u"\"Magic In The Moonlight\"", u"\"Country\" Bushrod Washington", u"\"DOWNLOAD FROM A PARKBENCH\"", u"$!ARA", u"$$Dragonjump$$", u"\"the\" trev", u"\"James\" 559", u"\"KP\"", u"\"ramu\" Ramakrishna Reddy", u"\"ToNToN--------Du------BLeD\"", u"\"alyosha\"", u"\"Slim\" Jim Swenson", u"\"jamy\" Jen & Amy", u"\"Glorius\" Gloria", u"\"suspected\" sockpuppet of Hamish Ross", u"\"Jerk, Beefy!\"", u"\"The Hurricane\" Gustav", u"$", u"$Quarry11", u"$1000000000ten0one1", u"$antander", u"$?", u"$TheDollarSign$", u"$hrine", u"$hadow1723", u"$ilverSetToExplode", u"$tatic", u"$toic", u"$BRollin", u"$aiyapimp", u"$ha8una", u"$nake420", u"$293,021.57!! DAYAM(!), lookadit climb!", u"$cammer", u"$kevanh", u"$tink0man", u"$id", u"$riR@m", u"$CyBeRwIz$", u"$jonathannnnnnn", u"$pongey", u"$eti", u"$olanum", u"$teVen", u"$ari$41", u"$6milliondollarman", u"$ergio 0923", u"$ter!:ng", u"$@r@h", u"$nevesso$", u"''Christoper Jones''", u"'Angel'", u"'Arry Boy", u"'); DROP TABLE en user", u"'Ff'lo", u"'O'", u"(3) Livonia Ave. 
Local", u"$wgUser", u"(.Y.)", u"'Tis of thee", u"'orghenya'", u"((froggy))1021", u"$traight-$hoota", u"%1lox47Doa", u"(", u"(:Julien:)", u"'sed", u"(*)georgenewton(*)", u"$tupid !mage", u"$yD!", u"( == : MyBigFatJewishNose", u"(3ucky(3all", u"()wolfsy", u"&*iskeyed", u"'Cause It's No Good", u"'Ivan", u"'kj;kd a;ladks;lf adsr", u"%=)", u"'bLaZe'", u"'s-Gravenhage", u"&Delta", u"'", u"'ebed", u"(;D- hymyilev\u00e4 takapiru", u"'Flow", u"'Inyan", u"'Ey Gringo!", u"'Splodey Star", u"(ICE)King", u"(Slyr)Bleach", u"(TLE)KILLER BILL", u"(Turnip)", u"(Worship Me)", u"(= Ben Hamid =)", u"(Order.) Chaos", u"(Didie)", u"(Az)Xeus", u"(Insert Name Here)", u"(TERA) Michael Balogun", u"(RT)", u"(Killers Are Quiet)", u"(Removed)", u"(= =)", u"(HUN)Villy", u"(LEET) TERRORIST", u"(Zod) Ph34r3d", u"(aeropagitica)", u"(npcserver)", u"(the real)ReCreate", u"(sic)", u"(sic)98", u"(needs) Sir Help-a-lot", u"(nz)dave", u"(insert lame and corny username here) *sniffs* how gay", u"(ms)", u"(et)t", u"(jarbarf)", u"(boxed)", u"(chubbstar)", u"(tyrone)", u"(void*)", u"))ECB((", u"*$Marie*Night*Fox$*", u"* Jessica***17", u"* and Obelisk", u"(www itshixun com)GaoXiaoZu", u"************headuway", u"*****SENIOR EXECUTIVE EDITOR IN CHIEF: WWW.WIKIPEDIA.COM*****", u"***Ria777***", u"*D", u"**VIRUS**", u"*Captain Jack Sparrow*", u"*Chosen One*", u"*Dark Dragon*", u"*ABCD*", u"*Dominicans*Rock*", u"*Drillzor*", u"***Ria777", u"*Finley Pedrina*", u"**dave**", u"*Jamila1*", u"**JMeljefe**", u"**macph***", u"*Josephine85*", u"***snitsky", u"**mech**", u"*Crusifix*", u"*Drem*", u"*Hobbs*", u"**RS**2009", u"*ABC*", u"*Daijinryuu*", u"*Hi Cool*U", u"*62", u"*Kat*", u"*MissCharmedOne*", u"*Lolita Haze*", u"*Little John*", u"*Luna*", u"*Nausikaa*", u"*Max*", u"*NeyoLover*", u"*Kiki*Krazy*", u"*Mitsuki M Hyati*", u"*NormalGamer*", u"*Mystic*", u"*Kutaka(-lu)", u"*VaS*Dave", u"*Paul*", u"*Unforgivable*", u"*SaMm&aLi*", u"*Ulla*", u"*Sam*", u"*Star", u"*Wilfred*", u"*aimee*", u"*bj*", u"*camisado*", 
u"*Rammstein*", u"*Teddy..", u"*bow-ner*", u"*Ria777*", u"*Surak*", u"*Siberian*", u"*bloodspiller*", u"*The Real Uncle Ben*", u"*PrincessofBooks*", u"*crkk", u"*crys*", u"*drew", u"*feridi\u00e1k", u"*nondescript*", u"*smb", u"*myl2000*", u"*flossie*", u"*x stab my heartx*", u"+ ixtapa6 +", u"*emokids123*", u"*johnny*1234567890", u"*my*boots*", u"*wetherill", u"*wpausa'", u"*\u0391\u03bb\u03ad\u03be\u03b1\u03bd\u03b4\u03c1\u03bf\u03c2", u"+++ATDT123", u"+20 EXP", u"+freegift", u"*nondescript", u"*star^river*", u"*hindu4life*", u"*star*", u"+J.J.C+OK", u"+SPQR", u"*jb", u"*dy-fuse", u"*jd", u"+2as", u"+op dokey", u"*hollywood*", u"*someone42*", u"*turquoise moon*", u"+kirbyyRAWRR", u"*drowned*", u"*liverguts*", u"*pixie*", u"++Martin++", u"++ungood", u"*thinkabelle*", u"+1baka", u"- -rock n roll-", u"- 45", u"- )", u"- - baseballpro - -", u"- 40", u"- 41", u"- 38", u"-) 2006", u"- martin", u"+u3)u!^ 7!3N", u"- -", u"- 49", u"-- April", u",,n", u",jdhfbwfd", u"-- -- --", u",.hjkyhuikrtdh", u"- tSR - Nth Man", u"- 42", u"--- ---", u"- Frost -", u"-*-steph-*-", u"--;", u"--=The Doctor=--", u"--XenaDance--", u"--mrs.vince.noir", u"---eek! 
A Mouse!", u"--colibri--", u"-.SLDHSK.-", u"---imaen---", u"---lorin---", u"--H-Unit-Souljah--", u"-1348-", u"--livingonadrenalinee", u"---Sol---", u"--JonPorter--", u"---adam---", u"--Strattus--", u"--stefanosteo--", u"--K--", u"--TBAS", u"--ca--mi--bach--", u"--shmexii-emo--", u"---law--and--history---", u"--wOrLd-HoLd-On--1993", u"--MK--", u"--Ril--", u"-000", u"-1g", u"-40c", u"-25.677.2.66-", u"-5-", u"-=SkYnEt=-", u"-A", u"-:KSA:-TWINTURBOSkyline", u"-=CHAINSAW GRINGO=-", u"-=PhotoN=-", u"-AV3NG3R8-", u"-=-=DARK=-=-", u"-ALDI-", u"-:-TheGodfather-:-", u"-=zsh=-", u"-ACL-", u"-=FuZzi=-Bittert", u"-=ZiG ZoG=-", u"-=HyPeRzOnD=-", u"-Alex-Xardas-", u"-=Rizzo=-", u"-Cerberus", u"-DarkPhoenix-", u"-Dist-", u"-Barry-", u"-Bobby", u"-Casualty-", u"-Demosthenes-", u"-Erdal", u"-Black Mage-", u"-Dense-", u"-Durandal-", u"-Buddhist-", u"-Cal-", u"-Emanuele-", u"-ETA-The Outsider", u"-ESC-ckd1987", u"-DjD-", u"-Edwin-", u"-Anthony-", u"-Antonius-", u"-Chiyo-Mihama-", u"-Daniel-", u"-FallingSkies", u"-E\u00f1e09-", u"-Kerplunk-", u"-Inanna-", u"-Keenan-000", u"-Frenzy-", u"-Jake-", u"-Kripta-", u"-Garret-", u"-Freebird-", u"-H2K-Hitman", u"-Ilhador-", u"-Fire-", u"-K-", u"-Grover", u"-Jack-", u"-Frank-", u"-HereIAm-", u"-Ida-", u"-JP-", u"-MSZMP-", u"-Lemmy-", u"-Lostboy-", u"-Lumi\u00e8re", u"-Lurifax-", u"-Lord-92", u"-Majestic-", u"-Mathias", u"-Marcus-", u"-Midorihana-", u"-OOPSIE-", u"-Ozone-", u"-Paul-", u"-NEKEL-", u"-Nmd77950", u"-P-", u"-Reaper-", u"-Reserve-", u"-Tehz-", u"-SA-", u"-SLASH-", u"-Ril-", u"-Strawberry-fields-", u"-Strogoff-", u"-SonyGamer", u"-Stunner-", u"-Sinthesis-", u"-Tharikifa-", u"-Septimus-", u"-Shaitan-", u"-Ril-ly on -Ril-s", u"-SainT-", u"-Stamps95", u"-Superman-", u"-The-Rocket-147-", u"-Wintermute-", u"-a-d-lynch-", u"-^Glorfindel^-1", u"-TheCompleteOwnage-", u"-Vagrant-", u"-Tsugaru-", u"-Wa-", u"-Zeus-", u"-The Great One-", u"-TwistedPokesX-", u"-VVV-Jeroen-", u"-WVU-DOHFAST", u"-The Bold Guy-", u"-VL-", u"-Trebor-", u"-aka-demia", 
u"-bilal92-", u"-c", u"-jackmac-", u"-byj-", u"-gtitom", u"-beatles-are-cool-", u"-calum-r-", u"-jkb-", u"-fin", u"-illusionz-", u"-gillyg-", u"-asx-", u"-dennis-", u"-iNu-", u"-alpha-", u"-amazonpixels", u"-cman-", u"-jkb- je upravenec", u"-frosty-", u"-hh", u"-ginkgobiloba-", u"-jem-", u"-jmac-", u"-al", u"-alex-", u"-b", u"-colugo", u"-kazaku-", u"-pac-", u"-michaela-", u"-mushroom", u"-mj", u"-ni-ni-s-", u"-lord", u"-ril-", u"-nothingman-", u"-kj", u"-oo-", u"-kylej", u"-ramz-", u"-kayac71-", u"-lukejohnson-", u"-m-i-k-e-y-", u"-oo0(GoldTrader)0oo-", u"-me-", u"-js-", u"-liam-", u"-kkm", u"-n!xon-", u"-sd-", u"-ttebayo", u"-walrus1234-", u"-sunshine-lollipops-", u"-woot-", u"-xfi-", u"...And Found", u"-ross616-", u"-sexbox360", u"-the-muffin-man-", u". JamieHughes", u"..-..", u"... is in the Shrub's head", u"...And Beyond!", u"-shobbel-", u"-shuum", u"...And will again.", u"-wrathofconn", u"-xxmellyxx-", u"-sil-", u"-vonurb", u"...---...SOS", u"-weemann-", u"-teenspirit-", u"-toto-", u".*wikiholic*.", u"-the mole-", u"-sweenyy-", u"...", u"....", u"-starter-", u"-ts-", u"..-. ..- -.-. 
-.- ..-", u"-shakalee-", u"-voice-whales", u"-xx-BabyCham-xx-", u"...adam...", u"...notalot...", u"..CJ..", u"..Sevii..", u"..zammechat", u"...approximatly", u"..BABY-BEX..", u"..ownage..", u"..TTT..", u".0", u".132.205.256.256", u".::Arbitrary::.", u".::JJB::.", u".45Colt", u".::.", u".:.", u".511 MeV", u".5 Asian", u".::POLARBE", u".:Ajvol:.", u".:Alex:.", u".:HBK:.", u".:sportychic:.", u".:Charmed Butterflie:.", u".:Forever Harry Potter:.", u".:Jenni:.", u".Dizzy.Hearts.", u".Jos\u00e9", u".IT", u".:MOMMA's BOI:.", u".Jeff.Spectre.", u".:Nicky:.", u".Absolution.", u".B(Heat)M.", u".:Debil:.", u".:SimoN:.", u".:mollzy2013:.", u".K", u".NERGAL", u".Starr Cole.:", u".M", u".alternative", u".VWBug1.", u".Zao.", u".Koen", u".Sailor.Moon.rules.", u".V.", u".Unkown coding.", u".Rukia Fujioka.", u".Tom.", u".anaconda", u".anacondabot", u".l..zZyiZZiyZz..l.", u".lIl.", u".isika", u".bulldog", u".mau.", u".cosme.", u".deShinDand", u".digamma", u".jhc.", u".hackMASTER", u".emokid.", u".learned.fool.", u".hacker101", u".c", u".matt", u".ivan the awesome.", u".derf", u".gremlin.alex", u".lil-boi14::", u".mdk.", u".tekrox", u".x3x.", u".onion", u".nix", u".skye.", u"0 Carlos 0", u".ultimate", u".s", u".nytimesCookie", u".oOTraceyOo.", u".schwrz", u".rhavin", u".snoopy.", u".telus.", u".oisyn", u".screen", u".seVer!Ty^-", u".xXx.to mega therion.xXx.", u"0", u".o.", u".phil.", u"0 tellurium", u"00 riot 00", u"0-172", u"0-Hutchy-0", u"00 buckshot", u"0 Sabaku No Gaara 0", u"0-0-0-Destruct-0", u"0-8", u"0-Jenny-0", u"0.39", u"0 ssoB giB", u"0-BoB-0", u"0-Jay-Parmar-0", u"00-penguin(yayz)", u"00000001infogirl", u"000023", u"0000z", u"001.anand", u"001012A", u"000ace000", u"000lynx", u"00-cwarren", u"000bosco000", u"000029t", u"0001", u"000nemo000", u"000person000", u"000tobba000", u"00000K00000", u"000masa000", u"000orz111", u"000012", u"0000taanstaafl0000", u"000francesca000", u"000khalil000", u"000dom000", u"0010011", u"00.03 Rumor", u"0000001", u"0000ZERO", u"0009LAH", 
u"00666", u"007JC", u"00110001", u"00527", u"007FANATIC", u"007bdp", u"001iruleu100", u"007david", u"007Changer", u"007blue", u"007bond", u"001lynsey", u"007HoLyWoLf", u"007SARAH007", u"007kz", u"0075150", u"0025Paperboy", u"007WikiDude", u"007chappy", u"007craft", u"007-Goldeneye", u"00112233", u"00420", u"007bistromath", u"007nathan", u"007FanBond", u"007Vegito", u"007XXX", u"007ketan", u"007JJ", u"002KFlash052", u"007blur007", u"007e0007e", u"008xtreme", u"007spyguy7", u"007rupesh", u"007nites", u"007patrick", u"00870077", u"008", u"007pale", u"0098huh", u"007rjd", u"007steve3", u"009Mike", u"00a00a0aa", u"00mcnabjw", u"00frodo", u"00shakti00", u"00jonnyk", u"00JDG00", u"00Sora00", u"00danny1212", u"00squirrel", u"00Quick00", u"00alesmi", u"00lone3", u"00stick", u"00Joseph00", u"00THOMAS", u"00jayhind", u"00slayer", u"009F", u"00Ragora00", u"00Ladyluck00", u"00besmartr", u"00masmit", u"009rcker", u"00smitty00", u"00christian00", u"00fjohns", u"00hara", u"00chips", u"00g3l3ym00g3l3y", u"00pj", u"00se7en00", u"00Warlord00", u"00domenic", u"00haste", u"00liadon", u"009x", u"00LinK1785", u"00goldeneye", u"00patty", u"00zion00", u"01001", u"0101CHANky", u"00user001", u"010101011", u"00timcheng", u"01 Data 10", u"01000001A", u"0101Anson06", u"0101CHANkk", u"01 chrism", u"0101CHANlf", u"0101 grace06", u"01011000 (usurped)", u"01011000", u"0101CHANmw", u"01-Judge", u"0101CHANhm", u"0101 waie", u"0101AUsy", u"0101CHANkh", u"01000100 W 01000010", u"00vis", u"00thetruth", u"00tony", u"01 !", u"0101CHANhk. 
0101", u"0101CHANsk", u"0101CHANyk", u"0101CHEUNGyy", u"0101CHOWS.", u"0101CHANmwlydia", u"0101CHENGwy", u"0101CHENGyk", u"0101CHENGsw", u"0101CHANyy", u"0101CHEUNGKH", u"0101CHEUNGht", u"0101CHIUtl", u"0101CHANsh", u"0101CHENGsk", u"0101CHIUnc", u"0101CHOIyyj", u"0101CHANwh", u"0101CHANwy", u"0101CHENGhn", u"0101CHOWyw", u"0101CHAUwy", u"0101CHENGkm", u"0101CHEUNGwsj", u"0101CHANtk", u"0101CHANyc", u"0101CHENGyp", u"0101CHENGytf", u"0101CHONGmt", u"0101CHENGkwc", u"0101CHINsy", u"0101CHOYwk", u"0101KAMkk", u"0101KWANps", u"0101LAIsm", u"0101HOcc", u"0101FANcy", u"0101HUly", u"0101HOhl", u"0101HUANGX", u"0101KONGkh", u"0101KWOKcy", u"0101CHUNGjhk", u"0101HUIOYM", u"0101HUNGch", u"0101HungtbAnnie", u"0101KAMt", u"0101LAIoy", u"0101LAIkay", u"0101CHUNGdaeun", u"0101HOlm", u"0101HUNGmm", u"0101Jania06", u"0101KUNGsf", u"0101FUNGhy", u"0101GUDJONSSONi", u"0101HOhy", u"0101CHOYws", u"0101CHUky", u"0101FANhk", u"0101KONGwlw", u"0101LAUmh", u"0101LOcw", u"0101LAIsy", u"0101LEEsc", u"0101LEUNGsf", u"0101LAUWywa", u"0101LIhm", u"0101LAUWS", u"0101LAWsy", u"0101LEUNGKY", u"0101LUIkw marco", u"0101LEEgj", u"0101LIdy", u"0101LAMmt", u"0101LAMsy", u"0101LEUNGCAROL", u"0101LAMkp", u"0101LAMcy", u"0101LEEsk", u"0101LEUNGykk", u"0101LAIyl", u"0101LIYL", u"0101LItc", u"0101LEUNGkm", u"0101LUIkwN", u"0101LAUsm", u"0101LEEyy", u"0101LIY", u"0101LIj", u"0101LAMyk", u"0101LEEws", u"0101LEEwy", u"0101LEUNGcy", u"0101LIUyf", u"0101NGks", u"0101TANGst", u"0101Lo-peach", u"0101MAKht", u"0101NGmk", u"0101NGms", u"0101SOwk", u"0101Lamkp", u"0101Novus06", u"0101TA", u"0101TANGwy", u"0101PIAOJw", u"0101TANGhk", u"0101TANGws", u"0101NGco", u"0101NGpcn", u"0101PUNy", u"0101TANGkl", u"0101LUIml", u"0101TANGkm", u"0101Leeyy", u"0101NGtn", u"0101SZEpwe", u"0101NGhk", u"0101NUNGnsc", u"0101TAMcy", u"0101PANGth", u"0101TAITcdc", u"0101LUNGkm", u"0101TANGsf", u"0101PANGky", u"0101SHIEHkc", u"0101SOht", u"0101TOx", u"0101WONGky", u"0101WONGlh", u"0101TONGck", u"0101TSOIyy", u"0101WAIys", u"0101TSEfh", 
u"0101WANwy", u"0101WONGkp", u"0101TSANGmk", u"0101WONGol", u"0101TRANc", u"0101WONGfm", u"0101TSUIyy", u"0101WONGSK", u"0101TSANGlm", u"0101TSANGyk", u"0101WANty", u"0101WONGhy", u"0101YEUNGyl", u"0101YIPwy", u"0101WONGpl", u"0101WONGsy", u"0101WUws", u"0101YIPd", u"0101YUNGyy", u"0101WONGsh", u"0101YUENyp", u"0101WONGoy", u"0101YOUNGhy", u"0101WONGy", u"0101YIPkw", u"0101WONGyh", u"0101WONGv", u"0101YIPnw", u"0101YUENkm", u"0101WUhy", u"0101YAUtc", u"0101YEUNGhy", u"0101ba07", u"0101christina06", u"0101ZEHRAg", u"0101chisum06", u"0101ZHANGy", u"0101amber06", u"0101carinaleung06", u"0101cmesh07", u"0101YUyw", u"0101andyho06", u"0101angel06", u"0101betty06", u"0101cherrylui06", u"0101cindy06", u"0101carol06", u"0101chris", u"0101alison06", u"0101angelatky06", u"0101bessie06", u"0101YUkw", u"0101carmen06", u"0101cherlee", u"0101chingkwan06", u"0101YUh", u"0101antonie06", u"0101ariel06", u"0101christineng06", u"0101alisonlam06", u"0101anita06", u"0101Zhaoh", u"0101colin06", u"0101gpa07", u"0101iamivypoon06", u"0101crystal06", u"0101howardlo06", u"0101eileen06", u"0101inmedia07", u"0101crepe06", u"0101ghadir06", u"0101hannah06", u"0101conrad06", u"0101hazel06", u"0101ian06", u"0101ellalee06", u"0101hotammy06", u"0101david06", u"0101gina06", u"0101hannahfa", u"0101maggiema06", u"0101nialeung06", u"0101luciawang06", u"0101jade06", u"0101pera06", u"0101jen06", u"0101journalists07", u"0101margaretren06", u"0101kywinnie06", u"0101maggie06", u"0101morris06", u"0101kikz06", u"0101laylasue06", u"0101jaymee06", u"0101kissssty06", u"0101mary06", u"0101qianqian06", u"0101kenneth06", u"0101mediapower07", u"0101kristi06", u"0101mic06", u"0101midterm07", u"0101kennis06", u"0101kk06", u"0101melo06", u"0101kami06", u"0101janet06", u"0101lorettaho06", u"0101isabella06", u"0101jan06", u"0101miniho06", u"0101sheungyng06", u"0101shirley06", u"0101ss06", u"0101tonykung06", u"0101vivian06", u"0101watkatz06", u"0101queenz06", u"0101sarahwk06", u"0101takka06", u"0101tinang06", 
u"0101xuecanfly06", u"0101tifflam06", u"0101syshum06", u"0101wukaman06", u"0101sherry06", u"0101sukitian06", u"0101winky06", u"0101sandraho06", u"0101stella06", u"0101tiffin06", u"0101susanne06", u"0101winnieho06", u"0101tina06", u"0101simon06", u"0101sunny06", u"0101tsemandy06", u"0101vien06", u"0101rachel 07", u"0101zoeng06", u"01189998819991197253a", u"01253", u"0101yen06", u"01245james", u"012noobnoob012", u"01101001", u"0123465", u"01255C", u"012ella", u"0121448", u"0127", u"0101yansed06", u"01110111zeroone", u"0102ad", u"0101yolandatai06", u"0104832", u"0101yangl", u"0123456789ABCDEF", u"0101yuka06", u"0101zia06", u"0111", u"01gricer", u"01haydndavies", u"016m1", u"01bemounce", u"019045dc", u"0196nick", u"015graves", u"01FireStarter", u"01Mariah", u"01dadshj", u"01mattho", u"01:36, 21 June 2006", u"01fieldo", u"01FireStarterX", u"01mhesman", u"01560 es diego", u"01567", u"0160131", u"0149784rr", u"01halliwel", u"01horng", u"01jnorth", u"01jsmyth", u"01574871", u"01Aawad", u"01andr1d", u"01griste", u"01kkk", u"01casey", u"01domlaw", u"01lander", u"01mememe", u"01rayws", u"01satkins", u"02113", u"02auyeungtinchun", u"01p88O", u"01sjagmoh", u"020808", u"027826", u"02Pestana", u"01sharpj", u"01yusu", u"020278", u"0202gogo", u"02bhar", u"02bjahed", u"01spenderm", u"01winniethepooh", u"02138abc", u"02anneho", u"02Firth", u"02PonyGT", u"01ohnidlanoronaldinho10", u"0219631", u"01seashell", u"02500", u"02barryc", u"02birch", u"01sbrightwell", u"01vandevp", u"0222", u"02132user", u"02deweyd", u"02jefferyng", u"03carsonp", u"03froshar", u"03kehita", u"03taie", u"03141119", u"03swalker", u"03matchamdan", u"03myersd", u"03wgreen", u"02haworthc", u"02millers", u"0300738", u"0310alexander", u"03cobrasvt", u"03hawnee", u"03joshhicks", u"03thompsons69", u"02lukedoonnelly", u"02walshe", u"02woodr", u"038DBATC", u"03BTHOMPSON", u"03bjcox", u"03cranec", u"03crichardson", u"03jamesshi", u"03webberg", u"03yjahmed", u"03yuchifung", u"02ghorner", u"031586", u"03dcombe", 
u"02blythed", u"02nothing", u"02rocker", u"03242", u"03alsopc", u"03dyerj", u"03fieldj", u"03haya", u"03jmason1", u"03lambert", u"03taylorc", u"040072", u"02coulsona", u"0331marine", u"03eckersleys", u"03joe03", u"03man", u"02hodgkinson", u"02timmo", u"03asmith", u"03imicshe", u"03md", u"02got", u"02pollajo", u"03Rotpar", u"03joharr", u"03thomas", u"03vaseyj", u"02okaneMiguelsanchez", u"03 GySgt", u"03018", u"03bibbyj", u"03lewisjos", u"03whitd", u"0404343m", u"02colla1", u"038dbatc", u"03jmgibbens", u"03kirkwooda1844", u"03s", u"03thomck", u"03x072", u"03grievm", u"03king", u"03powerranger", u"02jgraham", u"02lukedonnelly", u"02saladi", u"02vallancel", u"0321recon", u"03ce", u"03rp110", u"03shirleyleung", u"02nowtho", u"03125", u"03beatob", u"03robertsj25", u"03sheepdog", u"02rossip", u"02ryantou", u"02shin", u"02small,j", u"02warrenderj", u"031158", u"03bgsingh", u"04aeverington", u"0429axxess", u"0417mac", u"0435163989dan", u"04211015ju", u"043602s", u"041744", u"042austin042", u"0424mae", u"04A13", u"0475930", u"04december", u"04jessops", u"04palmertonalex", u"05BARRob", u"05LockR", u"04birch m", u"04jhowarth", u"04kingj", u"04marjess", u"04nunhucks", u"0555", u"05", u"051000TOMSTOCKLEY", u"04croberts", u"04pafras", u"04pmehta", u"04willr", u"05benros", u"05chiltonl", u"04walkc", u"0500118s", u"05browha", u"04mandar", u"04mattstep", u"04wilsonm", u"05.gaurav", u"04spicerc", u"053bss", u"05acole", u"04parrw", u"04ssutcliffe", u"050101 manly24", u"05chevydude", u"04deloozem", u"04madhoc", u"04marjes", u"0512theking", u"059041m", u"05HuangWai", u"04blood2line06", u"04clarkr", u"05172004c", u"055203", u"0564401", u"04alexpeac", u"04anichols", u"04crustn", u"04holbrooktho", u"04pedra", u"04wyJBaines", u"051039s", u"05crf250r0622", u"04asmdg", u"04ctay", u"04megadavi", u"04williamsm", u"04gillr", u"04lwilson", u"05PatelA2", u"05clareb", u"05glanza", u"05hepburn3", u"05ericy", u"05oditam", u"05theben", u"05wiki", u"05ntheo", u"05rm01", u"05rune", 
u"05etheringtonwilly", u"05jasm02", u"05lineage", u"05rwriga", u"05cthurman", u"05johnsonn", u"05winsjp", u"05runner", u"05ksenior", u"05goodj", u"05nep", u"05hander", u"05mhughes", u"05singhk", u"05hepburn", u"05jfoley", u"05pittsd", u"05dempc", u"05g-thomas", u"05jul", u"05fcrane", u"05nitram", u"05gnowell", u"05skychau", u"05kinjac", u"05smithm", u"05wilkamy", u"05emigre", u"05f087", u"05hepburn2", u"05kayeya", u"05thomasr", u"063092laugh", u"065211105iscool", u"06capetown", u"0600661160", u"064ldingla", u"06cgordo", u"05wrightc", u"0685twc", u"06QuipitP", u"06RevnA", u"06cl2810", u"06SmithG", u"06abarnard", u"06alifar", u"0612", u"06ChambersA", u"06RBambe", u"06SidebottomJ", u"0604214g", u"069josephr", u"0608jessg", u"063006", u"067012732s", u"068152", u"06bull", u"068701341", u"0683", u"06clarkc", u"06raym", u"06singhk", u"06dhewett", u"06dsouza", u"06mascha", u"06wutang06", u"06stiege", u"06renell", u"06tpatel", u"0704monochrome", u"06e-hill", u"06readc", u"06twalke", u"06vijhk", u"06deok", u"06gormma", u"06khanmo", u"06wheawella", u"06vicoxl", u"06whitec", u"06hendersonm", u"06whitefl", u"06desilvag", u"06gkwalt", u"06tlaing", u"06gaind", u"06h11", u"06marksr", u"06phal", u"06ict318", u"06jgrabowski", u"06krana", u"06owense", u"06dave06", u"0702034", u"06ligeti", u"06slocke", u"07berobe", u"07fan", u"07jhone", u"07ShadowV", u"07agwatts", u"07ed01", u"07keera", u"07hoehne s", u"072654a", u"07757109672a", u"07kinjac", u"07S76", u"0705746b", u"07bargem", u"07g1981", u"07kulks", u"07BurkeE", u"07Hawk", u"07andrew", u"07corl", u"07demeter07", u"07andy07", u"070time070", u"07dcreaser", u"07blues08", u"07cinephile", u"07genclik", u"07holsombd", u"07WHISKEY07", u"070758voteforpedro", u"070miracle", u"07hdwideglide", u"07jor", u"07ahughes", u"07chill07", u"07grinners", u"07goodyero", u"07kanesh", u"07matmoo", u"08glasgow08", u"0800abc123", u"0811gv", u"0822721", u"07pulline", u"08RIOT", u"08albatross", u"07toope", u"07yog", u"08131428", u"0851431754", u"08gallor", 
u"08ashbyb", u"08gullij", u"07neworleans", u"07tghard", u"0807127c", u"07swettr", u"0800whatsup", u"08blandr", u"07yali", u"0800tim", u"0836whimper", u"08Posturepro11", u"08JayLaw", u"07scheurleerg", u"08-15", u"08ellisd", u"07michaelg", u"08 Voter", u"081993WV", u"08alex08", u"07p71", u"08dbs", u"07nbudd", u"07reyes07JA", u"08ewingr", u"08KTP", u"08hardingr", u"09090909090", u"09SentraSpecV", u"08mb1", u"08murtaghkc", u"08masonG", u"09 F9 11 02 9D 74 E3 5B D8", u"09758576", u"0987654321abc", u"09GKsoc", u"08longr", u"09876", u"09amethystlai", u"095228004Zach", u"098today", u"0996slam", u"0918665gbndD", u"09achan", u"09BennyBoy13", u"09 gheussler", u"0987", u"08october", u"098iop", u"08jh", u"08jpotter", u"08proush", u"0955ADLC", u"097344p", u"08stones a", u"0987654321a9", u"08lazizcan", u"08rbilto", u"08tpierc", u"08uk", u"09aog", u"08smithb", u"090-chall", u"09arvincent", u"08jriche", u"08yuillj", u"09bmarcu", u"08milluz", u"08prl", u"099 peter", u"0991zayram", u"09cwells", u"09dphelps", u"09cravelo", u"09dazza", u"09dormani", u"09creenc", u"09curranm", u"09dorffd", u"09dspatel01", u"09f91102", u"09hannah09", u"09rox!", u"09lwwsjoq", u"09jamsutherland", u"09jturner", u"09laclair", u"09nava", u"09patrick", u"09smorales", u"09gebejo", u"09lwsjoq", u"09unc", u"09wetsto", u"09rosso", u"09grahas", u"09kmeyer", u"09nick", u"09jblowme", u"09er", u"09smallj", u"09jtaylo", u"09poopers", u"09i70o7nj0h", u"09ken09", u"09kjkl;", u"09tait09", u"0Aldrin0", u"0Bammer", u"0CD therapist", u"0XQ", u"0bsessive c0mpulsive dis0rder is YOU", u"0LiamLegend0", u"0MS 40490", u"0Wizzer0", u"0Andrew0", u"0and1", u"0CD therapy help", u"0SC's Just John", u"0atsuatski0", u"0Brahma0", u"0Ihavenousername0", u"0Piper0Halliwell0", u"0bsessive c0mpulsive dis0rder united", u"09wiki", u"0Mostel", u"0ak", u"0bsessive c0mpulsive dis0rder & wikipeedia ADDICTION", u"0CD is you", u"09yalie", u"0CD Grandmasterka", u"0T0", u"0Temp", u"0CD therapy", u"0Neosis", u"0TH iS L0VE", u"0CHUEY0", u"0Exo-Man0", 
u"0a0203", u"0crandall", u"0goodiegoodie0", u"0group19", u"0cm", u"0firefly0", u"0decibel", u"0filip0", u"0hisa2me", u"0hn035!H4xx0r!", u"0coooool1", u"0bvious", u"0ccam", u"0idonthaveausername0", u"0gpwns", u"0densRaven", u"0fatman0", u"0cdcnctx&", u"0g", u"0hmy123", u"0dimensional", u"0dd1", u"0hpenelope", u"0g1o2i3k4e5n6", u"0ceans11", u"0d1e1ken0b1", u"0garrett0", u"0ilikemoney0", u"0imagination", u"0irc", u"0johnsmith0", u"0k4mi PCD", u"0innocence0", u"0kdal", u"0jhoneycutt", u"0jspicer", u"0ld13", u"0meaning", u"0matter", u"0kty", u"0mega00", u"0l3oodrush0", u"0kmck4gmja", u"0laf15O", u"0lucy7", u"0mel", u"0lexa0", u"0lhe", u"0lorenzo0", u"0ld", u"0nion1337", u"0nn3", u"0nullbinary0", u"0rgyP0rgy", u"0mfgitssnowing", u"0nonanon0", u"0nslaught", u"0pulse", u"0r4ngecrush", u"0mona0", u"0o floccinaucinihilipilification o0", u"0mnipotent", u"0nizuka the Great", u"0ooo", u"0reteki", u"0positive", u"0reis19a", u"0plusminus0", u"0range-cn", u"0nlyth3truth", u"0oors", u"0oPiEo0", u"0okie", u"0pos0sop", u"0o zappy o0", u"0rac1e", u"0neguy", u"0nimaru", u"0paqu3 r3as0n", u"0o64eva", u"0oToddo0", u"0pen$0urce", u"0o-JayParmar-o0", u"0pera", u"0o", u"0oo", u"0pt", u"0scalefactor", u"0siris", u"0thom1", u"0username", u"0x", u"0s1r1s", u"0stimpy0", u"0sd9gj0s9:D:D", u"0silverfire", u"0therean", u"0starstone0", u"0taku.Storm", u"0webkinz0", u"0wn4g3 41if3", u"0sd0", u"0spooy0", u"0v1d1u", u"0ssoup0", u"0ts0", u"0vic03", u"0wnd1zzl3d", u"0solar0", u"0v1d1o114", u"0waldo", u"0rrAvenger", u"0seveN", u"0tsa0", u"0utlaw", u"0vermind74", u"0times6", u"0tophy1", u"0utsider", u"0x01", u"0vanessa", u"0v3r533r", u"0x0n", u"0x30114", u"0x803A", u"0x7048", u"0x6adb015", u"0x38I9J*", u"0x539", u"0x54097DAA", u"0x6D667061", u"0xFFFF", u"0xFFFF0000", u"0xFE", u"0xF", u"0xRAIN", u"0x845FED", u"0xRougex0", u"0xIMPLOSION", u"1 Brett Favre", u"1-1111", u"0xd34df00d", u"1 Lucky Texan", u"1-14FA", u"1 2know", u"1&^dzzzt", u"1&only", u"1 black hand", u"0xrandomx0", u"1+2x3=9", u"1 Cent In Mind", 
u"1 Meat and 2 Veg", u"1 flagler point", u"1 wit da force", u"0xxAprilxx0", u"0z-max", u"0zymandias", u"0zzy", u"1()", u"1-555-confide", u"1 20 O9", u"1 knick fan", u"1 moonlight", u"1-54-24", u"0zzBandit", u"1 use", u"1 wikip 1", u"10.doctor", u"10 brownj", u"10 Ants", u"10-JMB", u"100 Club TORRES", u"100%BulletProof", u"1-point", u"100% FreeJack", u"100% Christian", u"1.21 jigwatts", u"1-no-name-1", u"10 goal payne", u"10 steps behind the princess", u"10,000 Smokes", u"10.31.90a", u"10.maurya", u"1.6.2007.b", u"1.FC N\u00fcrnberg", u"1.WhoDatSwizBeats", u"10 000 thundering typhoons", u"10 year old boy", u"100 Kerr Street", u"1-800missingmarbles", u"1-is-blue", u"1.618033989", u"1-Zema-1", u"10 July 2009 Timmeh", u"1.1.1", u"1000sjewelry", u"1000kHz", u"10011", u"100496RF", u"1000 Sangheilis and Darknuts", u"10000 Walls", u"1000february", u"1000tubby", u"100%freehuman", u"10014derek", u"10027a", u"1001Bob", u"1001 Jokes", u"1001101101010110", u"1001nuits", u"10025", u"1000moleman2", u"10001er", u"1001001", u"100110100", u"10072osi", u"1000%!", u"1000amethyst", u"10012guy", u"1001lala", u"100%RatedR", u"100-bit-machine", u"10001", u"100056255", u"1000MHz", u"1000poems", u"1000Faces", u"1000STACKS", u"100%RSA", u"100percentkrazy", u"100anpr", u"100charlie", u"100dash", u"100m", u"101001", u"10101kurti1001010", u"101090ABC", u"100legacy100", u"10101001011", u"100Club", u"100Gig", u"100humbert", u"100percentColombian", u"10101", u"100A9aaaCC", u"100colours", u"100straight", u"100MilesAndRunnin'", u"1010011010", u"10101011101", u"100sbo10", u"1010902", u"1010Capitol", u"101010e9", u"1009MIX", u"100DashSix", u"100hundred", u"100percentdesign", u"100pfanner", u"100chuk", u"100percentrecord", u"1011854", u"101202771", u"1013-Isaac", u"1013-alex", u"1013-Marty", u"1013-Nqua", u"1011ski", u"1013-christi", u"1013-Andrew", u"1013- katie", u"1013-Lisa", u"1013-andy", u"1013-Jeff", u"1013-josh", u"1013-luys0001", u"1013-precious", u"1013-David", u"1013-kate", u"1013-Brendan", 
u"1013-Dan", u"1013-jessica", u"1027E", u"1013-shishi", u"101TomA101", u"101greatesthits", u"101i", u"101mhoshizaki", u"101Whatup101", u"101peircems", u"101phones", u"1020J", u"1013-shae", u"1013-whittney", u"10244093le", u"101easycompany", u"1013-shannon", u"1020bravozulu", u"1024x768", u"101FrenchKisses", u"1013-rey", u"1027", u"1020hhhh", u"10171990snow", u"101jedi", u"108stitches", u"1031james", u"1080jim", u"1029384k", u"102davidroad", u"103penguin", u"1066Power", u"10285658sdsaa", u"1039sc", u"104thAirborne", u"108059", u"1029man", u"1066Seagull", u"1060kHz", u"1028", u"102RB", u"1050Benton", u"1085N", u"1090467k", u"102motox", u"1041broadcast", u"108 Stars", u"104066481", u"104greenwood", u"1066seagull", u"1069thesurge", u"10851man", u"10538trinity", u"1029yoshii", u"108BenjaminLinus", u"104Serena", u"108.00a", u"103momo", u"103saloo7", u"10Tinsleyn", u"10blaken", u"10draftsdeep", u"1092q", u"10987sa", u"10931", u"10987", u"10String guitar", u"10careyi", u"10dobbinsd", u"10coxj", u"109jln", u"10courtneyc", u"10dellis", u"10July", u"10credits", u"10crabapple", u"10dxholcomb", u"10broderick", u"10dutcher", u"109Eastside", u"10carfer", u"10carfja", u"10dixong", u"10dkafka", u"10dug01d", u"10X445", u"10cents", u"10caart", u"10drill10", u"1099pro", u"10YUGIOHKING", u"10comforts", u"10nn", u"10eddy", u"10kbartman", u"10kdaghistani", u"10euro", u"10hello10", u"10incher", u"10krunner", u"10minutes", u"10nitro", u"10luef", u"10mahony01", u"10mcleod", u"10metreh", u"10inchesunbuffed", u"10jlyn", u"10max01", u"10foot10", u"10lbs of potatoes", u"10kkenney", u"10mmf", u"10kmk", u"10lgauta", u"10morenoe", u"10grandSLOBIT", u"10pin", u"10slvr96", u"10outof10die", u"10redlight", u"10percentcharlie", u"10robbie10", u"10speed", u"10qwerty", u"10rudmar", u"10plakon", u"10silop", u"10stone5", u"10shistory", u"10sutton", u"10to100", u"10stuece", u"10ta", u"10widdse", u"10thdayoftheweek", u"10tracy", u"10sw1y", u"10xman22", u"10thcity", u"10tunneyz", u"10x10x10", u"110th Street", 
u"11110101111", u"1111elena", u"1101701010", u"1110khz", u"1111111111", u"1111lZZ", u"11 Leathal", u"110 on M3", u"11111bob11111", u"110ani", u"1111", u"1111lilkatie", u"1111mol", u"1111tomica", u"11001001", u"1111lol1111", u"1108569a", u"110011100011110000jc", u"11010", u"1111films", u"10zing", u"110fremont", u"110volts", u"11 My 11", u"111paperbag", u"1122mmg", u"1123581321", u"1116ent", u"1116sen", u"112212nnnn", u"1130130", u"111wildchild111", u"112311A", u"1111tomica (reserve)", u"1111wtG", u"11350kingsland", u"1123h", u"1127cecilia", u"111kiepie", u"112087bondam", u"1122334455", u"112andy112", u"1134 Films", u"11291Ramiz", u"11341134a", u"1122micky", u"112whileplan", u"113", u"1121331a", u"1125csl", u"1121Ben", u"112233445566", u"1123dbn", u"111Travis555", u"113385666a", u"111bigfella", u"11221", u"112233048710k", u"112ahern", u"1159812161", u"1169jason", u"116redrock", u"1147332726", u"116 CliqueFlyR", u"116135", u"113david", u"1159th", u"1149330310", u"116Benz", u"116Rebel", u"1148000594", u"116Calvinist", u"1136west", u"11679", u"1147858154", u"117Avenue", u"11987", u"118.9O.9O.116", u"11:31 P.M.", u"117pollo", u"11K", u"11B", u"11MarkHoKaMing", u"119Jamie", u"118Froggy", u"11999ttt999999999", u"117hax", u"11Dunc11", u"11Meredj", u"11Eagleflight", u"117th", u"119", u"11JamesK", u"118118pwincessChloe118118", u"11844a", u"11MarkONE", u"119s4kid911", u"11aaronl", u"11brian", u"11maxwarie", u"11december", u"11nwirth", u"11parkjj2", u"11aargyle", u"11blue", u"11boyd11", u"11gaudrco1", u"11jason11", u"11jx", u"11kowrom", u"11lani11", u"11ngsp1", u"11bje11", u"11brando22", u"11amayrsohn", u"11cazrycrelk", u"11milleran", u"11aa22bb", u"11Reaper11", u"11godfreje", u"11kravitzn", u"11boifoef", u"11lussis", u"11nirvana", u"11james22", u"11hessj sidney", u"11agbonlahor11", u"11blackroses", u"11montel", u"11nacholibre", u"11achitturi", u"11ellisz", u"11guy", u"11alphabet11", u"11kellen11", u"11moffap", u"11langleyj", u"11parkjj1", u"11roses11", u"11suppakit", 
u"11psleig", u"11rey619", u"11stroud", u"11ssims", u"11tberkey", u"11thearlofmar", u"11prince11", u"11vert11", u"11watkins", u"11penguin11", u"12 Centuries", u"11schrp", u"11quark92pa", u"11trivia", u"11thedition", u"11rslayden", u"11up3down", u"11vegeta11", u"11seller11", u"11tas", u"11thmilestone", u"11warnjames", u"11stecklera", u"11stoop", u"11tercox", u"11rcombs", u"11rich3k", u"11ruoccoc", u"11v1mayhewa", u"11wiki freak11", u"12 Basti", u"12.5German", u"120129", u"12.23.54.162", u"120881hr", u"120242pp", u"1212coachz", u"12 Step", u"1203roe", u"12-21-2012", u"1219timothy", u"12 ashley e", u"1210Poppy", u"120271a", u"12 Tenma", u"12 in. pianist", u"12.000", u"1200Flasher", u"1200custom", u"121314151617g", u"12 Noon", u"12 Year Old Geek Kid", u"120Degrees", u"120albany", u"1204DE", u"1208spinjaco", u"1210donna", u"1204yeti", u"12121212121212ben", u"120296harry", u"120babies", u"121792garage", u"121a0012", u"121jake", u"122589423KM", u"122regular", u"123.moreno", u"123123123123hahaha", u"1234!", u"12345678987654321", u"12345 lewis", u"121eLigne", u"12345 wiki", u"12261mw", u"1231232", u"12345678910GraceIsAwesome", u"121nfox", u"123 1", u"123 montreal", u"12345 cornelius", u"122jb1", u"123315", u"12341234abc", u"1234567891o", u"122Neopets lover121", u"123 Kidd", u"12341234lol", u"12345678", u"12345678910john", u"123456789ABCDEFGHIJK", u"123 nano", u"1234567890boy", u"123 abc", u"123-SAM", u"123321fun", u"123456789a1", u"123 zenbaby", u"12334er", u"12344", u"12345678910e", u"121translation", u"123 lmnop", u"12345 6", u"121riya121", u"123123123qweqweqwe", u"123456veshrepresent", u"123456789mark", u"123456789qwertyuiop123456789", u"12345ak", u"12345anpr", u"12345ccr", u"12345asdfghjkl;'", u"12345blake", u"123456789bot3", u"123456nm", u"123456789ten", u"1234567man", u"123456qwe", u"123456789asdf", u"12345bestcomes", u"123456789piemandead", u"123456abcdef123456", u"123456gamma", u"123456789kyle", u"123456789psdfgetrd", u"123456c", u"123456nbvdfgghj", u"123456987", 
u"12345LLAMAZ", u"123456789lol", u"123456798", u"123456abc", u"12345drbob", u"123456789abc", u"12345abcxyz20082009", u"123456llama", u"123456sq", u"12345e", u"12345kat6789natv03", u"1234BOUNCE", u"12345hi", u"1234asdf98", u"12345ka", u"1234BobDole", u"1234Sticky", u"1234baller5678", u"12345mw", u"1234David", u"12345green678910", u"12345own", u"1234abc", u"12345oicafa", u"12345user", u"1234Pumpkin", u"12345once", u"12345sixseven", u"1234abc1234", u"1235e532", u"1234joha", u"1234john1234massai1", u"123TOBY123", u"1234givetelestracustomarmore", u"1234lol", u"123JinKazama123", u"123Tod", u"1234hellowanker", u"1234kenttna", u"1234ouch", u"123Triad", u"123Willom", u"123abc123babyuandme", u"123Phineas", u"1234eight", u"123Boomer", u"1235 man", u"123Dan123", u"123Sunny", u"1234bestie", u"1234cherSB", u"1234fox", u"123DUNDON", u"1234gadget", u"1234mike1234", u"1234pops", u"1234u", u"1234urmama", u"123MFL", u"123Repeater!", u"1234bear", u"123Mike456Winston789", u"1236howard", u"1234peace", u"1234summerisastupidwhore", u"123ACB", u"123Contact", u"123FM", u"123Hollic", u"123789", u"123admin", u"123d123", u"123ddd", u"123ert321", u"123abcdoreme3", u"123awesomeyeah", u"123ad", u"123candy", u"123craigyboy", u"123ellie123", u"123episkopon", u"123aclock", u"123dest", u"123dylan456", u"123edit456", u"123dieinafire", u"123brenda4567", u"123gabriel123", u"123abcde", u"123abcman", u"123asskicker", u"123don'tgetbanned", u"123fakestreet", u"123gf456", u"123abcdoreme2", u"123andy321", u"123botname", u"123destiny321", u"123dumb", u"123edc0", u"123flamenco", u"123awe", u"123biggerfish", u"123eronisle", u"123buklemyshoe", u"123davieboy", u"123ewq123ewq", u"123davidn", u"123abc123zzz", u"123home123", u"123ideclareathumbwar", u"123loulou123", u"123packers", u"123pink", u"123mkg", u"123ho", u"123nik321", u"123lalala1", u"123mintdog321", u"123monkeys", u"123ma456s789on", u"123music", u"123ninja", u"123hannas", u"123green", u"123hari", u"123kc456", u"123kevin123", u"123gilly321", u"123hello46", 
u"123helpme123", u"123jason345", u"123jenn", u"123lilani", u"123petitesouris", u"123learning", u"123pecca", u"123indianses", u"123jim123", u"123mahaveer", u"123hihi", u"123jules", u"123owca321", u"123o", u"123lsr", u"123jac123", u"123lkik", u"123local", u"123lol123", u"123penguin123", u"123ggg", u"123hargeisa", u"123imfree", u"123kid", u"123random123", u"123torrent", u"123samaster", u"123tinja", u"123qweasdfr4", u"123suppy", u"123tanman123", u"123wiki000", u"123samantha", u"123unoduetre", u"123qwerty890", u"123saolis", u"123shine", u"123pull456", u"123sammy", u"123wikiworm", u"123shayne", u"123username", u"123pspdvd", u"123wade123", u"123wiki123", u"123pok", u"123scarlet123", u"123vkk321", u"123well", u"123writeit", u"123pra", u"123tenten", u"123weber", u"123wizard123", u"123woo123", u"123qazwsx", u"123thakar123", u"123tadayou", u"123wise", u"123steve11", u"123zombie", u"127", u"127.0.0.1:eighty", u"12Ektabar", u"12Shark", u"12acrobatgreen", u"12barBlues", u"12crown", u"12fieldamb", u"1255", u"12554ib", u"125hjose", u"127x0x0x1", u"128flashfire", u"1297", u"12Noodleman", u"12cool725", u"126tanker", u"127.o.o.1", u"12adda", u"12bigbrother12", u"12bsilling", u"12db", u"12george1", u"12hernn", u"12jkl", u"12jmedd", u"12lmaxwell", u"123xyz456", u"123zane321", u"123zhongqing", u"127001", u"127078hss", u"12Ghost12", u"12Gooner89", u"12Iceman", u"12Tree", u"12VELMA", u"12jamie34", u"12jn", u"123xpac", u"129.", u"12gaugefury", u"12inchjack", u"1245Dylan", u"12Bent12", u"12Redinolli", u"12YrsCounting", u"12er", u"12kellda1", u"12:28 QM", u"12cookk", u"12fred", u"12grange", u"12guage", u"12keithchan", u"1251thestrokes", u"125t", u"12939", u"12marshd", u"127\u00b80\u00b80\u00b81", u"128.104.truth", u"12Dorsa152", u"12bogdanicha", u"12bondra12", u"12cebert", u"12Hawk", u"12killa", u"124Nick", u"124bichoo", u"125lee", u"12cabrera", u"12chapman", u"12firemage", u"12george11", u"12hctawkcalB", u"124haven", u"125park", u"12Createaccount", u"12458789a", u"1279cat", u"12Mason34", 
u"12apagnotta", u"12davmaster345", u"12dstring", u"12jms", u"124059", u"1263linville", u"127ken", u"128m6", u"12PACK", u"12a02a89a", u"12eewew", u"12feet12", u"12ian34", u"12jchristensen", u"124.169I", u"124j", u"128kgs", u"12beasts", u"12hat34", u"12hearts", u"12iddler", u"12insan", u"12kellda", u"12416girl", u"1243trel;ghsfdk;lgsfd", u"12764", u"12989", u"129UCFKnight", u"12:51", u"12Fish", u"12oz", u"12ptHelvetica", u"12thNight", u"12whatever12", u"12wildcats12", u"12thwxman", u"12orangelemurs", u"12wqasdf", u"12quality", u"12nw", u"12showlace", u"12snyder", u"13 Mark 13", u"12neo12", u"12tmccauley", u"12tonton12", u"12oz.mouse", u"13 and Sensible", u"12minicr", u"12va34", u"12phil12", u"12rtk19", u"13 Bottles of luck", u"12oasis34", u"12quim12", u"12scolsrud", u"12ticeb", u"12nayr", u"12novelreach", u"12pen2", u"12renne", u"12thShare", u"12wally21", u"12wattsm", u"12redref", u"12spots", u"12thMarquis", u"12yearoldkid", u"12p3m", u"12sa", u"1313ktm", u"1337editor", u"1304Felix", u"1331331331", u"1337 5p34k", u"1337h4xx0rs", u"1300khz", u"1337 pl0x", u"13231323a", u"1337 haXX0r lolz", u"1337hacker", u"1312wiki", u"1337 ninj4", u"1337Garda", u"1337h4x", u"132qwerty", u"1337XD1337Sauce", u"1337chris", u"133618s", u"13 of Diamonds", u"1337 H4XZ0R", u"1337369", u"1337donald", u"1337freek", u"131596", u"1337 pwnzr", u"1307chesterfield", u"1313 South Harbor Blvd. 
Anaheim, CA", u"13-days", u"131Alex131", u"13245yellow678910", u"1337 r0XX0r", u"1337h4x0rz", u"1337 A.D", u"1337 AzN", u"1337Frank", u"1337st", u"1337haxor1992", u"1337macro", u"1337seth", u"1357ttt", u"138.217*136.17a", u"13Stunna", u"1337platypus", u"1337wesm", u"133mhz", u"136170aaron", u"139", u"1337monkee", u"1337pino", u"1366 Technologies", u"13670174", u"133wandering", u"137park", u"1337z0r", u"1337zorSKillz", u"13RACE", u"1377chunter", u"139f", u"1337n00blar", u"134679adg", u"135798642", u"136Socrates", u"13Curtain", u"135LUIGI", u"13Anonymous 49", u"13Badz13", u"13579king", u"13756", u"13Gregor", u"137270s", u"138", u"13alexander", u"13ambience", u"13djb13", u"13hockey", u"13lackhat", u"13blake13", u"13ennett", u"13jej", u"13ideas", u"13kielea", u"13dev", u"13fishyu", u"13Tawaazun14", u"13junior15", u"13agutierrez", u"13dble", u"13meadowland", u"13enzoate", u"13YANIV", u"13curseof", u"13ddas", u"13fakestreet", u"13francisco13", u"13jabielee", u"13kelleyd", u"13XIII", u"13elite37", u"13afuse", u"13aquamarine", u"13liat13", u"13cgiblin", u"13cgrubb", u"13kems", u"13bbrady", u"13bubbles", u"13melek", u"13ecap13", u"13oitinglo", u"13mk436", u"13montel", u"13rady", u"13rainvillek", u"13sharonj", u"13riski", u"13rento", u"13smithwalker", u"13scott", u"13olsender", u"13seer", u"13randonM", u"13scott31", u"13moons", u"13mullja", u"13merriaa", u"13nov95", u"13parker13", u"13steve13", u"13xx", u"142 and 99", u"13unny", u"13yangm1", u"1414rwbt", u"13thstar", u"1400khz", u"13thfloor", u"14 Torres-Jess", u"14160aldora", u"13sullj", u"13westrn", u"14.ashutosh", u"141", u"13th pirate", u"1408 Film Fan", u"13thpatriot", u"13tyrone13", u"13th Accord", u"13thchild", u"1413montana", u"13ten", u"13thcentury", u"14---blood---144", u"1400degrees", u"14082009aug", u"13soccer", u"13taylor", u"13wanderlust", u"14 June", u"141098472k", u"13th", u"1418", u"13th Law Lord", u"1407roni", u"14092786", u"14305 Man", u"14Truth", u"14days", u"14mene", u"144334", u"14chung", u"14mcdomt", 
u"145childrenondeathrowIran", u"147ronnie", u"14mcm", u"14p mkaef", u"145j", u"147AM", u"14mRh4X0r", u"14ashley14", u"14kempsfield", u"1452147896325O", u"1490khz", u"142Roderick", u"1431jt", u"1456ghk", u"14ellie41", u"144", u"14buckets", u"14jjohnson", u"14joyb", u"14Ave", u"1423", u"1435mm", u"1471", u"1492bozo", u"14545", u"14FIFTY", u"14achyo", u"14music", u"14368", u"148Tomra", u"14ledzeppelin", u"14ssolan", u"150rushing", u"14th Taicho", u"14wiki1414", u"14y", u"14u2no", u"1500sampark", u"14rolemac", u"14srivas", u"14tm10993", u"14peace", u"14schltr", u"1500M", u"14victoire", u"150489", u"14th Armored Division Association", u"14wesley", u"150 Music", u"150omega", u"14pricja", u"14thArmored", u"14page", u"1549bcp", u"154west", u"158-152-12-77", u"151st Egg", u"15911", u"151DLzer", u"157.228.x.x", u"158.64.21.3", u"156ableitem", u"1541media", u"158.64.22.226", u"151.200.121.237", u"1537ohno", u"153wos", u"1564shakespeare1616", u"15216310013", u"157,684,368,245", u"15357", u"155ws", u"151Henry151", u"1523", u"159tery", u"159F357B", u"15InfiniteJest", u"15GoGators", u"159753", u"15Mickey20", u"159ufo159", u"15ParkRow", u"15thWardWestBank", u"15keramiek19", u"161.2.56.147X", u"15mhchung", u"15mm-spikes", u"15peaksin24", u"15swanal", u"15th ward", u"15superchic15", u"15year oltl fan", u"161.2.56.148X", u"15guess", u"15Stitches", u"15cman", u"15lsoucy", u"15rthughes", u"15th hour", u"15ymrpgr", u"160", u"15avaughn", u"15versts", u"15ddas", u"15jdiltz", u"15run02p", u"15Xin", u"15tedaty25", u"15th Floor Poet", u"15x2+3", u"16 desember 6 januari", u"15Select", u"15qdotnet", u"16375a92d874b75g83h759d3840", u"163rd", u"1658", u"16445970cl", u"161battery", u"1670JAMESS", u"1620050", u"168..", u"1619jm", u"1611kjb", u"1644", u"1628japan", u"1652186", u"1622738", u"16189", u"162141", u"1645ladas566afsafgandalfmarctian", u"164fagh6a746", u"166 66 16 103", u"16761", u"168...", u"16836054", u"169-254-13-37", u"16=O in a P", u"168pin", u"1690khz", u"169", u"16@r", u"16keeper", 
u"16kokiyin", u"16cedars", u"16x9", u"16toki", u"17!!!a", u"16ronaldlai", u"16Oct1981", u"16angel05", u"1700-talet", u"17025specialist", u"16vmarc", u"16leahy", u"17", u"16cya!", u"16sectors", u"16xmacx13", u"16years", u"16V567", u"16draw33ia", u"16greenday16", u"16cubfan", u"16ruenicolo", u"171046", u"172", u"177777", u"1779Days", u"1730data", u"173rd", u"175392e", u"179589alex", u"172GAL", u"176thWingPublicAffairs", u"1776bob", u"177mswg", u"17589er", u"1729", u"17482opan5", u"1792aug", u"1750DH", u"176charlie", u"17717171j", u"17Browning", u"176-617", u"1743", u"178564stefan", u"17Drew", u"17rthngl", u"17mist17", u"17mooren", u"17brizzie", u"17daves", u"17reasons", u"17th Assasin", u"17NOV04MGS3", u"17Spartacus76", u"17andlilboots", u"17lausauling", u"17novembre", u"17portugal17", u"17purple", u"17Tyersal", u"17gmrtgs", u"17helpmeout21", u"17killaz", u"17taro17", u"17jake", u"1822", u"1837Tarmeto", u"1800theman", u"180223", u"18090", u"180OP", u"181tokansas", u"18277263rhyse", u"18882donate", u"1888martybhoy1967", u"183VictoriaStS", u"18-Till-I-Die", u"1801270P", u"182luvr", u"187eazycpt", u"1888 legend", u"17tracks", u"18121815usuk", u"1875", u"17v", u"18 Sid", u"180888", u"1800tarjetas", u"1812Soldier", u"185thSnakeBite", u"1860CN", u"1891jet1891", u"181thanos", u"180 \u043e\u0447\u043a\u043e\u0432", u"1812ahill", u"18 hours", u"182BCR+44AVAfan", u"186Davenport", u"187-8s", u"18 inch industries", u"183kevin183", u"180staging", u"187mixer", u"18saughtonmains", u"18th", u"19008aa", u"18OO552247O", u"18married", u"18yannick18", u"18close", u"18j1990", u"1907", u"18alex12", u"18jahrem\u00e4dchen", u"18underpar", u"18Peero73", u"18jms", u"19 Hurontario", u"190319m9", u"190editor", u"18conal1988", u"18laws", u"18pct", u"18brian", u"1904.CC", u"1894mk2", u"18liam78", u"18wongngai", u"1903barrett", u"1892 Fitch Dude", u"18Fox", u"18villamos", u"19 minutes", u"190 Proof", u"19204", u"194.83.7l.179", u"1911dealtdark", u"19191919s", u"191cool", u"194x144x90x118", 
u"1916b", u"190fordhouse", u"19193221", u"1927metropolis", u"1945AlphaTeam", u"1927 Orchestra", u"193ypico", u"1942 Bimbo Wales", u"1947c", u"190jimbob", u"1912Moi", u"192-94-73-5", u"1938superman", u"1948remembered", u"194ABrg", u"1940Movies", u"1945n2653", u"1947DB", u"190octane", u"1947project", u"19121*DN", u"192168r", u"1919saurav", u"19281987", u"19494484", u"1948vintage", u"1950salvador", u"1955dhj", u"1967treeman", u"1969", u"1971topgeek", u"1957 1947 2W", u"1973srajesh", u"1952425", u"1950hippy", u"1969Rave", u"1973beijinger", u"1975822usmc", u"1957roadster", u"1966writer", u"1968GTO", u"1975 jp", u"1958publius", u"1969ZOMG1969", u"1958", u"1967PONTIACGTO", u"1956okie", u"1959jake", u"1965Tim", u"1967GTOgoat", u"1970sammy", u"1961val", u"19748", u"1951radioguy", u"1952KingDavid", u"1952mgyb", u"1973girl", u"1980Esquire", u"1980b", u"1979Viking", u"1979spartan", u"1978risen", u"198", u"1981", u"1976AD", u"1979Miyuki", u"1977simon", u"1979omen", u"197941g4", u"198.81.26.72", u"1980 Canadian Cup Series", u"1978", u"1976grad", u"1981 graduate", u"1981Corey", u"1981editor", u"1981kid", u"1983erin", u"1987-96", u"1984whatcanido", u"1987AW", u"1989 Rosie", u"1984VH", u"1988ja", u"1984asta", u"1982Alice", u"1986Ricardo", u"199", u"1989flame", u"1990-Higgy-2007", u"1987olds442", u"1989jeep", u"19838j819j91", u"19870119", u"1989luke", u"1990F250460", u"1988benjamin", u"1990gabe2009", u"1983", u"1984", u"1987HarryPothead", u"1990bacon", u"1981willy", u"1988", u"19842112", u"1986q", u"1989girly", u"1983VF1A", u"1988LALA", u"1989Guardian", u"1981Laura", u"1990skid", u"199494", u"19960401", u"1993sanity", u"1991shahan", u"1992.2006.Reloaded", u"1990jor", u"1990man", u"1994Group", u"1996lakers", u"1993 lol", u"1997", u"1991gaz", u"1993.J", u"19930618w", u"1995hoo", u"1991GTA", u"1995-Mc-keever-1995", u"1991 lowestoft", u"1995phatheadz", u"1990hugo", u"1990ijk", u"1996 coup d'etat", u"1991Jackson", u"1991chan", u"1994lamp post", u"1996roy", u"199419941b", u"1993rox", 
u"19erXx", u"19iRock84", u"19justin", u"19Osama85", u"19Publius", u"19bethany89", u"1999blacklist666", u"19est66", u"19mikel92", u"19peter77", u"19991955", u"19dan46", u"1997HK", u"19Leyton94", u"19amytastic", u"19dog", u"199807j", u"19aifs76", u"19jp87", u"19oo19", u"19qwinn", u"19Ent", u"19munn90", u"19andy91", u"19century", u"1998molly", u"1998mollyhollyzena", u"199er", u"19Braddy93", u"19andrew19", u"19DrPepper91", u"19eighteen17", u"19merlin69", u"19sinclair51", u"19994best", u"19mfc90", u"1999screamingchicken", u"19eames53", u"19letterslong", u"19o7", u"19redsox86", u"1997sub", u"19dgbtex66", u"19Harlequin19", u"19at2", u"19daedalus88", u"19spartan71", u"19thcentury", u"1AMM3", u"1Alice41", u"1F4N", u"1A62", u"1Click", u"1AllieKat", u"1BreeBree1", u"1Chaan", u"19yearoldboyfromNY", u"1Californian", u"19troodon52", u"1Audit1", u"1FATMN", u"1FoolishMortal", u"19vwishart", u"1FanoftheArts", u"1DTPLER", u"1FastClown", u"19th Century Firearms", u"19thsavior", u"1DOSE1", u"1Dungeon Master1", u"1Candice", u"19spongeg", u"1BaraQ1", u"1CaUsa", u"1Effect", u"1B6", u"1DmkIIN", u"19thwardrunner", u"1A643E7", u"1B.Goetz", u"1ForTheMoney", u"1Gingee2", u"1QAZXCFT", u"1Newman", u"1RodStewartFan", u"1Nathan3", u"1Shaggy1", u"1Hamster9", u"1JRJORDAN1", u"1GoodNight4Life", u"1LINKPAK", u"1Memphis", u"1Helper", u"1Shank1", u"1NeverKnows", u"1NosferatuZodd1", u"1GordonH", u"1K Years", u"1Nevis", u"1Shady2", u"1Kayla2", u"1REVDR", u"1Goofy1", u"1LOST4life1", u"1Si61515", u"1OLB4"]
INPUT_FILE = "session"
OUTPUT_FILE = "user_0" |
ocadotechnology/calico | refs/heads/master | calico/test/stub_etcd.py | 4 | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.test.stub_etcd
~~~~~~~~~~~~
Stub version of the etcd interface.
"""
import logging
import eventlet
from eventlet.event import Event
# Logger
log = logging.getLogger(__name__)
class EtcdException(Exception):
    """Base class for the stubbed etcd error hierarchy."""
class EtcdKeyNotFound(EtcdException):
    """Raised by the stub when a scripted read reports a missing key."""
class EtcdClusterIdChanged(EtcdException):
    """Raised by the stub to simulate the etcd cluster ID changing."""
class EtcdEventIndexCleared(EtcdException):
    """Raised by the stub to simulate etcd's event index being cleared."""
class NoMoreResults(Exception):
    """Signals that the scripted result queue has been exhausted."""
class UnexpectedResultType(Exception):
    """Signals that a call consumed a result scripted for another operation."""
# Operation tags recorded on each EtcdResult so the stub can check that the
# caller issued the expected kind of request (read vs. write).
READ = "read"
WRITE = "write"
class Client(object):
    """Scripted stand-in for an etcd client.

    Test code pre-loads ``results`` with EtcdResult objects via the
    ``add_*`` helpers; each subsequent read()/write() call consumes the
    next scripted result in FIFO order, either returning it or raising
    its attached exception.
    """

    def __init__(self):
        # FIFO queue of scripted EtcdResult objects.
        self.results = []
        # Signalled by the test to release a caller blocked on an empty queue.
        self.stop = Event()
        # Signalled once the scripted queue has been drained.
        self.no_more_results = Event()
        # Description of the first mis-sequenced call, or None.
        self.failure = None

    def _pop_result(self, op):
        """Pop and validate the next scripted result for operation *op*.

        Raises NoMoreResults when the queue is exhausted (after signalling
        ``no_more_results`` and waiting up to 5s on ``stop``), or
        UnexpectedResultType if the next result was scripted for a
        different operation.
        """
        try:
            result = self.results.pop(0)
        except IndexError:
            if not self.no_more_results.ready():
                self.no_more_results.send()
            eventlet.with_timeout(5, self.stop.wait)
            raise NoMoreResults()
        if result.op != op:
            # Record the mismatch so the test harness can report it.
            self.failure = ("Unexpected result type for %s(): %s" %
                            (op, result.op))
            raise UnexpectedResultType()
        return result

    def read(self, path, **kwargs):
        """Return (or raise) the next scripted read result."""
        result = self._pop_result(READ)
        if result.exception is not None:
            log.debug("Raise read exception %s",
                      type(result.exception).__name__)
            raise result.exception
        log.debug("Return read result %s", result)
        return result

    def write(self, path, value, **kwargs):
        """Return (or raise) the next scripted write result."""
        log.debug("Write of %s to %s", value, path)
        result = self._pop_result(WRITE)
        if result.exception is not None:
            log.debug("Raise write exception %s", result.exception)
            raise result.exception
        log.debug("Return write result")
        return result

    def add_read_exception(self, exception):
        """Queue an exception to be raised by the next read()."""
        assert isinstance(exception, Exception)
        self.results.append(EtcdResult(exception=exception))

    def add_read_result(self, **kwargs):
        """Queue a successful read result built from **kwargs."""
        self.results.append(EtcdResult(**kwargs))

    def add_write_result(self):
        """Queue a successful write result (write results carry no payload)."""
        self.results.append(EtcdResult(op=WRITE))

    def add_write_exception(self, exception):
        """Queue an exception to be raised by the next write()."""
        self.results.append(EtcdResult(op=WRITE, exception=exception))
class EtcdResult(object):
    """One scripted response (or error) consumed by the stub Client."""

    def __init__(self, op=READ, exception=None, key=None,
                 value=None, action=None, index=0):
        # Which operation this result answers: READ or WRITE.
        self.op = op
        # Exception to raise instead of returning, or None for success.
        self.exception = exception
        self.key = key
        self.value = value
        self.action = action
        self.etcd_index = index

    def __str__(self):
        return "key={0}, value={1}, action={2},index={3:d}".format(
            self.key, self.value, self.action, self.etcd_index)
|
abhattad4/Digi-Menu | refs/heads/master | django/contrib/gis/geometry/backend/geos.py | 622 | from django.contrib.gis.geos import (
GEOSException as GeometryException, GEOSGeometry as Geometry,
)
# Public API of this backend module: GEOS classes re-exported under
# backend-neutral names.
__all__ = ['Geometry', 'GeometryException']
|
drukhil/frappe | refs/heads/master | frappe/tests/test_email.py | 4 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.test_runner import make_test_records
# Fixtures required by these tests: User and Email Account records must
# exist before any mail can be queued or received.
make_test_records("User")
make_test_records("Email Account")
class TestEmail(unittest.TestCase):
	"""Integration tests for the Email Queue: queuing, flushing, expiry
	and unsubscription. These hit the site database, so every test starts
	from an empty queue (see setUp).

	Note: uses assertEqual (assertEquals is a deprecated alias).
	"""

	def setUp(self):
		# Start each test with clean unsubscribe/queue tables.
		frappe.db.sql("""delete from `tabEmail Unsubscribe`""")
		frappe.db.sql("""delete from `tabEmail Queue`""")

	def test_send(self):
		"""Direct sendmail() of a simple message should not raise."""
		from frappe.email import sendmail
		sendmail('test@example.com', subject='Test Mail', msg="Test Content")

	def test_email_queue(self, send_after=None):
		"""Queue a mail for two recipients; both rows must be 'Not Sent'.

		Also reused by other tests (optionally with *send_after*) to
		populate the queue.
		"""
		from frappe.email.queue import send
		send(recipients = ['test@example.com', 'test1@example.com'],
			sender="admin@example.com",
			reference_doctype='User', reference_name='Administrator',
			subject='Testing Queue', message='This mail is queued!', send_after=send_after)

		email_queue = frappe.db.sql("""select * from `tabEmail Queue` where status='Not Sent'""", as_dict=1)
		self.assertEqual(len(email_queue), 2)
		self.assertTrue('test@example.com' in [d['recipient'] for d in email_queue])
		self.assertTrue('test1@example.com' in [d['recipient'] for d in email_queue])
		# every queued mail must carry an unsubscribe link
		self.assertTrue('Unsubscribe' in email_queue[0]['message'])

	def test_flush(self):
		"""Mails scheduled for the future must not be sent by flush()."""
		self.test_email_queue(send_after = 1)
		from frappe.email.queue import flush
		flush(from_test=True)
		email_queue = frappe.db.sql("""select * from `tabEmail Queue` where status='Sent'""", as_dict=1)
		self.assertEqual(len(email_queue), 0)

	def test_send_after(self):
		"""Without send_after, flush() sends everything in the queue."""
		self.test_email_queue()
		from frappe.email.queue import flush
		flush(from_test=True)
		email_queue = frappe.db.sql("""select * from `tabEmail Queue` where status='Sent'""", as_dict=1)
		self.assertEqual(len(email_queue), 2)
		self.assertTrue('test@example.com' in [d['recipient'] for d in email_queue])
		self.assertTrue('test1@example.com' in [d['recipient'] for d in email_queue])

	def test_expired(self):
		"""Backdated mails are marked 'Expired' by clear_outbox()."""
		self.test_email_queue()
		# backdate the queued rows past the retention window so
		# clear_outbox() treats them as stale
		frappe.db.sql("update `tabEmail Queue` set creation=DATE_SUB(curdate(), interval 8 day)")
		from frappe.email.queue import clear_outbox
		clear_outbox()
		email_queue = frappe.db.sql("""select * from `tabEmail Queue` where status='Expired'""", as_dict=1)
		self.assertEqual(len(email_queue), 2)
		self.assertTrue('test@example.com' in [d['recipient'] for d in email_queue])
		self.assertTrue('test1@example.com' in [d['recipient'] for d in email_queue])

	def test_unsubscribe(self):
		"""Unsubscribed addresses must be skipped when queuing new mail."""
		from frappe.email.queue import unsubscribe, send
		unsubscribe(doctype="User", name="Administrator", email="test@example.com")

		self.assertTrue(frappe.db.get_value("Email Unsubscribe",
			{"reference_doctype": "User", "reference_name": "Administrator", "email": "test@example.com"}))

		before = frappe.db.sql("""select count(name) from `tabEmail Queue` where status='Not Sent'""")[0][0]

		send(recipients = ['test@example.com', 'test1@example.com'],
			sender="admin@example.com",
			reference_doctype='User', reference_name= "Administrator",
			subject='Testing Email Queue', message='This is mail is queued!')

		# this is sent async (?)
		email_queue = frappe.db.sql("""select * from `tabEmail Queue` where status='Not Sent'""",
			as_dict=1)
		# only the non-unsubscribed recipient should have been added
		self.assertEqual(len(email_queue), before + 1)
		self.assertFalse('test@example.com' in [d['recipient'] for d in email_queue])
		self.assertTrue('test1@example.com' in [d['recipient'] for d in email_queue])
		self.assertTrue('Unsubscribe' in email_queue[0]['message'])

	def test_email_queue_limit(self):
		"""Queuing more recipients than the batch limit must raise."""
		from frappe.email.queue import send, EmailLimitCrossedError
		self.assertRaises(EmailLimitCrossedError, send,
			recipients=['test@example.com']*1000,
			sender="admin@example.com",
			reference_doctype = "User", reference_name="Administrator",
			subject='Testing Email Queue', message='This email is queued!')

	def test_image_parsing(self):
		"""Inline images in a raw email should be stored and rewritten to
		local /private/files/ URLs in the saved communication."""
		import re
		email_account = frappe.get_doc('Email Account', '_Test Email Account 1')

		with open(frappe.get_app_path('frappe', 'tests', 'data', 'email_with_image.txt'), 'r') as raw:
			communication = email_account.insert_communication(raw.read())

		self.assertTrue(re.search('''<img[^>]*src=["']/private/files/rtco1.png[^>]*>''', communication.content))
		self.assertTrue(re.search('''<img[^>]*src=["']/private/files/rtco2.png[^>]*>''', communication.content))
if __name__ == "__main__":
	# Allow running this module directly against a connected frappe site.
	frappe.connect()
	unittest.main()
|
yawnosnorous/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/encodings/cp875.py | 272 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP875 codec backed by the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode text to CP875 bytes via the encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode CP875 bytes to text via the decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP875 encoder; each chunk is charmap-encoded directly."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP875 decoder; each chunk is charmap-decoded directly."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: combines Codec.encode with the stream machinery."""
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: combines Codec.decode with the stream machinery."""
### encodings module API
def getregentry():
    """Return the CodecInfo record for registration as 'cp875'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp875',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# The encoding table is derived mechanically from the decoding table:
# charmap_build inverts the 256-entry decoding map.
encoding_table=codecs.charmap_build(decoding_table)
|
Big-B702/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/encodings/mac_romanian.py | 272 | """ Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-romanian codec driven by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        # Map each character through the module-level encoding_table;
        # 'errors' follows the standard codecs error-handling policy.
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # Inverse mapping via the 256-entry decoding_table below.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each call encodes independently
    # and 'final' needs no special handling.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding carries no state between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Combines the stateless Codec with the codecs stream machinery;
    # no extra behaviour is required.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream-reading counterpart of StreamWriter; inherits everything.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec as 'mac-romanian'."""
    return codecs.CodecInfo(
        name='mac-romanian',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# Machine-generated by gencodec.py from Apple's ROMANIAN.TXT mapping.
# Index = byte value in mac-romanian; entry = the Unicode character it
# decodes to.  The adjacent string literals concatenate into a single
# 256-character table consumed by codecs.charmap_decode/charmap_build.
decoding_table = (
    '\x00'     #  0x00 -> CONTROL CHARACTER
    '\x01'     #  0x01 -> CONTROL CHARACTER
    '\x02'     #  0x02 -> CONTROL CHARACTER
    '\x03'     #  0x03 -> CONTROL CHARACTER
    '\x04'     #  0x04 -> CONTROL CHARACTER
    '\x05'     #  0x05 -> CONTROL CHARACTER
    '\x06'     #  0x06 -> CONTROL CHARACTER
    '\x07'     #  0x07 -> CONTROL CHARACTER
    '\x08'     #  0x08 -> CONTROL CHARACTER
    '\t'       #  0x09 -> CONTROL CHARACTER
    '\n'       #  0x0A -> CONTROL CHARACTER
    '\x0b'     #  0x0B -> CONTROL CHARACTER
    '\x0c'     #  0x0C -> CONTROL CHARACTER
    '\r'       #  0x0D -> CONTROL CHARACTER
    '\x0e'     #  0x0E -> CONTROL CHARACTER
    '\x0f'     #  0x0F -> CONTROL CHARACTER
    '\x10'     #  0x10 -> CONTROL CHARACTER
    '\x11'     #  0x11 -> CONTROL CHARACTER
    '\x12'     #  0x12 -> CONTROL CHARACTER
    '\x13'     #  0x13 -> CONTROL CHARACTER
    '\x14'     #  0x14 -> CONTROL CHARACTER
    '\x15'     #  0x15 -> CONTROL CHARACTER
    '\x16'     #  0x16 -> CONTROL CHARACTER
    '\x17'     #  0x17 -> CONTROL CHARACTER
    '\x18'     #  0x18 -> CONTROL CHARACTER
    '\x19'     #  0x19 -> CONTROL CHARACTER
    '\x1a'     #  0x1A -> CONTROL CHARACTER
    '\x1b'     #  0x1B -> CONTROL CHARACTER
    '\x1c'     #  0x1C -> CONTROL CHARACTER
    '\x1d'     #  0x1D -> CONTROL CHARACTER
    '\x1e'     #  0x1E -> CONTROL CHARACTER
    '\x1f'     #  0x1F -> CONTROL CHARACTER
    ' '        #  0x20 -> SPACE
    '!'        #  0x21 -> EXCLAMATION MARK
    '"'        #  0x22 -> QUOTATION MARK
    '#'        #  0x23 -> NUMBER SIGN
    '$'        #  0x24 -> DOLLAR SIGN
    '%'        #  0x25 -> PERCENT SIGN
    '&'        #  0x26 -> AMPERSAND
    "'"        #  0x27 -> APOSTROPHE
    '('        #  0x28 -> LEFT PARENTHESIS
    ')'        #  0x29 -> RIGHT PARENTHESIS
    '*'        #  0x2A -> ASTERISK
    '+'        #  0x2B -> PLUS SIGN
    ','        #  0x2C -> COMMA
    '-'        #  0x2D -> HYPHEN-MINUS
    '.'        #  0x2E -> FULL STOP
    '/'        #  0x2F -> SOLIDUS
    '0'        #  0x30 -> DIGIT ZERO
    '1'        #  0x31 -> DIGIT ONE
    '2'        #  0x32 -> DIGIT TWO
    '3'        #  0x33 -> DIGIT THREE
    '4'        #  0x34 -> DIGIT FOUR
    '5'        #  0x35 -> DIGIT FIVE
    '6'        #  0x36 -> DIGIT SIX
    '7'        #  0x37 -> DIGIT SEVEN
    '8'        #  0x38 -> DIGIT EIGHT
    '9'        #  0x39 -> DIGIT NINE
    ':'        #  0x3A -> COLON
    ';'        #  0x3B -> SEMICOLON
    '<'        #  0x3C -> LESS-THAN SIGN
    '='        #  0x3D -> EQUALS SIGN
    '>'        #  0x3E -> GREATER-THAN SIGN
    '?'        #  0x3F -> QUESTION MARK
    '@'        #  0x40 -> COMMERCIAL AT
    'A'        #  0x41 -> LATIN CAPITAL LETTER A
    'B'        #  0x42 -> LATIN CAPITAL LETTER B
    'C'        #  0x43 -> LATIN CAPITAL LETTER C
    'D'        #  0x44 -> LATIN CAPITAL LETTER D
    'E'        #  0x45 -> LATIN CAPITAL LETTER E
    'F'        #  0x46 -> LATIN CAPITAL LETTER F
    'G'        #  0x47 -> LATIN CAPITAL LETTER G
    'H'        #  0x48 -> LATIN CAPITAL LETTER H
    'I'        #  0x49 -> LATIN CAPITAL LETTER I
    'J'        #  0x4A -> LATIN CAPITAL LETTER J
    'K'        #  0x4B -> LATIN CAPITAL LETTER K
    'L'        #  0x4C -> LATIN CAPITAL LETTER L
    'M'        #  0x4D -> LATIN CAPITAL LETTER M
    'N'        #  0x4E -> LATIN CAPITAL LETTER N
    'O'        #  0x4F -> LATIN CAPITAL LETTER O
    'P'        #  0x50 -> LATIN CAPITAL LETTER P
    'Q'        #  0x51 -> LATIN CAPITAL LETTER Q
    'R'        #  0x52 -> LATIN CAPITAL LETTER R
    'S'        #  0x53 -> LATIN CAPITAL LETTER S
    'T'        #  0x54 -> LATIN CAPITAL LETTER T
    'U'        #  0x55 -> LATIN CAPITAL LETTER U
    'V'        #  0x56 -> LATIN CAPITAL LETTER V
    'W'        #  0x57 -> LATIN CAPITAL LETTER W
    'X'        #  0x58 -> LATIN CAPITAL LETTER X
    'Y'        #  0x59 -> LATIN CAPITAL LETTER Y
    'Z'        #  0x5A -> LATIN CAPITAL LETTER Z
    '['        #  0x5B -> LEFT SQUARE BRACKET
    '\\'       #  0x5C -> REVERSE SOLIDUS
    ']'        #  0x5D -> RIGHT SQUARE BRACKET
    '^'        #  0x5E -> CIRCUMFLEX ACCENT
    '_'        #  0x5F -> LOW LINE
    '`'        #  0x60 -> GRAVE ACCENT
    'a'        #  0x61 -> LATIN SMALL LETTER A
    'b'        #  0x62 -> LATIN SMALL LETTER B
    'c'        #  0x63 -> LATIN SMALL LETTER C
    'd'        #  0x64 -> LATIN SMALL LETTER D
    'e'        #  0x65 -> LATIN SMALL LETTER E
    'f'        #  0x66 -> LATIN SMALL LETTER F
    'g'        #  0x67 -> LATIN SMALL LETTER G
    'h'        #  0x68 -> LATIN SMALL LETTER H
    'i'        #  0x69 -> LATIN SMALL LETTER I
    'j'        #  0x6A -> LATIN SMALL LETTER J
    'k'        #  0x6B -> LATIN SMALL LETTER K
    'l'        #  0x6C -> LATIN SMALL LETTER L
    'm'        #  0x6D -> LATIN SMALL LETTER M
    'n'        #  0x6E -> LATIN SMALL LETTER N
    'o'        #  0x6F -> LATIN SMALL LETTER O
    'p'        #  0x70 -> LATIN SMALL LETTER P
    'q'        #  0x71 -> LATIN SMALL LETTER Q
    'r'        #  0x72 -> LATIN SMALL LETTER R
    's'        #  0x73 -> LATIN SMALL LETTER S
    't'        #  0x74 -> LATIN SMALL LETTER T
    'u'        #  0x75 -> LATIN SMALL LETTER U
    'v'        #  0x76 -> LATIN SMALL LETTER V
    'w'        #  0x77 -> LATIN SMALL LETTER W
    'x'        #  0x78 -> LATIN SMALL LETTER X
    'y'        #  0x79 -> LATIN SMALL LETTER Y
    'z'        #  0x7A -> LATIN SMALL LETTER Z
    '{'        #  0x7B -> LEFT CURLY BRACKET
    '|'        #  0x7C -> VERTICAL LINE
    '}'        #  0x7D -> RIGHT CURLY BRACKET
    '~'        #  0x7E -> TILDE
    '\x7f'     #  0x7F -> CONTROL CHARACTER
    '\xc4'     #  0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc5'     #  0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc7'     #  0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xc9'     #  0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xd1'     #  0x84 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xd6'     #  0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xdc'     #  0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xe1'     #  0x87 -> LATIN SMALL LETTER A WITH ACUTE
    '\xe0'     #  0x88 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe2'     #  0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe4'     #  0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe3'     #  0x8B -> LATIN SMALL LETTER A WITH TILDE
    '\xe5'     #  0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe7'     #  0x8D -> LATIN SMALL LETTER C WITH CEDILLA
    '\xe9'     #  0x8E -> LATIN SMALL LETTER E WITH ACUTE
    '\xe8'     #  0x8F -> LATIN SMALL LETTER E WITH GRAVE
    '\xea'     #  0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'     #  0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xed'     #  0x92 -> LATIN SMALL LETTER I WITH ACUTE
    '\xec'     #  0x93 -> LATIN SMALL LETTER I WITH GRAVE
    '\xee'     #  0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    '\xef'     #  0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
    '\xf1'     #  0x96 -> LATIN SMALL LETTER N WITH TILDE
    '\xf3'     #  0x97 -> LATIN SMALL LETTER O WITH ACUTE
    '\xf2'     #  0x98 -> LATIN SMALL LETTER O WITH GRAVE
    '\xf4'     #  0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf6'     #  0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xf5'     #  0x9B -> LATIN SMALL LETTER O WITH TILDE
    '\xfa'     #  0x9C -> LATIN SMALL LETTER U WITH ACUTE
    '\xf9'     #  0x9D -> LATIN SMALL LETTER U WITH GRAVE
    '\xfb'     #  0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xfc'     #  0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
    '\u2020'   #  0xA0 -> DAGGER
    '\xb0'     #  0xA1 -> DEGREE SIGN
    '\xa2'     #  0xA2 -> CENT SIGN
    '\xa3'     #  0xA3 -> POUND SIGN
    '\xa7'     #  0xA4 -> SECTION SIGN
    '\u2022'   #  0xA5 -> BULLET
    '\xb6'     #  0xA6 -> PILCROW SIGN
    '\xdf'     #  0xA7 -> LATIN SMALL LETTER SHARP S
    '\xae'     #  0xA8 -> REGISTERED SIGN
    '\xa9'     #  0xA9 -> COPYRIGHT SIGN
    '\u2122'   #  0xAA -> TRADE MARK SIGN
    '\xb4'     #  0xAB -> ACUTE ACCENT
    '\xa8'     #  0xAC -> DIAERESIS
    '\u2260'   #  0xAD -> NOT EQUAL TO
    '\u0102'   #  0xAE -> LATIN CAPITAL LETTER A WITH BREVE
    '\u0218'   #  0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
    '\u221e'   #  0xB0 -> INFINITY
    '\xb1'     #  0xB1 -> PLUS-MINUS SIGN
    '\u2264'   #  0xB2 -> LESS-THAN OR EQUAL TO
    '\u2265'   #  0xB3 -> GREATER-THAN OR EQUAL TO
    '\xa5'     #  0xB4 -> YEN SIGN
    '\xb5'     #  0xB5 -> MICRO SIGN
    '\u2202'   #  0xB6 -> PARTIAL DIFFERENTIAL
    '\u2211'   #  0xB7 -> N-ARY SUMMATION
    '\u220f'   #  0xB8 -> N-ARY PRODUCT
    '\u03c0'   #  0xB9 -> GREEK SMALL LETTER PI
    '\u222b'   #  0xBA -> INTEGRAL
    '\xaa'     #  0xBB -> FEMININE ORDINAL INDICATOR
    '\xba'     #  0xBC -> MASCULINE ORDINAL INDICATOR
    '\u03a9'   #  0xBD -> GREEK CAPITAL LETTER OMEGA
    '\u0103'   #  0xBE -> LATIN SMALL LETTER A WITH BREVE
    '\u0219'   #  0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
    '\xbf'     #  0xC0 -> INVERTED QUESTION MARK
    '\xa1'     #  0xC1 -> INVERTED EXCLAMATION MARK
    '\xac'     #  0xC2 -> NOT SIGN
    '\u221a'   #  0xC3 -> SQUARE ROOT
    '\u0192'   #  0xC4 -> LATIN SMALL LETTER F WITH HOOK
    '\u2248'   #  0xC5 -> ALMOST EQUAL TO
    '\u2206'   #  0xC6 -> INCREMENT
    '\xab'     #  0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'     #  0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u2026'   #  0xC9 -> HORIZONTAL ELLIPSIS
    '\xa0'     #  0xCA -> NO-BREAK SPACE
    '\xc0'     #  0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xc3'     #  0xCC -> LATIN CAPITAL LETTER A WITH TILDE
    '\xd5'     #  0xCD -> LATIN CAPITAL LETTER O WITH TILDE
    '\u0152'   #  0xCE -> LATIN CAPITAL LIGATURE OE
    '\u0153'   #  0xCF -> LATIN SMALL LIGATURE OE
    '\u2013'   #  0xD0 -> EN DASH
    '\u2014'   #  0xD1 -> EM DASH
    '\u201c'   #  0xD2 -> LEFT DOUBLE QUOTATION MARK
    '\u201d'   #  0xD3 -> RIGHT DOUBLE QUOTATION MARK
    '\u2018'   #  0xD4 -> LEFT SINGLE QUOTATION MARK
    '\u2019'   #  0xD5 -> RIGHT SINGLE QUOTATION MARK
    '\xf7'     #  0xD6 -> DIVISION SIGN
    '\u25ca'   #  0xD7 -> LOZENGE
    '\xff'     #  0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
    '\u0178'   #  0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
    '\u2044'   #  0xDA -> FRACTION SLASH
    '\u20ac'   #  0xDB -> EURO SIGN
    '\u2039'   #  0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    '\u203a'   #  0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    '\u021a'   #  0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
    '\u021b'   #  0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
    '\u2021'   #  0xE0 -> DOUBLE DAGGER
    '\xb7'     #  0xE1 -> MIDDLE DOT
    '\u201a'   #  0xE2 -> SINGLE LOW-9 QUOTATION MARK
    '\u201e'   #  0xE3 -> DOUBLE LOW-9 QUOTATION MARK
    '\u2030'   #  0xE4 -> PER MILLE SIGN
    '\xc2'     #  0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xca'     #  0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xc1'     #  0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xcb'     #  0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    '\xc8'     #  0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\xcd'     #  0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xce'     #  0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    '\xcf'     #  0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\xcc'     #  0xED -> LATIN CAPITAL LETTER I WITH GRAVE
    '\xd3'     #  0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xd4'     #  0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\uf8ff'   #  0xF0 -> Apple logo
    '\xd2'     #  0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xda'     #  0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
    '\xdb'     #  0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    '\xd9'     #  0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
    '\u0131'   #  0xF5 -> LATIN SMALL LETTER DOTLESS I
    '\u02c6'   #  0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
    '\u02dc'   #  0xF7 -> SMALL TILDE
    '\xaf'     #  0xF8 -> MACRON
    '\u02d8'   #  0xF9 -> BREVE
    '\u02d9'   #  0xFA -> DOT ABOVE
    '\u02da'   #  0xFB -> RING ABOVE
    '\xb8'     #  0xFC -> CEDILLA
    '\u02dd'   #  0xFD -> DOUBLE ACUTE ACCENT
    '\u02db'   #  0xFE -> OGONEK
    '\u02c7'   #  0xFF -> CARON
)

### Encoding table
# Inverse mapping (Unicode character -> mac-romanian byte), derived
# mechanically from decoding_table at import time.
encoding_table=codecs.charmap_build(decoding_table)
|
yoosw/printrun_etri | refs/heads/master | pronterface.py | 13 | #!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import sys

try:
    import wx  # NOQA
except Exception:
    # Narrowed from a bare "except:", which would also swallow
    # KeyboardInterrupt/SystemExit raised while importing wx.
    print("wxPython is not installed. This program requires wxPython to run.")
    if sys.version_info.major >= 3:
        # On Python 3 the most likely cause is a wxPython build that only
        # exists for Python 2 (this codebase predates wxPython/Phoenix).
        print("""\
As you are currently running python3, this is most likely because wxPython is
not yet available for python3. You should try running with python2 instead.""")
        sys.exit(-1)
    else:
        # On Python 2, surface the original import failure unchanged.
        raise

from printrun.pronterface import PronterApp

if __name__ == '__main__':
    # Run the GUI main loop; Ctrl-C exits quietly instead of dumping a trace.
    app = PronterApp(False)
    try:
        app.MainLoop()
    except KeyboardInterrupt:
        pass
    del app
|
grilo/ansible-1 | refs/heads/devel | test/units/mock/generator.py | 63 | # Copyright 2016 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Mapping
def make_method(func, args, kwargs):
    """Bind *func* to (*args, **kwargs) and name the result after the call.

    The generated method invokes ``func(self, *args, **kwargs)`` and is given
    a descriptive ``__name__`` such as ``test_func(1, k=2)`` so each generated
    test case is distinguishable in reports.
    """
    def test_method(self):
        func(self, *args, **kwargs)

    # Render the positional and keyword arguments the way a call site would.
    rendered = []
    if args:
        rendered.append(', '.join(repr(arg) for arg in args))
    if kwargs:
        rendered.append(', '.join('{0}={1}'.format(key, repr(value))
                                  for key, value in kwargs.items()))
    test_method.__name__ = 'test_{0}({1})'.format(func.__name__, ', '.join(rendered))
    return test_method
def add_method(func, *combined_args):
    """Class decorator that attaches one generated test per argument spec.

    nose supports test generators, but they do not work with
    unittest.TestCase subclasses, so we generate methods ourselves.

    The first argument is the test function; every following argument
    describes one generated test.  A spec is a two-tuple of (positional
    args iterable, kwargs dict); a one-tuple holding a Mapping is treated
    as kwargs only; any other one-tuple is treated as positional args only.
    """
    def wrapper(cls):
        for spec in combined_args:
            if len(spec) == 2:
                args, kwargs = spec
            elif isinstance(spec[0], Mapping):
                args, kwargs = [], spec[0]
            else:
                args, kwargs = spec[0], {}
            method = make_method(func, args, kwargs)
            setattr(cls, method.__name__, method)
        return cls
    return wrapper
|
hujiajie/chromium-crosswalk | refs/heads/master | third_party/WebKit/Source/devtools/scripts/jsdoc-validator/build_jsdoc_validator_jar.py | 66 | #!/usr/bin/python
import hashlib
import operator
import os
import shutil
import stat
import subprocess
import sys
import tempfile
def rel_to_abs(rel_path):
    # Resolve rel_path against the directory containing this script
    # (script_path is assigned below; it exists by the time this is called).
    return os.path.join(script_path, rel_path)
# --- Build configuration ----------------------------------------------------
java_bin_path = os.getenv('JAVA_HOME', '')  # '' -> use javac/jar from PATH
if java_bin_path:
    java_bin_path = os.path.join(java_bin_path, 'bin')

main_class = 'org.chromium.devtools.jsdoc.JsDocValidator'  # jar entry point
jar_name = 'jsdoc-validator.jar'   # build artifact
hashes_name = 'hashes'             # manifest recording artifact/source hashes
src_dir = 'src'

script_path = os.path.dirname(os.path.abspath(__file__))
closure_jar_relpath = os.path.join('..', 'closure', 'compiler.jar')
src_path = rel_to_abs(src_dir)
hashes_path = rel_to_abs(hashes_name)
def get_file_hash(file, blocksize=65536):
    """Return the SHA-256 hex digest of *file*'s remaining contents.

    The file is consumed in *blocksize* chunks so arbitrarily large files
    are hashed with bounded memory.
    """
    digest = hashlib.sha256()
    while True:
        chunk = file.read(blocksize)
        if not chunk:
            # read() returns an empty string/bytes at EOF.
            break
        digest.update(chunk)
    return digest.hexdigest()
def traverse(hasher, path):
    """Feed a canonical description of *path* (recursively) into *hasher*.

    Directories are visited in sorted order so the resulting digest is
    stable; only .java files and non-hidden directories contribute.
    """
    abs_path = rel_to_abs(path)
    info = os.lstat(abs_path)
    # Normalise separators so Windows and POSIX produce identical digests.
    quoted_name = repr(path.replace('\\', '/'))
    if stat.S_ISDIR(info.st_mode) and not os.path.basename(path).startswith('.'):
        hasher.update('d ' + quoted_name + '\n')
        for entry in sorted(os.listdir(abs_path)):
            traverse(hasher, os.path.join(path, entry))
    elif stat.S_ISREG(info.st_mode) and path.endswith('.java'):
        # Record kind, name, size, and content hash for each source file.
        hasher.update('r ' + quoted_name + ' ')
        hasher.update(str(info.st_size) + ' ')
        with open(abs_path, 'Ur') as file:
            f_hash = get_file_hash(file)
            hasher.update(f_hash + '\n')
def get_src_dir_hash(dir):
    """Return a SHA-256 digest summarising the Java sources under *dir*."""
    sha = hashlib.sha256()
    traverse(sha, dir)
    return sha.hexdigest()
def get_actual_hashes():
    """Compute current hashes for the jar artifact and the src tree.

    A missing artifact hashes to the sentinel '0' so a first build is
    detected as a mismatch rather than an error.
    """
    hashed_files = [(jar_name, True)]  # (file name, open in binary mode)
    hashes = {}
    for (file_name, binary) in hashed_files:
        try:
            hash = get_file_hash(open(file_name, 'rb' if binary else 'r'))
            hashes[file_name] = hash
        except IOError:
            hashes[file_name] = '0'
    hashes[src_dir] = get_src_dir_hash(src_dir)
    return hashes
def get_expected_hashes():
    """Read the recorded hash manifest written by update_hashes().

    Returns a {name: hash} dict, or None when the manifest is missing or
    malformed (e.g. first build, or a hand-edited file).
    """
    try:
        with open(hashes_path, 'r') as file:
            expected = {}
            for line in file:
                # Each manifest line has the shape "<hash> <name>".
                digest, name = line.split(' ', 1)
                expected[name.strip()] = digest.strip()
            return expected
    except (EnvironmentError, ValueError):
        # EnvironmentError: manifest absent/unreadable; ValueError: a line
        # without the "hash name" shape.  The previous bare "except:" also
        # swallowed KeyboardInterrupt/SystemExit and genuine bugs.
        return None
def run_and_communicate(command, error_template):
    """Run *command* through the shell; exit the script on a non-zero status.

    error_template must contain a %d placeholder for the exit status.
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc.communicate()
    if proc.returncode:
        # Python 2 syntax: report on stderr, then propagate the exit code.
        print >> sys.stderr, error_template % proc.returncode
        sys.exit(proc.returncode)
def build_artifacts():
    """Compile the validator sources and package them into the jar."""
    print 'Compiling...'
    # Collect every .java file under src/ (sorted walk for determinism).
    java_files = []
    for root, dirs, files in sorted(os.walk(src_path)):
        for file_name in files:
            if file_name.endswith('.java'):
                java_files.append(os.path.join(root, file_name))

    bin_path = tempfile.mkdtemp()
    manifest_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
    try:
        # The manifest puts the Closure compiler on the jar's class path.
        manifest_file.write('Class-Path: %s\n' % closure_jar_relpath)
        manifest_file.close()
        javac_path = os.path.join(java_bin_path, 'javac')
        javac_command = '%s -d %s -cp %s %s' % (javac_path, bin_path, rel_to_abs(closure_jar_relpath), ' '.join(java_files))
        run_and_communicate(javac_command, 'Error: javac returned %d')
        print 'Building jar...'
        artifact_path = rel_to_abs(jar_name)
        jar_path = os.path.join(java_bin_path, 'jar')
        jar_command = '%s cvfme %s %s %s -C %s .' % (jar_path, artifact_path, manifest_file.name, main_class, bin_path)
        run_and_communicate(jar_command, 'Error: jar returned %d')
    finally:
        # Always remove the temp manifest and the compiled-classes directory.
        os.remove(manifest_file.name)
        shutil.rmtree(bin_path, True)
def update_hashes():
    """Rewrite the hash manifest ("<hash> <name>" lines) from current state."""
    print 'Updating hashes...'
    with open(hashes_path, 'w') as file:
        file.writelines(['%s %s\n' % (hash, name) for (name, hash) in get_actual_hashes().iteritems()])
def hashes_modified():
    """Return the list of (name, expected, actual) hash mismatches.

    An empty list means everything is up to date; a missing manifest is
    reported as a single sentinel mismatch so callers trigger a rebuild.
    """
    expected_hashes = get_expected_hashes()
    if not expected_hashes:
        return [('<no expected hashes>', 1, 0)]
    actual_hashes = get_actual_hashes()
    results = []
    for name, expected_hash in expected_hashes.iteritems():
        actual_hash = actual_hashes.get(name)
        if expected_hash != actual_hash:
            results.append((name, expected_hash, actual_hash))
    return results
def help():
    # Print command-line usage.  NOTE(review): shadows the builtin help();
    # kept as-is because main() calls it by this name.
    print 'usage: %s [option]' % os.path.basename(__file__)
    print 'Options:'
    print '--force-rebuild: Rebuild classes and jar even if there are no source file changes'
    print '--no-rebuild: Do not rebuild jar, just update hashes'
def main():
    """Entry point: rebuild the jar when sources changed, then update hashes."""
    no_rebuild = False
    force_rebuild = False
    if len(sys.argv) > 1:
        if sys.argv[1] == '--help':
            help()
            return
        no_rebuild = sys.argv[1] == '--no-rebuild'
        force_rebuild = sys.argv[1] == '--force-rebuild'

    # Skip the (slow) build when nothing changed, unless forced.
    if not hashes_modified() and not force_rebuild:
        print 'No modifications found, rebuild not required.'
        return
    if not no_rebuild:
        build_artifacts()

    update_hashes()
    print 'Done.'


if __name__ == '__main__':
    main()
|
deepsrijit1105/edx-platform | refs/heads/master | openedx/core/djangoapps/content/course_structures/api/v0/errors.py | 66 | """ Errors used by the Course Structure API. """
class CourseStructureNotAvailableError(Exception):
    """Raised when the course structure has not been generated yet."""
|
hwroitzsch/BikersLifeSaver | refs/heads/master | lib/python3.5/site-packages/numpy/ma/tests/test_extras.py | 14 | # pylint: disable-msg=W0611, W0612, W0511
"""Tests suite for MaskedArray.
Adapted from the original test_ma by Pierre Gerard-Marchant
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_warns, clear_and_catch_warnings
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
)
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, getmaskarray, shape,
nomask, ones, zeros, count
)
from numpy.ma.extras import (
atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef,
median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d,
ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
notmasked_contiguous, notmasked_edges, masked_all, masked_all_like,
diagflat
)
import numpy.ma.extras as mae
class TestGeneric(TestCase):
    """Tests for masked_all/masked_all_like and the clump/contiguous helpers."""
    #
    def test_masked_all(self):
        # Tests masked_all
        # Standard dtype
        test = masked_all((2,), dtype=float)
        control = array([1, 1], mask=[1, 1], dtype=float)
        assert_equal(test, control)
        # Flexible dtype
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
        test = masked_all((2,), dtype=dt)
        control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
        assert_equal(test, control)
        test = masked_all((2, 2), dtype=dt)
        control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]],
                        mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]],
                        dtype=dt)
        assert_equal(test, control)
        # Nested dtype
        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
        test = masked_all((2,), dtype=dt)
        control = array([(1, (1, 1)), (1, (1, 1))],
                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
        assert_equal(test, control)
        test = masked_all((2,), dtype=dt)
        control = array([(1, (1, 1)), (1, (1, 1))],
                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
        assert_equal(test, control)
        test = masked_all((1, 1), dtype=dt)
        control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
        assert_equal(test, control)

    def test_masked_all_like(self):
        # Tests masked_all
        # Standard dtype
        base = array([1, 2], dtype=float)
        test = masked_all_like(base)
        control = array([1, 1], mask=[1, 1], dtype=float)
        assert_equal(test, control)
        # Flexible dtype
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
        base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
        test = masked_all_like(base)
        control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
        assert_equal(test, control)
        # Nested dtype
        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
        control = array([(1, (1, 1)), (1, (1, 1))],
                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
        test = masked_all_like(control)
        assert_equal(test, control)

    def test_clump_masked(self):
        # Test clump_masked: slices covering the masked runs.
        a = masked_array(np.arange(10))
        a[[0, 1, 2, 6, 8, 9]] = masked
        #
        test = clump_masked(a)
        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
        assert_equal(test, control)

    def test_clump_unmasked(self):
        # Test clump_unmasked: slices covering the unmasked runs.
        a = masked_array(np.arange(10))
        a[[0, 1, 2, 6, 8, 9]] = masked
        test = clump_unmasked(a)
        control = [slice(3, 6), slice(7, 8), ]
        assert_equal(test, control)

    def test_flatnotmasked_contiguous(self):
        # Test flatnotmasked_contiguous
        a = arange(10)
        # No mask
        test = flatnotmasked_contiguous(a)
        assert_equal(test, slice(0, a.size))
        # Some mask
        a[(a < 3) | (a > 8) | (a == 5)] = masked
        test = flatnotmasked_contiguous(a)
        assert_equal(test, [slice(3, 5), slice(6, 9)])
        # Everything masked -> None
        a[:] = masked
        test = flatnotmasked_contiguous(a)
        assert_equal(test, None)
class TestAverage(TestCase):
    # Several tests of average. Why so many ? Good point...
    def test_testAverage1(self):
        # Test of average: masked entries are excluded from the mean.
        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
        assert_equal(2.0, average(ott, axis=0))
        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
        assert_equal(2.0, result)
        self.assertTrue(wts == 4.0)
        ott[:] = masked
        assert_equal(average(ott, axis=0).mask, [True])
        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
        ott = ott.reshape(2, 2)
        ott[:, 1] = masked
        assert_equal(average(ott, axis=0), [2.0, 0.0])
        assert_equal(average(ott, axis=1).mask[0], [True])
        assert_equal([2., 0.], average(ott, axis=0))
        result, wts = average(ott, axis=0, returned=1)
        assert_equal(wts, [1., 0.])

    def test_testAverage2(self):
        # More tests of average: weights and per-axis behaviour.
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = arange(6, dtype=np.float_)
        assert_equal(average(x, axis=0), 2.5)
        assert_equal(average(x, axis=0, weights=w1), 2.5)
        y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
        assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
        assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
        assert_equal(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0) * 2.0])
        assert_equal(average(y, None, weights=w2), 20. / 6.)
        assert_equal(average(y, axis=0, weights=w2),
                     [0., 1., 2., 3., 4., 10.])
        assert_equal(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0) * 2.0])
        m1 = zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        assert_equal(average(masked_array(x, m1), axis=0), 2.5)
        assert_equal(average(masked_array(x, m2), axis=0), 2.5)
        assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
        z = masked_array(y, m3)
        assert_equal(average(z, None), 20. / 6.)
        assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
        assert_equal(average(z, axis=1), [2.5, 5.0])
        assert_equal(average(z, axis=0, weights=w2),
                     [0., 1., 99., 99., 4.0, 10.0])

    def test_testAverage3(self):
        # Yet more tests of average! Shapes of returned weights.
        a = arange(6)
        b = arange(6) * 3
        r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
        assert_equal(shape(r1), shape(w1))
        assert_equal(r1.shape, w1.shape)
        r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)), returned=1)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
        assert_equal(shape(w2), shape(r2))
        a2d = array([[1, 2], [0, 4]], float)
        a2dm = masked_array(a2d, [[False, False], [True, False]])
        a2da = average(a2d, axis=0)
        assert_equal(a2da, [0.5, 3.0])
        a2dma = average(a2dm, axis=0)
        assert_equal(a2dma, [1.0, 3.0])
        a2dma = average(a2dm, axis=None)
        assert_equal(a2dma, 7. / 3.)
        a2dma = average(a2dm, axis=1)
        assert_equal(a2dma, [1.5, 4.0])

    def test_onintegers_with_mask(self):
        # Test average on integers with mask
        a = average(array([1, 2]))
        assert_equal(a, 1.5)
        a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
        assert_equal(a, 1.5)

    def test_complex(self):
        # Test with complex data.
        # (Regression test for https://github.com/numpy/numpy/issues/2684)
        mask = np.array([[0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0]], dtype=bool)
        a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j],
                          [9j, 0+1j, 2+3j, 4+5j, 7+7j]],
                         mask=mask)

        av = average(a)
        expected = np.average(a.compressed())
        assert_almost_equal(av.real, expected.real)
        assert_almost_equal(av.imag, expected.imag)

        av0 = average(a, axis=0)
        expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j
        assert_almost_equal(av0.real, expected0.real)
        assert_almost_equal(av0.imag, expected0.imag)

        av1 = average(a, axis=1)
        expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j
        assert_almost_equal(av1.real, expected1.real)
        assert_almost_equal(av1.imag, expected1.imag)

        # Test with the 'weights' argument.
        wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
                        [1.0, 1.0, 1.0, 1.0, 1.0]])
        wav = average(a, weights=wts)
        expected = np.average(a.compressed(), weights=wts[~mask])
        assert_almost_equal(wav.real, expected.real)
        assert_almost_equal(wav.imag, expected.imag)

        wav0 = average(a, weights=wts, axis=0)
        expected0 = (average(a.real, weights=wts, axis=0) +
                     average(a.imag, weights=wts, axis=0)*1j)
        assert_almost_equal(wav0.real, expected0.real)
        assert_almost_equal(wav0.imag, expected0.imag)

        wav1 = average(a, weights=wts, axis=1)
        expected1 = (average(a.real, weights=wts, axis=1) +
                     average(a.imag, weights=wts, axis=1)*1j)
        assert_almost_equal(wav1.real, expected1.real)
        assert_almost_equal(wav1.imag, expected1.imag)
class TestConcatenator(TestCase):
    # Tests for mr_, the equivalent of r_ for masked arrays.

    def test_1d(self):
        # Tests mr_ on 1D arrays: data and mask concatenate together.
        assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
        b = ones(5)
        m = [1, 0, 0, 0, 0]
        d = masked_array(b, mask=m)
        c = mr_[d, 0, 0, d]
        self.assertTrue(isinstance(c, MaskedArray))
        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
        assert_array_equal(c.mask, mr_[m, 0, 0, m])

    def test_2d(self):
        # Tests mr_ on 2D arrays; '1' directive stacks along columns.
        a_1 = np.random.rand(5, 5)
        a_2 = np.random.rand(5, 5)
        m_1 = np.round_(np.random.rand(5, 5), 0)
        m_2 = np.round_(np.random.rand(5, 5), 0)
        b_1 = masked_array(a_1, mask=m_1)
        b_2 = masked_array(a_2, mask=m_2)
        # append columns
        d = mr_['1', b_1, b_2]
        self.assertTrue(d.shape == (5, 10))
        assert_array_equal(d[:, :5], b_1)
        assert_array_equal(d[:, 5:], b_2)
        assert_array_equal(d.mask, np.r_['1', m_1, m_2])
        # default: append rows
        d = mr_[b_1, b_2]
        self.assertTrue(d.shape == (10, 5))
        assert_array_equal(d[:5,:], b_1)
        assert_array_equal(d[5:,:], b_2)
        assert_array_equal(d.mask, np.r_[m_1, m_2])
class TestNotMasked(TestCase):
    # Tests notmasked_edges and notmasked_contiguous.

    def test_edges(self):
        # Tests unmasked_edges: indices of first/last unmasked per axis.
        data = masked_array(np.arange(25).reshape(5, 5),
                            mask=[[0, 0, 1, 0, 0],
                                  [0, 0, 0, 1, 1],
                                  [1, 1, 0, 0, 0],
                                  [0, 0, 0, 0, 0],
                                  [1, 1, 1, 0, 0]],)
        test = notmasked_edges(data, None)
        assert_equal(test, [0, 24])
        test = notmasked_edges(data, 0)
        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
        assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)])
        test = notmasked_edges(data, 1)
        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)])
        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)])
        # On a plain ndarray every position counts as unmasked.
        test = notmasked_edges(data.data, None)
        assert_equal(test, [0, 24])
        test = notmasked_edges(data.data, 0)
        assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)])
        assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)])
        test = notmasked_edges(data.data, -1)
        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)])
        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)])
        # Mask an entire row and re-check both axes.
        data[-2] = masked
        test = notmasked_edges(data, 0)
        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
        assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)])
        test = notmasked_edges(data, -1)
        assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
        assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])

    def test_contiguous(self):
        # Tests notmasked_contiguous: slices of unmasked runs.
        a = masked_array(np.arange(24).reshape(3, 8),
                         mask=[[0, 0, 0, 0, 1, 1, 1, 1],
                               [1, 1, 1, 1, 1, 1, 1, 1],
                               [0, 0, 0, 0, 0, 0, 1, 0], ])
        tmp = notmasked_contiguous(a, None)
        assert_equal(tmp[-1], slice(23, 24, None))
        assert_equal(tmp[-2], slice(16, 22, None))
        assert_equal(tmp[-3], slice(0, 4, None))
        # Per-column runs; the fully masked middle row contributes None.
        tmp = notmasked_contiguous(a, 0)
        self.assertTrue(len(tmp[-1]) == 1)
        self.assertTrue(tmp[-2] is None)
        assert_equal(tmp[-3], tmp[-1])
        self.assertTrue(len(tmp[0]) == 2)
        # Per-row runs.
        tmp = notmasked_contiguous(a, 1)
        assert_equal(tmp[0][-1], slice(0, 4, None))
        self.assertTrue(tmp[1] is None)
        assert_equal(tmp[2][-1], slice(7, 8, None))
        assert_equal(tmp[2][-2], slice(0, 6, None))
class TestCompressFunctions(TestCase):
    """Tests for compress_nd, compress_rowcols, mask_rowcols and ma.dot."""

    def test_compress_nd(self):
        """compress_nd must drop every slice that contains a masked value."""
        # Tests compress_nd
        x = np.array(list(range(3*4*5))).reshape(3, 4, 5)
        m = np.zeros((3,4,5)).astype(bool)
        m[1,1,1] = True
        x = array(x, mask=m)
        # axis=None
        a = compress_nd(x)
        assert_equal(a, [[[ 0, 2, 3, 4],
                          [10, 12, 13, 14],
                          [15, 17, 18, 19]],
                         [[40, 42, 43, 44],
                          [50, 52, 53, 54],
                          [55, 57, 58, 59]]])
        # axis=0
        a = compress_nd(x, 0)
        assert_equal(a, [[[ 0, 1, 2, 3, 4],
                          [ 5, 6, 7, 8, 9],
                          [10, 11, 12, 13, 14],
                          [15, 16, 17, 18, 19]],
                         [[40, 41, 42, 43, 44],
                          [45, 46, 47, 48, 49],
                          [50, 51, 52, 53, 54],
                          [55, 56, 57, 58, 59]]])
        # axis=1
        a = compress_nd(x, 1)
        assert_equal(a, [[[ 0, 1, 2, 3, 4],
                          [10, 11, 12, 13, 14],
                          [15, 16, 17, 18, 19]],
                         [[20, 21, 22, 23, 24],
                          [30, 31, 32, 33, 34],
                          [35, 36, 37, 38, 39]],
                         [[40, 41, 42, 43, 44],
                          [50, 51, 52, 53, 54],
                          [55, 56, 57, 58, 59]]])
        # Tuple and negative axis specifications must behave identically.
        a2 = compress_nd(x, (1,))
        a3 = compress_nd(x, -2)
        a4 = compress_nd(x, (-2,))
        assert_equal(a, a2)
        assert_equal(a, a3)
        assert_equal(a, a4)
        # axis=2
        a = compress_nd(x, 2)
        assert_equal(a, [[[ 0, 2, 3, 4],
                          [ 5, 7, 8, 9],
                          [10, 12, 13, 14],
                          [15, 17, 18, 19]],
                         [[20, 22, 23, 24],
                          [25, 27, 28, 29],
                          [30, 32, 33, 34],
                          [35, 37, 38, 39]],
                         [[40, 42, 43, 44],
                          [45, 47, 48, 49],
                          [50, 52, 53, 54],
                          [55, 57, 58, 59]]])
        a2 = compress_nd(x, (2,))
        a3 = compress_nd(x, -1)
        a4 = compress_nd(x, (-1,))
        assert_equal(a, a2)
        assert_equal(a, a3)
        assert_equal(a, a4)
        # axis=(0, 1)
        a = compress_nd(x, (0, 1))
        assert_equal(a, [[[ 0, 1, 2, 3, 4],
                          [10, 11, 12, 13, 14],
                          [15, 16, 17, 18, 19]],
                         [[40, 41, 42, 43, 44],
                          [50, 51, 52, 53, 54],
                          [55, 56, 57, 58, 59]]])
        a2 = compress_nd(x, (0, -2))
        assert_equal(a, a2)
        # axis=(1, 2)
        a = compress_nd(x, (1, 2))
        assert_equal(a, [[[ 0, 2, 3, 4],
                          [10, 12, 13, 14],
                          [15, 17, 18, 19]],
                         [[20, 22, 23, 24],
                          [30, 32, 33, 34],
                          [35, 37, 38, 39]],
                         [[40, 42, 43, 44],
                          [50, 52, 53, 54],
                          [55, 57, 58, 59]]])
        a2 = compress_nd(x, (-2, 2))
        a3 = compress_nd(x, (1, -1))
        a4 = compress_nd(x, (-2, -1))
        assert_equal(a, a2)
        assert_equal(a, a3)
        assert_equal(a, a4)
        # axis=(0, 2)
        a = compress_nd(x, (0, 2))
        assert_equal(a, [[[ 0, 2, 3, 4],
                          [ 5, 7, 8, 9],
                          [10, 12, 13, 14],
                          [15, 17, 18, 19]],
                         [[40, 42, 43, 44],
                          [45, 47, 48, 49],
                          [50, 52, 53, 54],
                          [55, 57, 58, 59]]])
        a2 = compress_nd(x, (0, -1))
        assert_equal(a, a2)

    def test_compress_rowcols(self):
        """compress_rowcols drops rows/columns that contain masked values."""
        # Tests compress_rowcols
        x = array(np.arange(9).reshape(3, 3),
                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
        assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
        assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
        assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
        x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(compress_rowcols(x), [[0, 2], [6, 8]])
        assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]])
        assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]])
        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(compress_rowcols(x), [[8]])
        assert_equal(compress_rowcols(x, 0), [[6, 7, 8]])
        assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]])
        # With a masked value on every row and column, nothing survives.
        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        assert_equal(compress_rowcols(x).size, 0)
        assert_equal(compress_rowcols(x, 0).size, 0)
        assert_equal(compress_rowcols(x, 1).size, 0)

    def test_mask_rowcols(self):
        """mask_rowcols masks whole rows/columns that hold a masked value."""
        # Tests mask_rowcols.
        x = array(np.arange(9).reshape(3, 3),
                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
        assert_equal(mask_rowcols(x).mask,
                     [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
        assert_equal(mask_rowcols(x, 0).mask,
                     [[1, 1, 1], [0, 0, 0], [0, 0, 0]])
        assert_equal(mask_rowcols(x, 1).mask,
                     [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
        x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(mask_rowcols(x).mask,
                     [[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        assert_equal(mask_rowcols(x, 0).mask,
                     [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
        assert_equal(mask_rowcols(x, 1).mask,
                     [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_equal(mask_rowcols(x).mask,
                     [[1, 1, 1], [1, 1, 1], [1, 1, 0]])
        assert_equal(mask_rowcols(x, 0).mask,
                     [[1, 1, 1], [1, 1, 1], [0, 0, 0]])
        assert_equal(mask_rowcols(x, 1,).mask,
                     [[1, 1, 0], [1, 1, 0], [1, 1, 0]])
        # A masked value on every row and column masks everything.
        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        self.assertTrue(mask_rowcols(x).all() is masked)
        self.assertTrue(mask_rowcols(x, 0).all() is masked)
        self.assertTrue(mask_rowcols(x, 1).all() is masked)
        self.assertTrue(mask_rowcols(x).mask.all())
        self.assertTrue(mask_rowcols(x, 0).mask.all())
        self.assertTrue(mask_rowcols(x, 1).mask.all())

    def test_dot(self):
        """ma.dot: strict=True propagates masks; strict=False fills with 0."""
        # Tests dot product
        n = np.arange(1, 7)
        #
        m = [1, 0, 0, 0, 0, 0]
        a = masked_array(n, mask=m).reshape(2, 3)
        b = masked_array(n, mask=m).reshape(3, 2)
        c = dot(a, b, strict=True)
        assert_equal(c.mask, [[1, 1], [1, 0]])
        c = dot(b, a, strict=True)
        assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
        c = dot(a, b, strict=False)
        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
        c = dot(b, a, strict=False)
        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
        #
        m = [0, 0, 0, 0, 0, 1]
        a = masked_array(n, mask=m).reshape(2, 3)
        b = masked_array(n, mask=m).reshape(3, 2)
        c = dot(a, b, strict=True)
        assert_equal(c.mask, [[0, 1], [1, 1]])
        c = dot(b, a, strict=True)
        assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]])
        c = dot(a, b, strict=False)
        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
        # strict defaults to False.
        assert_equal(c, dot(a, b))
        c = dot(b, a, strict=False)
        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
        #
        # No mask at all: the result must carry nomask.
        m = [0, 0, 0, 0, 0, 0]
        a = masked_array(n, mask=m).reshape(2, 3)
        b = masked_array(n, mask=m).reshape(3, 2)
        c = dot(a, b)
        assert_equal(c.mask, nomask)
        c = dot(b, a)
        assert_equal(c.mask, nomask)
        #
        a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3)
        b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
        c = dot(a, b, strict=True)
        assert_equal(c.mask, [[1, 1], [0, 0]])
        c = dot(a, b, strict=False)
        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
        c = dot(b, a, strict=True)
        assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
        c = dot(b, a, strict=False)
        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
        #
        a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
        b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
        c = dot(a, b, strict=True)
        assert_equal(c.mask, [[0, 0], [1, 1]])
        c = dot(a, b)
        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
        c = dot(b, a, strict=True)
        assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
        c = dot(b, a, strict=False)
        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
        #
        a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
        b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
        c = dot(a, b, strict=True)
        assert_equal(c.mask, [[1, 0], [1, 1]])
        c = dot(a, b, strict=False)
        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
        c = dot(b, a, strict=True)
        assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
        c = dot(b, a, strict=False)
        assert_equal(c, np.dot(b.filled(0), a.filled(0)))

    def test_dot_returns_maskedarray(self):
        """ma.dot must return a MaskedArray for any ndarray/MaskedArray mix."""
        # See gh-6611
        a = np.eye(3)
        b = array(a)
        assert_(type(dot(a, a)) is MaskedArray)
        assert_(type(dot(a, b)) is MaskedArray)
        assert_(type(dot(b, a)) is MaskedArray)
        assert_(type(dot(b, b)) is MaskedArray)

    def test_dot_out(self):
        """ma.dot must fill the supplied ``out`` array and return it."""
        a = array(np.eye(3))
        out = array(np.zeros((3, 3)))
        res = dot(a, a, out=out)
        assert_(res is out)
        assert_equal(a, res)
class TestApplyAlongAxis(TestCase):
    """Exercise ma.apply_along_axis on 3D inputs."""

    def test_3d(self):
        """Apply a 1D selector function along the last axis."""
        cube = arange(12.).reshape(2, 2, 3)

        def pick_second(vec):
            # Return element 1 of each 1D slice.
            return vec[1]

        result = apply_along_axis(pick_second, 2, cube)
        assert_equal(result, [[1, 4], [7, 10]])

    def test_3d_kwargs(self):
        """Keyword arguments must be forwarded to the applied function."""
        cube = arange(12).reshape(2, 2, 3)

        def pick_offset(vec, offset=0):
            return vec[1 + offset]

        result = apply_along_axis(pick_offset, 2, cube, offset=1)
        assert_equal(result, [[2, 5], [8, 11]])
class TestApplyOverAxes(TestCase):
    """Test ma.apply_over_axes with and without masked entries."""

    def test_basic(self):
        """Sum over axes 0 and 2, before and after masking odd values."""
        a = arange(24).reshape(2, 3, 4)
        test = apply_over_axes(np.sum, a, [0, 2])
        ctrl = np.array([[[60], [92], [124]]])
        assert_equal(test, ctrl)
        # Mask the odd entries.  Use the builtin ``bool`` here: the
        # ``np.bool`` alias is deprecated (NumPy 1.20) and was removed
        # in NumPy 1.24, where it raises AttributeError.
        a[(a % 2).astype(bool)] = masked
        test = apply_over_axes(np.sum, a, [0, 2])
        ctrl = np.array([[[28], [44], [60]]])
        assert_equal(test, ctrl)
class TestMedian(TestCase):
    """Tests for np.ma.median on plain sequences and masked arrays."""

    def test_pytype(self):
        """median of a plain nested list of infs along the last axis."""
        r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
        assert_equal(r, np.inf)

    def test_non_masked(self):
        """median of unmasked inputs matches the ordinary definition."""
        assert_equal(np.ma.median(np.arange(9)), 4.)
        assert_equal(np.ma.median(range(9)), 4)

    def test_2d(self):
        """median over a 2D array whose columns are shuffled copies."""
        # Tests median w/ 2D
        (n, p) = (101, 30)
        x = masked_array(np.linspace(-1., 1., n),)
        x[:10] = x[-10:] = masked
        z = masked_array(np.empty((n, p), dtype=float))
        z[:, 0] = x[:]
        idx = np.arange(len(x))
        for i in range(1, p):
            np.random.shuffle(idx)
            z[:, i] = x[idx]
        # Each column is a permutation of x, so every median is 0.
        assert_equal(median(z[:, 0]), 0)
        assert_equal(median(z), 0)
        assert_equal(median(z, axis=0), np.zeros(p))
        assert_equal(median(z.T, axis=1), np.zeros(p))

    def test_2d_waxis(self):
        """median along each axis of a partially masked 2D array."""
        # Tests median w/ 2D arrays and different axis.
        x = masked_array(np.arange(30).reshape(10, 3))
        x[:3] = x[-3:] = masked
        assert_equal(median(x), 14.5)
        assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
        assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
        assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])

    def test_3d(self):
        """median over axis 0 of 3D arrays with masked entries."""
        # Tests median w/ 3D
        x = np.ma.arange(24).reshape(3, 4, 2)
        x[x % 3 == 0] = masked
        assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
        x.shape = (4, 3, 2)
        # NOTE(review): the 99 entries appear to be placeholders at masked
        # result positions (only the unmasked values are compared) — confirm.
        assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
        x = np.ma.arange(24).reshape(4, 3, 2)
        x[x % 5 == 0] = masked
        assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])

    def test_neg_axis(self):
        """A negative axis must behave like its positive counterpart."""
        x = masked_array(np.arange(30).reshape(10, 3))
        x[:3] = x[-3:] = masked
        assert_equal(median(x, axis=-1), median(x, axis=1))

    def test_out(self):
        """median must fill and return the supplied ``out`` array."""
        x = masked_array(np.arange(30).reshape(10, 3))
        x[:3] = x[-3:] = masked
        out = masked_array(np.ones(10))
        r = median(x, axis=1, out=out)
        assert_equal(r, out)
        assert_(type(r) == MaskedArray)
class TestCov(TestCase):
    """Tests for np.ma.cov against np.cov on equivalent compressed data."""

    def setUp(self):
        # Fresh random 1D masked array for every test.
        self.data = array(np.random.rand(12))

    def test_1d_wo_missing(self):
        """cov on a 1D variable without missing values."""
        x = self.data
        assert_almost_equal(np.cov(x), cov(x))
        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                            cov(x, rowvar=False, bias=True))

    def test_2d_wo_missing(self):
        """cov on one 2D variable without missing values."""
        x = self.data.reshape(3, 4)
        assert_almost_equal(np.cov(x), cov(x))
        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                            cov(x, rowvar=False, bias=True))

    def test_1d_w_missing(self):
        """cov on 1D variables with missing values."""
        x = self.data
        x[-1] = masked
        x -= x.mean()
        nx = x.compressed()
        assert_almost_equal(np.cov(nx), cov(x))
        assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
        assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
                            cov(x, rowvar=False, bias=True))
        #
        # With allow_masked=False a masked input must raise ValueError.
        # (The original test silently passed when no exception was raised.)
        try:
            cov(x, allow_masked=False)
        except ValueError:
            pass
        else:
            raise AssertionError(
                "cov(x, allow_masked=False) did not raise ValueError")
        #
        # 2 1D variables w/ missing values
        nx = x[1:-1]
        assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
                            cov(x, x[::-1], rowvar=False))
        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
                            cov(x, x[::-1], rowvar=False, bias=True))

    def test_2d_w_missing(self):
        """cov on a 2D variable with a missing value."""
        x = self.data
        x[-1] = masked
        x = x.reshape(3, 4)
        # Rescale np.cov of the zero-filled data by the count of valid
        # pairs to reproduce what ma.cov computes.
        valid = np.logical_not(getmaskarray(x)).astype(int)
        frac = np.dot(valid, valid.T)
        xf = (x - x.mean(1)[:, None]).filled(0)
        assert_almost_equal(cov(x),
                            np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
        assert_almost_equal(cov(x, bias=True),
                            np.cov(xf, bias=True) * x.shape[1] / frac)
        frac = np.dot(valid.T, valid)
        xf = (x - x.mean(0)).filled(0)
        assert_almost_equal(cov(x, rowvar=False),
                            (np.cov(xf, rowvar=False) *
                             (x.shape[0] - 1) / (frac - 1.)))
        assert_almost_equal(cov(x, rowvar=False, bias=True),
                            (np.cov(xf, rowvar=False, bias=True) *
                             x.shape[0] / frac))
class catch_warn_mae(clear_and_catch_warnings):
    """ Context manager to catch, reset warnings in ma.extras module
    """
    # Restrict the warning-registry reset to numpy.ma.extras (aliased mae),
    # so its __warningregistry__ is cleared on entry and restored on exit.
    class_modules = (mae,)
class TestCorrcoef(TestCase):
    """Tests for np.ma.corrcoef, including deprecated bias/ddof handling."""

    def setUp(self):
        # Two independent random 1D masked arrays.
        self.data = array(np.random.rand(12))
        self.data2 = array(np.random.rand(12))

    def test_ddof(self):
        """ddof must warn about deprecation and not change the result."""
        # ddof raises DeprecationWarning
        x, y = self.data, self.data2
        expected = np.corrcoef(x)
        expected2 = np.corrcoef(x, y)
        with catch_warn_mae():
            warnings.simplefilter("always")
            assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
            warnings.simplefilter("ignore")
            # ddof has no or negligible effect on the function
            assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
            assert_almost_equal(corrcoef(x, ddof=-1), expected)
            assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
            assert_almost_equal(corrcoef(x, ddof=3), expected)
            assert_almost_equal(corrcoef(x, y, ddof=3), expected2)

    def test_bias(self):
        """bias must warn about deprecation and not change the result."""
        x, y = self.data, self.data2
        expected = np.corrcoef(x)
        # bias raises DeprecationWarning
        with catch_warn_mae():
            warnings.simplefilter("always")
            assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
            assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
            assert_warns(DeprecationWarning, corrcoef, x, bias=False)
            warnings.simplefilter("ignore")
            # bias has no or negligible effect on the function
            assert_almost_equal(corrcoef(x, bias=1), expected)

    def test_1d_wo_missing(self):
        """corrcoef on a 1D variable without missing values."""
        x = self.data
        assert_almost_equal(np.corrcoef(x), corrcoef(x))
        assert_almost_equal(np.corrcoef(x, rowvar=False),
                            corrcoef(x, rowvar=False))
        with catch_warn_mae():
            warnings.simplefilter("ignore")
            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
                                corrcoef(x, rowvar=False, bias=True))

    def test_2d_wo_missing(self):
        """corrcoef on one 2D variable without missing values."""
        x = self.data.reshape(3, 4)
        assert_almost_equal(np.corrcoef(x), corrcoef(x))
        assert_almost_equal(np.corrcoef(x, rowvar=False),
                            corrcoef(x, rowvar=False))
        with catch_warn_mae():
            warnings.simplefilter("ignore")
            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
                                corrcoef(x, rowvar=False, bias=True))

    def test_1d_w_missing(self):
        """corrcoef on 1D variables with missing values."""
        x = self.data
        x[-1] = masked
        x -= x.mean()
        nx = x.compressed()
        assert_almost_equal(np.corrcoef(nx), corrcoef(x))
        assert_almost_equal(np.corrcoef(nx, rowvar=False),
                            corrcoef(x, rowvar=False))
        with catch_warn_mae():
            warnings.simplefilter("ignore")
            assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
                                corrcoef(x, rowvar=False, bias=True))
        # With allow_masked=False a masked input must raise ValueError.
        # (The original test silently passed when no exception was raised.)
        try:
            corrcoef(x, allow_masked=False)
        except ValueError:
            pass
        else:
            raise AssertionError(
                "corrcoef(x, allow_masked=False) did not raise ValueError")
        # 2 1D variables w/ missing values
        nx = x[1:-1]
        assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
        assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
                            corrcoef(x, x[::-1], rowvar=False))
        with catch_warn_mae():
            warnings.simplefilter("ignore")
            # ddof and bias have no or negligible effect on the function
            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
                                corrcoef(x, x[::-1], bias=1))
            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
                                corrcoef(x, x[::-1], ddof=2))

    def test_2d_w_missing(self):
        """corrcoef on a 2D variable with a missing value."""
        x = self.data
        x[-1] = masked
        x = x.reshape(3, 4)
        test = corrcoef(x)
        control = np.corrcoef(x)
        assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
        with catch_warn_mae():
            warnings.simplefilter("ignore")
            # ddof and bias have no or negligible effect on the function
            assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
                                control[:-1, :-1])
            assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
                                control[:-1, :-1])
            assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
                                control[:-1, :-1])
class TestPolynomial(TestCase):
    """Tests for np.ma.polyfit against np.polyfit on compressed data."""
    #
    def test_polyfit(self):
        """polyfit must ignore masked points and support weights."""
        # Tests polyfit
        # On ndarrays
        x = np.random.rand(10)
        y = np.random.rand(20).reshape(-1, 2)
        assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3))
        # ON 1D maskedarrays
        x = x.view(MaskedArray)
        x[0] = masked
        y = y.view(MaskedArray)
        y[0, 0] = y[-1, -1] = masked
        #
        # A masked point must be dropped from the fit entirely.
        (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
        (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
                                     full=True)
        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
            assert_almost_equal(a, a_)
        #
        (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
            assert_almost_equal(a, a_)
        #
        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
            assert_almost_equal(a, a_)
        #
        # Weights must be honoured and left unmodified by the call.
        w = np.random.rand(10) + 1
        wo = w.copy()
        xs = x[1:-1]
        ys = y[1:-1]
        ws = w[1:-1]
        (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
        (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
        assert_equal(w, wo)
        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
            assert_almost_equal(a, a_)

    def test_polyfit_with_masked_NaNs(self):
        """NaNs hidden behind the mask must not affect the fit."""
        x = np.random.rand(10)
        y = np.random.rand(20).reshape(-1, 2)
        x[0] = np.nan
        y[-1,-1] = np.nan
        x = x.view(MaskedArray)
        y = y.view(MaskedArray)
        x[0] = masked
        y[-1,-1] = masked
        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
            assert_almost_equal(a, a_)
class TestArraySetOps(TestCase):
    """Tests for masked set operations (unique, ediff1d, in1d, etc.)."""

    def test_unique_onlist(self):
        """unique on a plain list returns a MaskedArray."""
        # Test unique on list
        data = [1, 1, 1, 2, 2, 3]
        test = unique(data, return_index=True, return_inverse=True)
        self.assertTrue(isinstance(test[0], MaskedArray))
        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
        assert_equal(test[1], [0, 3, 5])
        assert_equal(test[2], [0, 0, 0, 1, 1, 2])

    def test_unique_onmaskedarray(self):
        """unique collapses all masked values into one trailing masked entry."""
        # Test unique on masked data w/use_mask=True
        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
        test = unique(data, return_index=True, return_inverse=True)
        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
        assert_equal(test[1], [0, 3, 5, 2])
        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
        #
        # The fill_value must not influence the result.
        data.fill_value = 3
        data = masked_array(data=[1, 1, 1, 2, 2, 3],
                            mask=[0, 0, 1, 0, 1, 0], fill_value=3)
        test = unique(data, return_index=True, return_inverse=True)
        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
        assert_equal(test[1], [0, 3, 5, 2])
        assert_equal(test[2], [0, 0, 3, 1, 3, 2])

    def test_unique_allmasked(self):
        """unique on fully masked data and on the masked singleton."""
        # Test all masked
        data = masked_array([1, 1, 1], mask=True)
        test = unique(data, return_index=True, return_inverse=True)
        assert_equal(test[0], masked_array([1, ], mask=[True]))
        assert_equal(test[1], [0])
        assert_equal(test[2], [0, 0, 0])
        #
        # Test masked
        data = masked
        test = unique(data, return_index=True, return_inverse=True)
        assert_equal(test[0], masked_array(masked))
        assert_equal(test[1], [0])
        assert_equal(test[2], [0])

    def test_ediff1d(self):
        """ediff1d propagates the mask of adjacent elements."""
        # Tests mediff1d
        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
        test = ediff1d(x)
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_ediff1d_tobegin(self):
        """ediff1d with the to_begin keyword (masked and sequence)."""
        # Test ediff1d w/ to_begin
        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
        test = ediff1d(x, to_begin=masked)
        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = ediff1d(x, to_begin=[1, 2, 3])
        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_ediff1d_toend(self):
        """ediff1d with the to_end keyword (masked and sequence)."""
        # Test ediff1d w/ to_end
        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
        test = ediff1d(x, to_end=masked)
        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = ediff1d(x, to_end=[1, 2, 3])
        control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_ediff1d_tobegin_toend(self):
        """ediff1d with both to_begin and to_end."""
        # Test ediff1d w/ to_begin and to_end
        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
        test = ediff1d(x, to_end=masked, to_begin=masked)
        control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
        control = array([0, 1, 1, 1, 4, 1, 2, 3],
                        mask=[1, 1, 0, 0, 1, 0, 0, 0])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_ediff1d_ndarray(self):
        """ediff1d on a plain ndarray still returns a MaskedArray."""
        # Test ediff1d w/ a ndarray
        x = np.arange(5)
        test = ediff1d(x)
        control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
        assert_equal(test, control)
        self.assertTrue(isinstance(test, MaskedArray))
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
        #
        test = ediff1d(x, to_end=masked, to_begin=masked)
        control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
        self.assertTrue(isinstance(test, MaskedArray))
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_intersect1d(self):
        """intersect1d treats the masked value as a common element."""
        # Test intersect1d
        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
        test = intersect1d(x, y)
        control = array([1, 3, -1], mask=[0, 0, 1])
        assert_equal(test, control)

    def test_setxor1d(self):
        """setxor1d on combinations of masked and plain inputs."""
        # Test setxor1d
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        test = setxor1d(a, b)
        assert_equal(test, array([3, 4, 7]))
        #
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = [1, 2, 3, 4, 5]
        test = setxor1d(a, b)
        assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
        #
        a = array([1, 2, 3])
        b = array([6, 5, 4])
        test = setxor1d(a, b)
        assert_(isinstance(test, MaskedArray))
        assert_equal(test, [1, 2, 3, 4, 5, 6])
        #
        # The value 8 is masked on both sides, so it cancels out.
        a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
        b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
        test = setxor1d(a, b)
        assert_(isinstance(test, MaskedArray))
        assert_equal(test, [1, 2, 3, 4, 5, 6])
        #
        assert_array_equal([], setxor1d([], []))

    def test_in1d(self):
        """in1d membership tests honour masked entries."""
        # Test in1d
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        test = in1d(a, b)
        assert_equal(test, [True, True, True, False, True])
        #
        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 5, -1], mask=[0, 0, 1])
        test = in1d(a, b)
        assert_equal(test, [True, True, False, True, True])
        #
        assert_array_equal([], in1d([], []))

    def test_in1d_invert(self):
        """in1d(invert=True) is the elementwise negation of in1d."""
        # Test in1d's invert parameter
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 5, -1], mask=[0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
        assert_array_equal([], in1d([], [], invert=True))

    def test_union1d(self):
        """union1d merges values and keeps a single masked entry."""
        # Test union1d
        a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        test = union1d(a, b)
        control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
        assert_equal(test, control)
        #
        assert_array_equal([], union1d([], []))

    def test_setdiff1d(self):
        """setdiff1d keeps masked entries of the first array."""
        # Test setdiff1d
        a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
        b = array([2, 4, 3, 3, 2, 1, 5])
        test = setdiff1d(a, b)
        assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
        #
        a = arange(10)
        b = arange(8)
        assert_equal(setdiff1d(a, b), array([8, 9]))
        # The dtype of an empty difference must be preserved.
        a = array([], np.uint32, mask=[])
        assert_equal(setdiff1d(a, []).dtype, np.uint32)

    def test_setdiff1d_char_array(self):
        """setdiff1d works on character arrays."""
        # Test setdiff1d_charray
        a = np.array(['a', 'b', 'c'])
        b = np.array(['a', 'b', 's'])
        assert_array_equal(setdiff1d(a, b), np.array(['c']))
class TestShapeBase(TestCase):
    """Tests for the masked atleast_*d and diagflat wrappers."""

    def test_atleast2d(self):
        """atleast_2d must promote both data and mask, leaving input intact."""
        arr = masked_array([0, 1, 2], mask=[0, 1, 0])
        promoted = atleast_2d(arr)
        # The result gains a leading axis and its mask tracks the data shape.
        assert_equal(promoted.shape, (1, 3))
        assert_equal(promoted.mask.shape, promoted.data.shape)
        # The original array must be left untouched.
        assert_equal(arr.shape, (3,))
        assert_equal(arr.mask.shape, arr.data.shape)

    def test_shape_scalar(self):
        # the atleast and diagflat function should work with scalars
        # GitHub issue #3367
        for promote, expected_shape in ((atleast_1d, (1,)),
                                        (atleast_2d, (1, 1)),
                                        (atleast_3d, (1, 1, 1)),
                                        (diagflat, (1, 1))):
            result = promote(1.0)
            assert_equal(result.shape, expected_shape)
            assert_equal(result.mask.shape, result.data.shape)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
|
menardorama/ReadyNAS-Add-ons | refs/heads/master | headphones-1.0.0/debian/headphones/etc/apps/headphones/lib/mako/__init__.py | 60 | # mako/__init__.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

# Package version string, exposed for runtime introspection and packaging.
__version__ = '1.0.1'
|
ysarbaev/contrib-python-qubell-client | refs/heads/master | test_qubell_client/__init__.py | 1 | # Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qubell.api.provider import log_routes_stat

# Package metadata.
__author__ = "Vasyl Khomenko"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__email__ = "vkhomenko@qubell.com"

import atexit
# Dump per-route API usage statistics when the test process exits.
atexit.register(log_routes_stat)
amwelch/a10sdk-python | refs/heads/master | a10sdk/core/cgnv6/cgnv6_template_policy.py | 2 | from a10sdk.common.A10BaseClass import A10BaseClass
class Dns64(A10BaseClass):

    """This class does not support CRUD Operations please use parent.

    :param exclusive_answer: {"default": 0, "type": "number", "description": "Exclusive Answer in DNS Response", "format": "flag"}
    :param prefix: {"type": "string", "description": "IPv6 prefix", "format": "ipv6-address-plen"}
    :param disable: {"default": 0, "type": "number", "description": "Disable", "format": "flag"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # REST payload key for this object.
        self.b_key = "dns64"
        self.DeviceProxy = ""
        # Configurable fields start out empty.
        for field in ("exclusive_answer", "prefix", "disable"):
            setattr(self, field, "")
        # Caller-supplied keyword arguments override the defaults above.
        for name, value in kwargs.items():
            setattr(self, name, value)
"""This class does not support CRUD Operations please use parent.
:param request_limit: {"description": "Request limit (Specify request limit)", "format": "number", "type": "number", "maximum": 1048575, "minimum": 0, "optional": true}
:param conn_limit: {"description": "Connection limit", "format": "number", "type": "number", "maximum": 1048575, "minimum": 0, "optional": true}
:param interval: {"description": "Specify log interval in minutes, by default system will log every over limit instance", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}
:param log: {"default": 0, "optional": true, "type": "number", "description": "Log a message", "format": "flag"}
:param lidnum: {"description": "Specify a limit ID", "format": "number", "type": "number", "maximum": 31, "minimum": 1, "optional": false}
:param request_rate_limit: {"description": "Request rate limit (Specify request rate limit)", "format": "number", "type": "number", "maximum": 4294967295, "minimum": 1, "optional": true}
:param conn_per: {"description": "Per (Specify interval in number of 100ms)", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}
:param request_per: {"description": "Per (Specify interval in number of 100ms)", "format": "number", "type": "number", "maximum": 65535, "minimum": 1, "optional": true}
:param conn_rate_limit: {"description": "Specify connection rate limit", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 1, "optional": true}
:param lockout: {"description": "Don't accept any new connection for certain time (Lockout duration in minutes)", "format": "number", "type": "number", "maximum": 1023, "minimum": 1, "optional": true}
:param action_value: {"optional": true, "enum": ["forward", "reset"], "type": "string", "description": "'forward': Forward the traffic even it exceeds limit; 'reset': Reset the connection when it exceeds limit; ", "format": "enum"}
:param over_limit_action: {"default": 0, "optional": true, "type": "number", "description": "Set action when exceeds limit", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "lid-list"
self.DeviceProxy = ""
self.request_limit = ""
self.conn_limit = ""
self.interval = ""
self.log = ""
self.dns64 = {}
self.lidnum = ""
self.request_rate_limit = ""
self.conn_per = ""
self.request_per = ""
self.conn_rate_limit = ""
self.lockout = ""
self.action_value = ""
self.over_limit_action = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class ClassList(A10BaseClass):
    """This class does not support CRUD Operations please use parent.

    Wraps the AXAPI ``class-list`` object of a CGNv6 policy template
    (``/axapi/v3/cgnv6/template/policy/{name}/class-list``).

    :param header_name: L7 header name (string, 1-63 chars).
    :param lid_list: list of limit-ID (``lid``) sub-objects; see
        ``/axapi/v3/cgnv6/template/policy/{name}/class-list/lid/{lidnum}``.
    :param name: class list name (string, 1-63 chars).
    :param client_ip_l3_dest: flag -- use destination IP as client IP
        (mutually exclusive with ``client_ip_l7_header``).
    :param client_ip_l7_header: flag -- extract client IP from an L7
        header (mutually exclusive with ``client_ip_l3_dest``).
    :param uuid: uuid of the object.
    :param DeviceProxy: the device proxy for REST operations and session
        handling; refer to ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        # Seed every field with its empty placeholder first, in schema
        # order, so the attribute set matches the generated original.
        placeholders = (
            ("ERROR_MSG", ""),
            ("b_key", "class-list"),
            ("DeviceProxy", ""),
            ("header_name", ""),
            ("lid_list", []),
            ("name", ""),
            ("client_ip_l3_dest", ""),
            ("client_ip_l7_header", ""),
            ("uuid", ""),
        )
        for attr, empty in placeholders:
            setattr(self, attr, empty)
        # Caller-supplied keyword arguments override the placeholders.
        for attr, value in kwargs.items():
            setattr(self, attr, value)
class Policy(A10BaseClass):
    """Class Description::

    Policy config.

    Class policy supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this
    module.

    :param name: Policy template name (string, 1-63 chars, required).
    :param uuid: uuid of the object.
    :param DeviceProxy: the device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/cgnv6/template/policy/{name}`.
    """

    def __init__(self, **kwargs):
        # Placeholders mirror the AXAPI schema; "name" is the only field
        # required for CRUD calls against this endpoint.
        seed = (
            ("ERROR_MSG", ""),
            ("required", ["name"]),
            ("b_key", "policy"),
            ("a10_url", "/axapi/v3/cgnv6/template/policy/{name}"),
            ("DeviceProxy", ""),
            ("class_list", {}),
            ("name", ""),
            ("uuid", ""),
        )
        for attr, default in seed:
            setattr(self, attr, default)
        # Keyword arguments from the caller win over the placeholders.
        for attr, value in kwargs.items():
            setattr(self, attr, value)
|
ahuarte47/QGIS | refs/heads/master | tests/src/python/test_qgslayoutmapgrid.py | 15 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemMapGrid.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '20/10/2017'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtCore import QRectF
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.core import (QgsLayoutItemMap,
QgsLayoutItemMapGrid,
QgsRectangle,
QgsLayout,
QgsProperty,
QgsLayoutObject,
QgsFontUtils,
QgsProject)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, getTestFont
from qgslayoutchecker import QgsLayoutChecker
# Spin up a headless QGIS application once for the whole module; the
# layout rendering exercised below requires an initialized application.
start_app()
TEST_DATA_DIR = unitTestDataPath()
# Rendering tests for QgsLayoutItemMapGrid (grid lines, frames, annotations).
class TestQgsLayoutMapGrid(unittest.TestCase):
def testGrid(self):
    """Test that we can create a grid for a map.

    Renders a solid-line grid with annotations and compares the result
    against the composermap_grid control image.
    """
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    self.assertFalse(map.grids().hasEnabledItems())
    myRectangle = QgsRectangle(781662.375, 3339523.125,
                               793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setEnabled(True)
    self.assertTrue(map.grids().hasEnabledItems())
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationEnabled(True)
    map.grid().setGridLineColor(QColor(0, 255, 0))
    map.grid().setGridLineWidth(0.5)
    map.grid().setAnnotationFont(getTestFont())
    map.grid().setAnnotationPrecision(0)
    # Hide annotations on the left/top edges; draw them outside the frame,
    # horizontally, on the right/bottom edges.
    map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
    map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
    map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
    map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
    map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
    map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
    map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
    map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_grid', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout()
    # Reset grid state before asserting so a failure doesn't leak state.
    map.grid().setEnabled(False)
    map.grid().setAnnotationEnabled(False)
    # self.assertTrue instead of a bare assert: bare asserts are stripped
    # under "python -O" and this matches the sibling data-defined tests.
    self.assertTrue(myTestResult, myMessage)
def testCrossGrid(self):
    """Render a cross-style grid and compare against the
    composermap_crossgrid control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setEnabled(True)
    map.grid().setStyle(QgsLayoutItemMapGrid.Cross)
    map.grid().setCrossLength(2.0)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationEnabled(False)
    map.grid().setGridLineColor(QColor(0, 255, 0))
    map.grid().setGridLineWidth(0.5)
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_crossgrid', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout()
    # Restore defaults before asserting so later tests start clean.
    map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
    map.grid().setEnabled(False)
    map.grid().setAnnotationEnabled(False)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
def testMarkerGrid(self):
    """Render a marker-style grid and compare against the
    composermap_markergrid control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setEnabled(True)
    map.grid().setStyle(QgsLayoutItemMapGrid.Markers)
    map.grid().setCrossLength(2.0)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationEnabled(False)
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.grid().markerSymbol().symbolLayer(0).setStrokeColor(QColor(0, 0, 0))
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_markergrid', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout()
    # Restore defaults before asserting so later tests start clean.
    map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
    map.grid().setEnabled(False)
    map.grid().setAnnotationEnabled(False)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
def testFrameOnly(self):
    """Render only a zebra frame (no interior grid lines) and compare
    against the composermap_gridframeonly control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setEnabled(True)
    # FrameAnnotationsOnly suppresses the interior grid lines.
    map.grid().setStyle(QgsLayoutItemMapGrid.FrameAnnotationsOnly)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationEnabled(False)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    map.grid().setFramePenSize(0.5)
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_gridframeonly', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout()
    # Restore defaults before asserting so later tests start clean.
    map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
    map.grid().setEnabled(False)
    map.grid().setAnnotationEnabled(False)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.NoFrame)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
def testZebraStyle(self):
    """Render a zebra frame with custom pen/fill colors and compare
    against the composermap_zebrastyle control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    myRectangle = QgsRectangle(785462.375, 3341423.125,
                               789262.375, 3343323.125)
    map.setExtent(myRectangle)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setGridLineColor(QColor(0, 0, 0))
    map.grid().setAnnotationFontColor(QColor(0, 0, 0))
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    map.grid().setFrameWidth(10)
    map.grid().setFramePenSize(1)
    map.grid().setGridLineWidth(0.5)
    map.grid().setFramePenColor(QColor(255, 100, 0, 200))
    map.grid().setFrameFillColor1(QColor(50, 90, 50, 100))
    map.grid().setFrameFillColor2(QColor(200, 220, 100, 60))
    map.grid().setEnabled(True)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_zebrastyle', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    # Allow up to 100 mismatched pixels for antialiasing differences.
    myTestResult, myMessage = checker.testLayout(0, 100)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
def testZebraStyleSides(self):
    """Enable the zebra frame side-by-side (left, then +top, then +right)
    and compare each stage against its control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setGridLineColor(QColor(0, 0, 0))
    map.grid().setAnnotationFontColor(QColor(0, 0, 0))
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    map.grid().setFrameWidth(10)
    map.grid().setFramePenSize(1)
    map.grid().setGridLineWidth(0.5)
    map.grid().setFramePenColor(QColor(0, 0, 0))
    map.grid().setFrameFillColor1(QColor(0, 0, 0))
    map.grid().setFrameFillColor2(QColor(255, 255, 255))
    map.grid().setEnabled(True)
    # Stage 1: frame on the left side only.
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameLeft, True)
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameRight, False)
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameTop, False)
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameBottom, False)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_zebrastyle_left', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout(0, 100)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
    # Stage 2: left + top.
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameTop, True)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_zebrastyle_lefttop', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout(0, 100)
    self.assertTrue(myTestResult, myMessage)
    # Stage 3: left + top + right.
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameRight, True)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_zebrastyle_lefttopright', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout(0, 100)
    self.assertTrue(myTestResult, myMessage)
    # Restore all sides and disable the frame for later tests.
    map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameBottom, True)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.NoFrame)
def testInteriorTicks(self):
    """Render an interior-ticks frame with no grid lines and compare
    against the composermap_interiorticks control image."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationFontColor(QColor(0, 0, 0))
    map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.InteriorTicks)
    map.grid().setFrameWidth(10)
    map.grid().setFramePenSize(1)
    map.grid().setFramePenColor(QColor(0, 0, 0))
    map.grid().setEnabled(True)
    map.grid().setStyle(QgsLayoutItemMapGrid.FrameAnnotationsOnly)
    map.updateBoundingRect()
    checker = QgsLayoutChecker('composermap_interiorticks', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout(0, 100)
    # Bare assert is stripped under -O; assertTrue matches sibling tests.
    self.assertTrue(myTestResult, myMessage)
def testExpressionContext(self):
# The grid's expression context must inherit map-level variables such as
# @map_scale and expose grid-specific ones (@grid_number, @grid_axis).
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
map.setExtent(QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125))
map.setScale(1000)
layout.addLayoutItem(map)
# grid expression context should inherit from map, so variables like @map_scale can be used
context = map.grid().createExpressionContext()
self.assertAlmostEqual(context.variable('map_scale'), 1000, 5)
self.assertEqual(context.variable('grid_number'), 0)
self.assertEqual(context.variable('grid_axis'), 'x')
self.assertEqual(context.variable('item_uuid'), map.uuid())
def testDataDefinedEnabled(self):
# The MapGridEnabled data-defined property overrides the static enabled
# flag: True renders the normal grid image, False renders the
# "disabled" control image without a grid.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setAnnotationEnabled(True)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setAnnotationFont(getTestFont())
map.grid().setAnnotationPrecision(0)
# Annotations hidden left/top, shown outside the frame right/bottom.
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
# Override via data-defined property: enabled -> normal grid image.
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridEnabled, QgsProperty.fromValue(True))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_grid', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
# Override disabled -> grid must disappear despite setEnabled(True).
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridEnabled, QgsProperty.fromValue(False))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_datadefined_disabled', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
def testDataDefinedIntervalOffset(self):
# Data-defined interval/offset properties must override the static
# setIntervalX/Y values once the grid is refreshed.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setAnnotationEnabled(False)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
# Overrides: interval 1500/2500 and offset 500/250 replace the 2000s.
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridIntervalX, QgsProperty.fromValue(1500))
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridIntervalY, QgsProperty.fromValue(2500))
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridOffsetX, QgsProperty.fromValue(500))
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridOffsetY, QgsProperty.fromValue(250))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_datadefined_intervaloffset', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
def testDataDefinedFrameSize(self):
    """Data-defined MapGridFrameSize/MapGridFrameMargin must override the
    static zebra-frame width and margin."""
    layout = QgsLayout(QgsProject.instance())
    layout.initializeDefaults()
    map = QgsLayoutItemMap(layout)
    map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
    map.setFrameEnabled(True)
    map.setBackgroundColor(QColor(150, 100, 100))
    layout.addLayoutItem(map)
    myRectangle = QgsRectangle(781662.375, 3339523.125,
                               793062.375, 3345223.125)
    map.setExtent(myRectangle)
    map.grid().setEnabled(True)
    map.grid().setIntervalX(2000)
    map.grid().setIntervalY(2000)
    map.grid().setAnnotationEnabled(False)
    map.grid().setGridLineColor(QColor(0, 255, 0))
    map.grid().setGridLineWidth(0.5)
    map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
    map.grid().setFrameWidth(10)
    map.grid().setFramePenSize(1)
    map.grid().setGridLineWidth(0.5)
    map.grid().setFramePenColor(QColor(0, 0, 0))
    map.grid().setFrameFillColor1(QColor(0, 0, 0))
    map.grid().setFrameFillColor2(QColor(255, 255, 255))
    map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
    map.updateBoundingRect()
    map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameSize, QgsProperty.fromValue(20))
    map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameMargin, QgsProperty.fromValue(10))
    map.grid().refresh()
    # Fixed: the original passed '' 'composermap_...' -- a stray empty
    # literal left over from a line wrap, collapsed by implicit string
    # concatenation; use a single literal instead.
    checker = QgsLayoutChecker('composermap_datadefined_framesizemargin', layout)
    checker.setControlPathPrefix("composer_mapgrid")
    myTestResult, myMessage = checker.testLayout()
    self.assertTrue(myTestResult, myMessage)
def testDataDefinedCrossSize(self):
# The MapGridCrossSize data-defined property must override the static
# setCrossLength(2.0) value.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setStyle(QgsLayoutItemMapGrid.Cross)
map.grid().setCrossLength(2.0)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setAnnotationEnabled(False)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridCrossSize, QgsProperty.fromValue(4))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_datadefined_crosssize', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
def testDataDefinedFrameThickness(self):
# The MapGridFrameLineThickness data-defined property must override the
# static setFramePenSize(1) value.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setAnnotationEnabled(False)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
map.grid().setFrameWidth(10)
map.grid().setFramePenSize(1)
map.grid().setGridLineWidth(0.5)
map.grid().setFramePenColor(QColor(0, 0, 0))
map.grid().setFrameFillColor1(QColor(0, 0, 0))
map.grid().setFrameFillColor2(QColor(255, 255, 255))
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameLineThickness, QgsProperty.fromValue(4))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_datadefined_framethickness', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
def testDataDefinedAnnotationDistance(self):
# The MapGridLabelDistance data-defined property must override the
# default annotation-to-frame distance.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setIntervalX(2000)
map.grid().setIntervalY(2000)
map.grid().setAnnotationEnabled(True)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setAnnotationFont(getTestFont('Bold', 20))
map.grid().setAnnotationPrecision(0)
# Annotations hidden left/top, shown outside the frame right/bottom.
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridLabelDistance, QgsProperty.fromValue(10))
map.grid().refresh()
checker = QgsLayoutChecker('composermap_datadefined_annotationdistance', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
def testDynamicInterval(self):
# DynamicPageSizeBased units: the grid interval is chosen automatically
# so grid lines stay between the min/max on-page widths as the map
# scale changes.
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
map.setFrameEnabled(True)
map.setBackgroundColor(QColor(150, 100, 100))
layout.addLayoutItem(map)
myRectangle = QgsRectangle(781662.375, 3339523.125,
793062.375, 3345223.125)
map.setExtent(myRectangle)
map.grid().setEnabled(True)
map.grid().setUnits(QgsLayoutItemMapGrid.DynamicPageSizeBased)
map.grid().setMinimumIntervalWidth(50)
map.grid().setMaximumIntervalWidth(100)
map.grid().setAnnotationEnabled(True)
map.grid().setGridLineColor(QColor(0, 255, 0))
map.grid().setGridLineWidth(0.5)
map.grid().setAnnotationFont(getTestFont('Bold', 20))
map.grid().setAnnotationPrecision(0)
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
map.updateBoundingRect()
map.grid().refresh()
checker = QgsLayoutChecker('composermap_dynamic_5_10', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
# Small scale change: interval should stay the same step.
map.setScale(map.scale() * 1.1)
checker = QgsLayoutChecker('composermap_dynamic_5_10_2', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
# Larger scale change: interval must jump to the next step.
map.setScale(map.scale() * 1.8)
checker = QgsLayoutChecker('composermap_dynamic_5_10_3', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
# Tighter width bounds force a denser grid at the same scale.
map.grid().setMinimumIntervalWidth(10)
map.grid().setMaximumIntervalWidth(40)
map.grid().refresh()
checker = QgsLayoutChecker('composermap_dynamic_5_10_4', layout)
checker.setControlPathPrefix("composer_mapgrid")
myTestResult, myMessage = checker.testLayout()
self.assertTrue(myTestResult, myMessage)
# Allow running this test module directly (outside the QGIS test runner).
if __name__ == '__main__':
unittest.main()
|
grant/algorithm-music | refs/heads/master | misccode/mididump.py | 13 | #!/usr/bin/env python
"""
Print a description of a MIDI file.
"""
import midi
import sys
# Require exactly one argument: the path of the MIDI file to dump.
if len(sys.argv) != 2:
print "Usage: {0} <midifile>".format(sys.argv[0])
sys.exit(2)
midifile = sys.argv[1]
# Parse the file into a Pattern (tracks of events) and print its repr.
pattern = midi.read_midifile(midifile)
print repr(pattern)
|
viaembedded/vab1000-kernel-bsp | refs/heads/master | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Module-level state shared by the perf tracepoint handlers below; the
# handlers accumulate per-event data here and trace_end() reports it.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from src to dst, converted from nsec to msec."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
# Skip packets from other devices when a "dev=" filter is active.
if dev != 0 and hunk['dev'].find(dev) < 0:
return
# One row per packet: device, length, enqueue time, time spent in the
# Qdisc (queue -> xmit) and in the driver (xmit -> free).
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
# PF_JOINT/PF_WJOINT draw the tree connectors of the event timeline;
# the other templates each render one event line (times are +offsets
# from the first irq entry of the hunk).
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
# Render one receive "hunk": the IRQs that raised NET_RX, the softirq
# entry, and every packet event processed during that softirq, as an
# ASCII timeline. All offsets are relative to the first irq entry.
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
# First the hardware IRQ entries and any netif_rx seen inside them.
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
# Then the NET_RX softirq entry and the per-packet events inside it.
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
# 'comm' means the skb was copied to a process; 'handle' means it
# was freed (kfree_skb) or consumed (consume_skb).
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
    # Parse this script's command-line options into the module-level
    # show_tx / show_rx / dev / debug flags before event processing starts.
    global show_tx
    global show_rx
    global dev
    global debug
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # with neither direction requested explicitly, report both
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    # perf calls this once the whole trace has been delivered: sort the
    # buffered events, replay them through the handle_* routines, then
    # print the requested receive/transmit reports.
    # order all events in time (Python 2 cmp-based sort on the timestamp slot)
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each buffered tuple to its handler
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # overflow counters reveal whether buffer_budget was too small
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
# Each callback below only timestamps and buffers the raw event tuple in
# all_event_list; real processing is deferred to trace_end() so the events
# can first be sorted globally by time.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): all three softirq callbacks resolve the vector through
    # the "irq__softirq_entry" symbol table - presumably shared between the
    # entry/exit/raise events; confirm against perf's symbol_str().
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # ignore everything except NET_RX softirqs
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # ignore everything except NET_RX softirqs
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc, dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    # Open a per-cpu hard-IRQ record; handle_irq_handler_exit closes it.
    name, context, cpu, time, pid, comm, irq, irq_name = event_info
    record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic.setdefault(cpu, []).append(record)
def handle_irq_handler_exit(event_info):
    # Close the most recent hard-IRQ record on this cpu.  Records that never
    # raised a NET_RX softirq (no 'event_list') are of no interest and are
    # dropped; interesting ones are pushed back with their exit time.
    name, context, cpu, time, pid, comm, irq, ret = event_info
    if cpu not in irq_dic:
        return
    record = irq_dic[cpu].pop()
    if irq != record['irq']:
        return
    record['irq_ext_t'] = time
    if 'event_list' in record:
        irq_dic[cpu].append(record)
def handle_irq_softirq_raise(event_info):
    # Note a NET_RX softirq raise inside the hard IRQ currently open on
    # this cpu (the last record in irq_dic[cpu]).
    name, context, cpu, time, pid, comm, vec = event_info
    if not irq_dic.get(cpu):
        return
    record = irq_dic[cpu][-1]
    record.setdefault('event_list', []).append(
        {'time': time, 'event': 'sirq_raise'})
def handle_irq_softirq_entry(event_info):
    # Begin collecting NET_RX softirq events for this cpu.
    name, context, cpu, time = event_info[:4]
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}
def handle_irq_softirq_exit(event_info):
    # On NET_RX softirq exit, merge this cpu's pending hard-IRQ records and
    # softirq event list into one "receive hunk" for later printing.
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0  # 0 doubles as the "no softirq entry seen" sentinel
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # both halves are required to form a complete hunk
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    # Record a napi_poll call inside the cpu's active NET_RX softirq,
    # if one is open.
    name, context, cpu, time, pid, comm, napi, dev_name = event_info
    sirq = net_rx_dic.get(cpu)
    if sirq is None:
        return
    sirq['event_list'].append(
        {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    # Record a netif_rx call made from the hard IRQ currently open on
    # this cpu (the last record in irq_dic[cpu]).
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if not irq_dic.get(cpu):
        return
    record = irq_dic[cpu][-1]
    record.setdefault('event_list', []).append(
        {'time': time, 'event': 'netif_rx',
         'skbaddr': skbaddr, 'skblen': skblen, 'dev_name': dev_name})
def handle_netif_receive_skb(event_info):
    # Log a netif_receive_skb event in the cpu's active softirq and remember
    # the skb (bounded by buffer_budget) so its eventual free can be matched.
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in net_rx_dic.keys():
        return
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    rx_skb_list.insert(0, rec_data)
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    # Remember an skb handed to the qdisc layer, bounded by buffer_budget.
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    entry = {'dev': dev_name, 'skbaddr': skbaddr, 'len': skblen,
             'queue_t': time}
    tx_queue_list.insert(0, entry)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    # Move an skb from the qdisc list to the xmit list once the driver
    # accepted it (rc == NETDEV_TX_OK), trimming the list to buffer_budget.
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    # An skb was freed: if it was queued but never xmitted it was dropped;
    # if it was xmitted, its tx lifetime completes; if it was received,
    # remember who freed it for print_receive().
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # still sitting in the qdisc: dropped before transmission
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # transmitted: record the free time and move to the free list
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # received: annotate with the freeing context and stop tracking
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    # Match consume_skb against an xmitted skb and move it to the free list.
    name, context, cpu, time, pid, comm, skbaddr = event_info
    for index, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[index]
            return
def handle_skb_copy_datagram_iovec(event_info):
    # Mark a received skb as copied to user space and stop tracking it.
    name, context, cpu, time, pid, comm, skbaddr, skblen = event_info
    for index, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[index]
            return
|
huggingface/transformers | refs/heads/master | src/transformers/modeling_tf_pytorch_utils.py | 2 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import os
import re
import numpy
from .utils import logging
logger = logging.get_logger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
    """
    Convert a TF 2.0 model variable name in a pytorch model weight name.

    Conventions for TF2.0 scopes -> PyTorch attribute names conversions:

    - '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
    - '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)

    return tuple with:

    - pytorch model weight name
    - transpose: boolean indicating whether TF2.0 and PyTorch weights matrices are transposed with regards to each
      other
    """
    name = tf_name.replace(":0", "")  # strip device ids
    # '$1___$2' is replaced by $2 (duplicate / remove layers in TF2.0 vs PyTorch)
    name = re.sub(r"/[^/]*___([^/]*)/", r"/\1/", name)
    # '_._' becomes a level separation (TF2.0 lists -> PyTorch nn.ModuleList)
    name = name.replace("_._", "/")
    name = re.sub(r"//+", "/", name)  # collapse empty levels
    parts = name.split("/")
    # Some weights have a single name without "/" such as final_logits_bias in BART
    if len(parts) > 1:
        parts = parts[1:]  # drop the level-zero model scope

    # These TF weight layouts are stored transposed relative to PyTorch.
    transpose = bool(
        parts[-1] in ["kernel", "pointwise_kernel", "depthwise_kernel"]
        or "emb_projs" in parts
        or "out_projs" in parts
    )

    # Map standard TF2.0 leaf names onto the PyTorch equivalents.
    if parts[-1] in ("kernel", "embeddings", "gamma"):
        parts[-1] = "weight"
    elif parts[-1] == "beta":
        parts[-1] = "bias"

    # The SeparableConv1D TF layer contains two weights that are translated to PyTorch Conv1D here
    if parts[-1] in ("pointwise_kernel", "depthwise_kernel"):
        parts[-1] = parts[-1].replace("_kernel", ".weight")

    pt_name = ".".join(parts)
    if start_prefix_to_remove:
        pt_name = pt_name.replace(start_prefix_to_remove, "", 1)

    return pt_name, transpose
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
    """Load pytorch checkpoints in a TF 2.0 model.

    Reads the state dict from ``pytorch_checkpoint_path`` on CPU and hands
    it to :func:`load_pytorch_weights_in_tf2_model`; returns the TF model.
    """
    try:
        import tensorflow as tf  # noqa: F401
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
            "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    pt_path = os.path.abspath(pytorch_checkpoint_path)
    logger.info(f"Loading PyTorch weights from {pt_path}")

    # NOTE(review): torch.load unpickles arbitrary objects - only load
    # checkpoints from trusted sources.
    pt_state_dict = torch.load(pt_path, map_location="cpu")
    logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters")

    return load_pytorch_weights_in_tf2_model(
        tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
    )
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
    """Load the weights of an in-memory PyTorch model into a TF 2.0 model."""
    return load_pytorch_weights_in_tf2_model(
        tf_model,
        pt_model.state_dict(),
        tf_inputs=tf_inputs,
        allow_missing_keys=allow_missing_keys,
    )
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
    """Load pytorch state_dict in a TF 2.0 model.

    Builds the TF model (via its dummy inputs when ``tf_inputs`` is None),
    matches every TF symbolic weight to a PyTorch tensor by converted name,
    transposes/reshapes where required, then batch-assigns the values.
    Returns ``tf_model``; raises AttributeError for a weight missing from
    the state dict unless ``allow_missing_keys`` or an ignore pattern
    authorizes it.
    """
    try:
        import tensorflow as tf  # noqa: F401
        import torch  # noqa: F401
        from tensorflow.python.keras import backend as K
    except ImportError:
        logger.error(
            "Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
            "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    if tf_inputs is None:
        tf_inputs = tf_model.dummy_inputs

    if tf_inputs is not None:
        tf_model(tf_inputs, training=False)  # Make sure model is built

    # Adapt state dict - TODO remove this and update the AWS weights files instead
    # Convert old format to new format if needed from a PyTorch state_dict
    old_keys = []
    new_keys = []
    for key in pt_state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        pt_state_dict[new_key] = pt_state_dict.pop(old_key)

    # Make sure we are able to load PyTorch base models as well as derived models (with heads)
    # TF models always have a prefix, some of PyTorch models (base ones) don't
    start_prefix_to_remove = ""
    if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
        start_prefix_to_remove = tf_model.base_model_prefix + "."

    symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
    tf_loaded_numel = 0
    weight_value_tuples = []
    all_pytorch_weights = set(list(pt_state_dict.keys()))
    missing_keys = []
    for symbolic_weight in symbolic_weights:
        sw_name = symbolic_weight.name
        name, transpose = convert_tf_weight_name_to_pt_weight_name(
            sw_name, start_prefix_to_remove=start_prefix_to_remove
        )

        # Find associated numpy array in pytorch model state dict
        if name not in pt_state_dict:
            if allow_missing_keys:
                missing_keys.append(name)
                continue
            elif tf_model._keys_to_ignore_on_load_missing is not None:
                # authorized missing keys don't have to be loaded
                if any(re.search(pat, name) is not None for pat in tf_model._keys_to_ignore_on_load_missing):
                    continue
            raise AttributeError(f"{name} not found in PyTorch model")

        array = pt_state_dict[name].numpy()

        if transpose:
            array = numpy.transpose(array)

        # rank adjustments: drop or add a leading singleton dimension
        if len(symbolic_weight.shape) < len(array.shape):
            array = numpy.squeeze(array)
        elif len(symbolic_weight.shape) > len(array.shape):
            array = numpy.expand_dims(array, axis=0)

        if list(symbolic_weight.shape) != list(array.shape):
            try:
                array = numpy.reshape(array, symbolic_weight.shape)
            except AssertionError as e:
                # NOTE(review): numpy.reshape raises ValueError on an
                # incompatible shape, so this handler looks unreachable;
                # consider catching ValueError - confirm before changing.
                e.args += (symbolic_weight.shape, array.shape)
                raise e

        try:
            assert list(symbolic_weight.shape) == list(array.shape)
        except AssertionError as e:
            e.args += (symbolic_weight.shape, array.shape)
            raise e

        tf_loaded_numel += array.size
        # logger.warning(f"Initialize TF weight {symbolic_weight.name}")

        weight_value_tuples.append((symbolic_weight, array))
        all_pytorch_weights.discard(name)

    K.batch_set_value(weight_value_tuples)

    if tf_inputs is not None:
        tf_model(tf_inputs, training=False)  # Make sure restore ops are run

    logger.info(f"Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.")

    unexpected_keys = list(all_pytorch_weights)

    # filter out keys the model declares as expected to be missing/unexpected
    if tf_model._keys_to_ignore_on_load_missing is not None:
        for pat in tf_model._keys_to_ignore_on_load_missing:
            missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
    if tf_model._keys_to_ignore_on_load_unexpected is not None:
        for pat in tf_model._keys_to_ignore_on_load_unexpected:
            unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

    if len(unexpected_keys) > 0:
        logger.warning(
            f"Some weights of the PyTorch model were not used when "
            f"initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys}\n"
            f"- This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task "
            f"or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n"
            f"- This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect "
            f"to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model "
            f"and are newly initialized: {missing_keys}\n"
            f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.\n"
            f"If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {tf_model.__class__.__name__} for predictions without further training."
        )

    return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
    """
    Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see
    https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
    """
    try:
        import tensorflow as tf  # noqa: F401
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
            "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    import transformers

    from .modeling_tf_utils import load_tf_weights

    logger.info(f"Loading TensorFlow weights from {tf_checkpoint_path}")

    # Instantiate and load the associated TF 2.0 model
    # (relies on the convention that the TF class is "TF" + PyTorch class name)
    tf_model_class_name = "TF" + pt_model.__class__.__name__  # Add "TF" at the beginning
    tf_model_class = getattr(transformers, tf_model_class_name)
    tf_model = tf_model_class(pt_model.config)

    if tf_inputs is None:
        tf_inputs = tf_model.dummy_inputs

    if tf_inputs is not None:
        tf_model(tf_inputs, training=False)  # Make sure model is built

    load_tf_weights(tf_model, tf_checkpoint_path)

    return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
    """Load an in-memory TF 2.0 model's weights into a PyTorch model."""
    return load_tf2_weights_in_pytorch_model(
        pt_model, tf_model.weights, allow_missing_keys=allow_missing_keys
    )
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
    """Load TF2.0 symbolic weights in a PyTorch model.

    Maps each TF variable to a PyTorch parameter name, transposes/reshapes
    the numpy value where required, and applies the resulting state dict
    with ``strict=False``.  Returns ``pt_model``; raises AttributeError for
    a parameter absent from the TF weights unless ``allow_missing_keys``.
    """
    try:
        import tensorflow as tf  # noqa: F401
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
            "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    new_pt_params_dict = {}
    current_pt_params_dict = dict(pt_model.named_parameters())

    # Make sure we are able to load PyTorch base models as well as derived models (with heads)
    # TF models always have a prefix, some of PyTorch models (base ones) don't
    start_prefix_to_remove = ""
    if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
        start_prefix_to_remove = pt_model.base_model_prefix + "."

    # Build a map from potential PyTorch weight names to TF 2.0 Variables
    tf_weights_map = {}
    for tf_weight in tf_weights:
        pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
            tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
        )
        tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)

    all_tf_weights = set(list(tf_weights_map.keys()))
    loaded_pt_weights_data_ptr = {}
    missing_keys_pt = []
    for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0): reuse the
        # tensor already loaded for the first parameter sharing this storage
        if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
            new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
            continue

        # Find associated numpy array in pytorch model state dict
        if pt_weight_name not in tf_weights_map:
            if allow_missing_keys:
                missing_keys_pt.append(pt_weight_name)
                continue

            raise AttributeError(f"{pt_weight_name} not found in TF 2.0 model")

        array, transpose = tf_weights_map[pt_weight_name]

        if transpose:
            array = numpy.transpose(array)

        # rank adjustments: drop or add a leading singleton dimension
        if len(pt_weight.shape) < len(array.shape):
            array = numpy.squeeze(array)
        elif len(pt_weight.shape) > len(array.shape):
            array = numpy.expand_dims(array, axis=0)

        if list(pt_weight.shape) != list(array.shape):
            try:
                array = numpy.reshape(array, pt_weight.shape)
            except AssertionError as e:
                # NOTE(review): numpy.reshape raises ValueError on an
                # incompatible shape, so this handler looks unreachable;
                # consider catching ValueError - confirm before changing.
                e.args += (pt_weight.shape, array.shape)
                raise e

        try:
            assert list(pt_weight.shape) == list(array.shape)
        except AssertionError as e:
            e.args += (pt_weight.shape, array.shape)
            raise e

        # logger.warning(f"Initialize PyTorch weight {pt_weight_name}")

        new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
        loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
        all_tf_weights.discard(pt_weight_name)

    missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
    missing_keys += missing_keys_pt

    # Some models may have keys that are not in the state by design, removing them before needlessly warning
    # the user.
    if pt_model._keys_to_ignore_on_load_missing is not None:
        for pat in pt_model._keys_to_ignore_on_load_missing:
            missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

    if pt_model._keys_to_ignore_on_load_unexpected is not None:
        for pat in pt_model._keys_to_ignore_on_load_unexpected:
            unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

    if len(unexpected_keys) > 0:
        logger.warning(
            f"Some weights of the TF 2.0 model were not used when "
            f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
            f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task "
            f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).\n"
            f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect "
            f"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model "
            f"and are newly initialized: {missing_keys}\n"
            f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\n"
            f"If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    logger.info(f"Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}")

    return pt_model
|
liorvh/infernal-twin | refs/heads/master | build/pillow/PIL/GimpGradientFile.py | 72 | #
# Python Imaging Library
# $Id$
#
# stuff to read (and render) GIMP gradient files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from math import pi, log, sin, sqrt
from PIL._binary import o8
# --------------------------------------------------------------------
# Stuff to translate curve segments to palette values (derived from
# the corresponding code in GIMP, written by Federico Mena Quintero.
# See the GIMP distribution for more information.)
#
EPSILON = 1e-10
def linear(middle, pos):
    # Piecewise-linear ramp mapping pos in [0, 1] to [0, 1], with the 0.5
    # crossing placed at *middle*; degenerate halves clamp to 0.0 / 1.0.
    if pos <= middle:
        if middle < EPSILON:
            return 0.0
        return 0.5 * pos / middle
    span = 1.0 - middle
    if span < EPSILON:
        return 1.0
    return 0.5 + 0.5 * (pos - middle) / span
def curved(middle, pos):
    # Power curve through (middle, 0.5); middle is clamped away from zero.
    exponent = log(0.5) / log(max(middle, EPSILON))
    return pos ** exponent
def sine(middle, pos):
    # Sinusoidal easing applied to the linear ramp.
    t = linear(middle, pos)
    return (sin(pi * t - pi / 2.0) + 1.0) / 2.0
def sphere_increasing(middle, pos):
    # Quarter-circle (concave up) easing of the linear ramp.
    t = linear(middle, pos) - 1.0
    return sqrt(1.0 - t * t)
def sphere_decreasing(middle, pos):
    # Quarter-circle (concave down) easing of the linear ramp.
    t = linear(middle, pos)
    return 1.0 - sqrt(1.0 - t * t)
SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing]
class GradientFile(object):
    """Base class holding a parsed gradient as a list of segments
    (x0, x1, xm, rgb0, rgb1, segment_function), with x values in [0, 1]."""

    # set by subclasses after parsing
    gradient = None

    def getpalette(self, entries=256):
        """Render the gradient into an RGBA byte palette of *entries*
        colors and return a ``(palette_bytes, "RGBA")`` tuple."""

        palette = []

        ix = 0
        x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

        for i in range(entries):

            x = i / float(entries-1)

            # advance to the segment containing x
            while x1 < x:
                ix += 1
                x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

            w = x1 - x0

            if w < EPSILON:
                # degenerate segment: sample the curve at its midpoint
                scale = segment(0.5, 0.5)
            else:
                # midpoint and sample position, normalized to the segment
                scale = segment((xm - x0) / w, (x - x0) / w)

            # expand to RGBA (round-to-nearest via the +0.5)
            r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
            g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
            b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
            a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))

            # add to palette
            palette.append(r + g + b + a)

        return b"".join(palette), "RGBA"
##
# File handler for GIMP's gradient format.

class GimpGradientFile(GradientFile):
    """Parse a GIMP ``.ggr`` gradient file into the segment list used by
    GradientFile.getpalette().  Raises SyntaxError for non-gradient files
    and IOError for the unsupported HSV colour space."""

    def __init__(self, fp):
        if fp.readline()[:13] != b"GIMP Gradient":
            raise SyntaxError("not a GIMP gradient file")

        line = fp.readline()

        # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do
        if line.startswith(b"Name: "):
            line = fp.readline().strip()

        count = int(line)  # number of segment lines that follow

        gradient = []

        for i in range(count):

            # segment line: x0 xm x1, r0 g0 b0 a0, r1 g1 b1 a1, type, cspace
            s = fp.readline().split()
            w = [float(x) for x in s[:11]]

            x0, x1 = w[0], w[2]
            xm = w[1]
            rgb0 = w[3:7]
            rgb1 = w[7:11]

            segment = SEGMENTS[int(s[11])]
            cspace = int(s[12])

            if cspace != 0:
                raise IOError("cannot handle HSV colour space")

            gradient.append((x0, x1, xm, rgb0, rgb1, segment))

        self.gradient = gradient
|
wuzhenda/gaedav | refs/heads/master | pyxml/dom/minicompat.py | 4 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# isinstance -- version of the isinstance() function that accepts
# tuples as the second parameter regardless of the
# Python version
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guarateed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# GetattrMagic -- base class used to make _get_<attr> be magically
# invoked when available
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
#
# NewStyle -- base class to cause __slots__ to be honored in
# the new world
#
# True, False -- only for Python 2.2 and earlier
__all__ = ["NodeList", "EmptyNodeList", "NewStyle",
"StringTypes", "defproperty", "GetattrMagic"]
import pyxml.dom
# Build StringTypes: (str,) on interpreters without unicode support,
# (str, unicode) otherwise.
try:
    unicode
except NameError:
    StringTypes = type(''),
else:
    StringTypes = type(''), type(unicode(''))

# define True and False only if not defined as built-ins
try:
    True
except NameError:
    True = 1
    False = 0
    __all__.extend(["True", "False"])
# Detect whether the built-in isinstance() accepts a tuple of types
# (Python 2.2+); if not, shadow it with a wrapper that emulates that.
try:
    isinstance('', StringTypes)
except TypeError:
    #
    # Wrap isinstance() to make it compatible with the version in
    # Python 2.2 and newer.
    #
    _isinstance = isinstance

    def isinstance(obj, type_or_seq):
        # first try the native call; on TypeError, fall back to testing
        # each member of the sequence individually
        try:
            return _isinstance(obj, type_or_seq)
        except TypeError:
            for t in type_or_seq:
                if _isinstance(obj, t):
                    return 1
            return 0

    __all__.append("isinstance")
# On interpreters where built-in types are subclassable (Python 2.2+),
# provide real DOM NodeList classes; otherwise fall back to factories
# returning plain lists.
if list is type([]):
    class NodeList(list):
        __slots__ = ()

        def item(self, index):
            # DOM item(): returns None (implicitly) for out-of-range indexes
            if 0 <= index < len(self):
                return self[index]

        def _get_length(self):
            return len(self)

        def _set_length(self, value):
            raise pyxml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute 'length'")

        length = property(_get_length, _set_length,
                          doc="The number of nodes in the NodeList.")

        # pickle support: __slots__ classes need explicit state handling
        def __getstate__(self):
            return list(self)

        def __setstate__(self, state):
            self[:] = state

    class EmptyNodeList(tuple):
        __slots__ = ()

        # adding to an EmptyNodeList yields a mutable NodeList
        def __add__(self, other):
            NL = NodeList()
            NL.extend(other)
            return NL

        def __radd__(self, other):
            NL = NodeList()
            NL.extend(other)
            return NL

        def item(self, index):
            return None

        def _get_length(self):
            return 0

        def _set_length(self, value):
            raise pyxml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute 'length'")

        length = property(_get_length, _set_length,
                          doc="The number of nodes in the NodeList.")

else:
    # pre-2.2 interpreters cannot subclass list/tuple
    def NodeList():
        return []

    def EmptyNodeList():
        return []
# Provide defproperty()/GetattrMagic/NewStyle appropriately for the
# interpreter: __getattr__ dispatch before property() existed (pre-2.2),
# real read-only properties afterwards.
try:
    property
except NameError:
    def defproperty(klass, name, doc):
        # taken care of by the base __getattr__()
        pass

    class GetattrMagic:
        def __getattr__(self, key):
            # never magically resolve private names
            if key.startswith("_"):
                raise AttributeError, key

            try:
                get = getattr(self, "_get_" + key)
            except AttributeError:
                raise AttributeError, key
            return get()

    class NewStyle:
        pass

else:
    def defproperty(klass, name, doc):
        # build a read-only property from the class's _get_<name> method
        get = getattr(klass, ("_get_" + name)).im_func
        def set(self, value, name=name):
            raise pyxml.dom.NoModificationAllowedErr(
                "attempt to modify read-only attribute " + repr(name))
        assert not hasattr(klass, "_set_" + name), \
               "expected not to find _set_" + name
        prop = property(get, set, doc=doc)
        setattr(klass, name, prop)

    class GetattrMagic:
        pass

    NewStyle = object
|
hectord/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/modeltests/field_defaults/models.py | 93 | # coding: utf-8
"""
32. Callable defaults
You can pass callable objects as the ``default`` parameter to a field. When
the object is created without an explicit value passed in, Django will call
the method to determine the default value.
This example uses ``datetime.datetime.now`` as the default for the ``pub_date``
field.
"""
from django.db import models
from datetime import datetime
class Article(models.Model):
    # Static default: every new instance gets the same string.
    headline = models.CharField(max_length=100, default='Default headline')
    # Callable default: Django calls datetime.now when the instance is
    # created, so each Article gets a fresh timestamp (see module docstring).
    pub_date = models.DateTimeField(default=datetime.now)

    def __unicode__(self):
        # Python-2 string representation used by the admin/tests.
        return self.headline
|
lmorchard/django | refs/heads/master | django/utils/archive.py | 562 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
    """
    Base exception class for all archive errors raised by this module.
    """
class UnrecognizedArchiveFormat(ArchiveException):
    """
    Error raised when passed file is not a recognized archive format
    (neither a known tar variant nor a zip file, per ``extension_map``).
    """
def extract(path, to_path=''):
    """
    Unpack the tar or zip archive at ``path`` into the directory ``to_path``.
    """
    archive = Archive(path)
    try:
        archive.extract(to_path)
    finally:
        # Equivalent to the context-manager form: close unconditionally.
        archive.close()
class Archive(object):
    """
    External API facade: picks a Tar or Zip implementation by file extension
    and delegates extract/list/close to it. Usable as a context manager.
    """
    def __init__(self, file):
        # Resolve the implementation class first, then instantiate it.
        implementation = self._archive_cls(file)
        self._archive = implementation(file)

    @staticmethod
    def _archive_cls(file):
        # Accept either a path string or a file object exposing .name.
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        # First try the outermost extension (e.g. ".zip", ".tgz"); for
        # double suffixes such as ".tar.gz" retry on the inner extension.
        root, extension = os.path.splitext(filename.lower())
        archive_cls = extension_map.get(extension)
        if not archive_cls:
            root, extension = os.path.splitext(root)
            archive_cls = extension_map.get(extension)
        if not archive_cls:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return archive_cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def extract(self, to_path=''):
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
class BaseArchive(object):
    """
    Common helpers for archive implementations; subclasses must supply
    extract() and list().
    """
    def split_leading_dir(self, path):
        # Split off the first path component, honouring whichever of '/' or
        # '\\' appears first after stripping leading separators.
        cleaned = str(path).lstrip('/').lstrip('\\')
        slash = cleaned.find('/')
        backslash = cleaned.find('\\')
        if slash != -1 and (backslash == -1 or slash < backslash):
            return cleaned.split('/', 1)
        if backslash != -1:
            return cleaned.split('\\', 1)
        return cleaned, ''

    def has_leading_dir(self, paths):
        """
        Returns true if all the paths share the same non-empty leading
        component (i.e., everything sits in one subdirectory of the archive).
        """
        common = None
        for candidate in paths:
            prefix, _ = self.split_leading_dir(candidate)
            if not prefix:
                return False
            if common is None:
                common = prefix
            elif prefix != common:
                return False
        return True

    def extract(self):
        raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')

    def list(self):
        raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):
    """Archive implementation backed by the stdlib tarfile module."""

    def __init__(self, file):
        # tarfile.open() accepts a path or file object and autodetects
        # compression (plain/gz/bz2).
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        """Print the archive's table of contents to stdout."""
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """Extract all members under *to_path*, stripping any common leading
        directory shared by every member."""
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # BUG FIX: 'extracted' must be bound before the try block.
                # Previously, if extractfile() raised, the finally clause read
                # an unbound variable (NameError on the first failing member)
                # or a stale handle from an earlier iteration.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    # NOTE(review): the first placeholder receives the member
                    # name, not the tar file name -- confirm intended wording.
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        """Close the underlying tarfile."""
        self._archive.close()
class ZipArchive(BaseArchive):
    """Archive implementation backed by the stdlib zipfile module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        """Print the archive's table of contents to stdout."""
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """Extract all entries under *to_path*, stripping any common leading
        directory shared by every entry."""
        entries = self._archive.namelist()
        strip_prefix = self.has_leading_dir(entries)
        for entry in entries:
            # Read with the original entry name before any stripping.
            payload = self._archive.read(entry)
            target = self.split_leading_dir(entry)[1] if strip_prefix else entry
            target = os.path.join(to_path, target)
            parent = os.path.dirname(target)
            if parent and not os.path.exists(parent):
                os.makedirs(parent)
            if target.endswith(('/', '\\')):
                # A directory entry
                if not os.path.exists(target):
                    os.makedirs(target)
            else:
                with open(target, 'wb') as outfile:
                    outfile.write(payload)

    def close(self):
        self._archive.close()
# Lookup table from lowercased file extension to implementation class,
# consulted by Archive._archive_cls (which also retries on the inner
# extension of double suffixes such as ".tar.gz").
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
|
stewartpark/django | refs/heads/master | tests/force_insert_update/models.py | 581 | """
Tests for forcing insert and update queries (instead of Django's normal
automatic behavior).
"""
from django.db import models
class Counter(models.Model):
    # Concrete base model used to exercise force_insert/force_update saves.
    name = models.CharField(max_length=10)
    value = models.IntegerField()
class InheritedCounter(Counter):
    # Multi-table inheritance child: adds its own table linked to Counter.
    tag = models.CharField(max_length=10)
class ProxyCounter(Counter):
    # Proxy model: shares Counter's table, adds no fields.
    class Meta:
        proxy = True
class SubCounter(Counter):
    # Multi-table inheritance child adding no fields of its own.
    pass
class WithCustomPK(models.Model):
    # Model with an explicit (non-auto) primary key.
    name = models.IntegerField(primary_key=True)
    value = models.IntegerField()
|
jimberlage/servo | refs/heads/master | tests/wpt/webgl/tools/import-conformance-tests.py | 6 | #!/usr/bin/env python
import os
import subprocess
import sys
import tempfile
import shutil
import bisect
import argparse
KHRONOS_REPO_URL = "https://github.com/KhronosGroup/WebGL.git"
# Patches for conformance tests 1.0.x
PATCHES = [
("js-test-pre.patch", "js/js-test-pre.js"),
("unit.patch", "conformance/more/unit.js"),
("timeout.patch", None),
("set-zero-timeout.patch", "js/webgl-test-utils.js"),
]
# Fix for 'UnicodeDecodeError: 'ascii' codec can't decode byte'
reload(sys)
sys.setdefaultencoding('utf8')
def usage():
    """Print the command-line usage string and abort with exit status 1."""
    message = "Usage: {} destination [existing_webgl_repo]".format(sys.argv[0])
    print(message)
    sys.exit(1)
def get_tests(base_dir, file_name, tests_list):
    """Recursively collect test paths from a manifest into *tests_list*.

    Each non-comment line names either an ``.html`` test (appended) or a
    ``.txt`` sub-manifest (recursed into). Exits the process if the manifest
    is missing.
    """
    manifest = os.path.join(base_dir, file_name)
    if not os.path.isfile(manifest):
        print("Test list ({}) not found".format(manifest))
        sys.exit(1)
    print("Processing: {}".format(manifest))
    with open(manifest, "r") as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if not entry or entry.startswith(("#", "//")):
                continue  # empty line or comment
            # Lines often look like "--min-version x.x.x abc.html";
            # only the final token matters.
            entry = entry.split(" ")[-1]
            if entry.endswith(".html"):
                tests_list.append(os.path.join(base_dir, entry))
            if entry.endswith(".txt"):
                nested_dir, nested_name = os.path.split(
                    os.path.join(base_dir, entry))
                get_tests(nested_dir, nested_name, tests_list)
# Insert the test harness scripts before any other script
def process_test(test):
    """Insert the WPT harness script tags into the test file *test*.

    The testharness.js / testharnessreport.js tags are inserted immediately
    before the first ``<script`` tag (matching its indentation); the rewritten
    file then replaces the original in place.
    """
    (new, new_path) = tempfile.mkstemp()
    script_tag_found = False
    # BUG FIX: the original called os.write(fd, str), which requires bytes on
    # Python 3 and leaked the descriptor if an error occurred. Wrapping the fd
    # in a file object handles text encoding and guarantees closure.
    with os.fdopen(new, "w") as out_file:
        with open(test, "r") as test_file:
            for line in test_file:
                if not script_tag_found and "<script" in line:
                    indent = ' ' * line.index('<')
                    script_tag_found = True
                    out_file.write("{}<script src=/resources/testharness.js></script>\n".format(indent))
                    out_file.write("{}<script src=/resources/testharnessreport.js></script>\n".format(indent))
                out_file.write(line)
    shutil.move(new_path, test)
def update_conformance(destination, existing_repo, patches_dir):
    """Import the Khronos WebGL conformance suite into *destination*.

    Clones (or reuses) the upstream repo, copies sdk/tests, prunes html files
    not listed in the manifests, injects the WPT harness into every test and
    applies the local compatibility patches from *patches_dir* (defaults to
    this script's directory).
    """
    print("Trying to import WebGL tests into {}".format(destination))
    if existing_repo:
        directory = existing_repo
        print("Using existing WebGL repository: {}".format(directory))
    else:
        directory = tempfile.mkdtemp()
        print("Cloning WebGL repository into temporary directory {}".format(directory))
        subprocess.check_call(["git", "clone", KHRONOS_REPO_URL, directory, "--depth", "1"])
    suite_dir = os.path.join(directory, "sdk/tests")
    print("Test suite directory: {}".format(suite_dir))
    if not os.path.isdir(suite_dir):
        print("Test suite directory ({}) not found, aborting...".format(suite_dir))
        sys.exit(1)
    # We recursively copy all the test suite to `destination`
    shutil.copytree(suite_dir, destination)
    # Get all the tests, remove any html file which is not in the list, and
    # later process them.
    tests = []
    get_tests(destination, "00_test_list.txt", tests)
    test_count = len(tests)
    print("Found {} tests.".format(test_count))
    print("Removing non-test html files")
    # To use binary search, which speeds things up a little
    # instead of f in tests
    tests.sort()
    # Remove html files that are not tests
    for dirpath, dirnames, filenames in os.walk(destination):
        if '/resources' in dirpath:
            continue # Most of the files under resources directories are used
        for f in filenames:
            if not f.endswith('.html'):
                continue
            f = os.path.join(dirpath, f)
            pos = bisect.bisect_left(tests, f)
            if pos == test_count or tests[pos] != f:
                print("Removing: {}".format(f))
                os.remove(f)
    # Insert our harness into the tests
    for test in tests:
        process_test(test)
    # Try to apply the patches to the required files
    if not patches_dir:
        patches_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
    for patch, file_name in PATCHES:
        try:
            patch = os.path.join(patches_dir, patch)
            if file_name is None:
                # Patch with no target file: applied repo-wide with -p1.
                subprocess.check_call(["patch", "-d", destination, "-p", "1"], stdin=open(patch))
            else:
                subprocess.check_call(["patch", "-x", "3", "-d", destination, file_name, patch])
        except subprocess.CalledProcessError:
            # A failed patch is non-fatal; the integrator must review by hand.
            print("Automatic patch failed for {}".format(file_name))
            print("Please review the WPT integration and update {} accordingly".format(os.path.basename(patch)))
def main():
    """Parse command-line arguments and run the conformance importer."""
    parser = argparse.ArgumentParser()
    parser.add_argument("destination", help="Test suite destination")
    parser.add_argument("-e", "--existing-repo", help="Path to an existing clone of the khronos WebGL repository")
    options = parser.parse_args()
    update_conformance(options.destination, options.existing_repo, None)
if __name__ == '__main__':
main()
|
sergi-casbas/xstore-api | refs/heads/master | xapiconnect.py | 1 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# xapiconnect.py
""" Module with the required classes to handle and automatize connection with xStore """
#
# Copyright 2016 Sergi Casbas <sergi@casbas.cat>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import json
import time
from websocket import create_connection
from websocket import WebSocketConnectionClosedException
XAPI_ENV_REAL = "real"
XAPI_ENV_DEMO = "demo"
FLOOD_THRESHOLD = 215
FLOOD_MAXIMUM = 6
################################################################################
class xapi_exception(Exception):
    """ Exception related to xStore API connections. """
    # Carries no extra state; the message string holds the xStore error text.
    pass
################################################################################
#------------------------------------------------------------------------------#
class xapi_direct(object):
    """Handle xStore API direct (request/response) connections.

    Wraps a websocket to the main endpoint, performs login/logout, and
    throttles outgoing queries to respect the server's flood limits.
    """

    def __init__(self, environment, username, password):
        """Store credentials and select endpoint URLs for *environment*.

        Raises:
            xapi_exception: if *environment* is neither 'real' nor 'demo'.
        """
        # BUG FIX: connection/session/config/_antiflood used to be mutable
        # class attributes, so every instance shared one config dict and one
        # flood timer; they are now per-instance state.
        self.connection = None       # WebSocket object
        self.session = 0             # stream session id (0 = not logged in)
        self.config = {}             # local configuration
        self._antiflood = [0, 0]     # flood control state (count, time)

        self.config['username'] = username
        self.config['password'] = password

        # Select the right URLs based on the selected environment.
        if environment == XAPI_ENV_REAL:
            self.config['mainUrl'] = 'wss://ws.xapi.pro/real'
            self.config['streamUrl'] = 'wss://ws.xapi.pro/realStream'
        elif environment == XAPI_ENV_DEMO:
            self.config['mainUrl'] = 'wss://ws.xapi.pro/demo'
            self.config['streamUrl'] = 'wss://ws.xapi.pro/demoStream'
        else:
            raise xapi_exception("Wrong environment.")

    def open_connection(self):
        """Open the websocket and auto-login; stores the stream session id."""
        self.connection = create_connection(self.config['mainUrl'])
        # Auto-login.
        command = self.create_command('login')
        command['arguments']['userId'] = self.config['username']
        command['arguments']['password'] = self.config['password']
        # Send the command and keep the returned session id.
        self.session = self.query(command)

    def close_connection(self):
        """Log out and close the websocket (best effort; errors ignored)."""
        try:
            self.quick_query('logout')
            self.connection.close()
            self.connection = None
            self.session = None
        except Exception:
            # Was a bare except; narrowed so KeyboardInterrupt/SystemExit
            # still propagate while teardown stays best-effort.
            pass

    def create_command(self, command):
        """Return a command dictionary for *command* bound to this session."""
        return xapi_direct._create_command(self, command)

    @staticmethod
    def _create_command(xapiconn, command):
        """Class-level create_command so the stream class can reuse it."""
        dic_command = {}
        dic_command['command'] = command
        dic_command['arguments'] = {}
        dic_command['streamSessionId'] = str(xapiconn.session)
        return dic_command

    def query(self, command):
        """Send *command* as JSON and return the decoded response payload.

        Raises:
            xapi_exception: when not connected, or when xStore reports an
                error (message is "errorCode: errorDescr").
        """
        # Validate the session; 'login' is the only pre-session command.
        if self.session in (None, 0) and command['command'] != 'login':
            raise xapi_exception("Not connected.")
        # Respect the server's flooding limits.
        self.__antiflood()
        self.connection.send(json.dumps(command))
        response = json.loads(self.connection.recv())
        # A false status means the server rejected the command.
        if not response['status']:
            if 'errorDescr' not in response:
                response['errorDescr'] = "Unknown Error"
            raise xapi_exception(response['errorCode'] + ': ' + response['errorDescr'])
        # Some commands return special payloads.
        if command['command'] == 'login':
            return response['streamSessionId']
        if command['command'] in ('ping', 'logout'):
            return 'ok'
        return response['returnData']

    def quick_query(self, commandString):
        """Build and send a simple argument-less command from its name."""
        return self.query(self.create_command(commandString))

    def __antiflood(self):
        """Throttle queries to at most one per FLOOD_THRESHOLD milliseconds.

        NOTE(review): FLOOD_MAXIMUM and the count slot of _antiflood are
        currently unused; only the timestamp throttle is enforced.
        """
        now = int(round(time.time() * 1000))
        if FLOOD_THRESHOLD - (now - self._antiflood[0]) > 0:
            time.sleep(FLOOD_THRESHOLD / 1000.0)
        self._antiflood[0] = now
#------------------------------------------------------------------------------#
################################################################################
#------------------------------------------------------------------------------#
class xapi_stream(object):
    """Handle the xStore API streaming (stateful) connection.

    Owns an auxiliary xapi_direct connection used for authentication and
    keep-alive pings, plus the streaming websocket itself.
    """

    def __init__(self, environment, username, password):
        """Create the auxiliary direct connection used to authenticate."""
        # Per-instance state (previously class attributes).
        self._aux = xapi_direct(environment, username, password)
        self.stream = None            # streaming websocket
        self.queue = None             # reserved: queue for threaded reception
        self.streams_thread = None    # reserved: reader thread id

    def open_stream(self):
        """Open the auxiliary connection, connect the stream, enable keepAlive."""
        self._aux.open_connection()
        self.stream = create_connection(self._aux.config['streamUrl'])
        # Always enable keepAlive so get_next_stream() can ping while idle.
        self.quick_request('getKeepAlive')

    def close_stream(self):
        """Close the stream and the auxiliary connection (best effort)."""
        try:
            self.stream.close()
            self._aux.close_connection()
        except Exception:
            # Was a bare except; narrowed so KeyboardInterrupt/SystemExit
            # still propagate while teardown stays best-effort.
            pass

    def create_command(self, command):
        """Create a stream command bound to the auxiliary session."""
        new_command = xapi_direct._create_command(self._aux, command)
        # Stream commands carry no arguments.
        del new_command['arguments']
        return new_command

    def request(self, command):
        """Send *command* on the stream socket.

        Raises:
            xapi_exception: if the stream is not connected.
        """
        if self.stream is None:  # was `== None`
            raise xapi_exception("Stream not connected.")
        self.stream.send(json.dumps(command))

    def get_next_stream(self):
        """Block until the next non-keepAlive message arrives and return it.

        keepAlive messages are consumed internally: both connections are
        pinged and the wait continues.

        Raises:
            xapi_exception: if the stream is not connected.
        """
        if self.stream is None:
            raise xapi_exception("Stream not connected.")
        response = None
        while response is None:
            response = json.loads(self.stream.recv())
            if response['command'] == 'keepAlive':
                self.quick_request('ping')
                self._aux.quick_query('ping')
                response = None
        return response

    def quick_request(self, commandString):
        """Build and send a simple stream request from its command name."""
        return self.request(self.create_command(commandString))
#------------------------------------------------------------------------------#
################################################################################
#### STANDALONE EXECUTION TO DEBUG ####
if __name__ == '__main__':
    # BUG FIX: the guard compared __file__ (usually a path such as
    # "/some/dir/xapiconnect.py") against the bare name 'xapiconnect.py', so
    # the debug loop almost never ran; the conventional __name__ check fires
    # exactly when the module is executed as a script.
    import sys
    x = xapi_direct('demo', sys.argv[1], sys.argv[2])
    x.open_connection()
    try:
        while True:
            print(x.quick_query("getServerTime"))
            time.sleep(0.200)
    finally:
        # The close call after the infinite loop was unreachable dead code;
        # run it on the way out (e.g. Ctrl-C) instead.
        x.close_connection()
|
kosgroup/odoo | refs/heads/10.0 | addons/portal/models/__init__.py | 22 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import res_groups
|
jayoshih/content-curation | refs/heads/master | contentcuration/manage.py | 4 | #!/usr/bin/env python
import os
import sys
# Attach Python Cloud Debugger
if __name__ == "__main__":
#import warnings
#warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
DualSpark/ansible | refs/heads/devel | v1/ansible/runner/lookup_plugins/inventory_hostnames.py | 173 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
import ansible.inventory as inventory
def flatten(terms):
    """Flatten exactly one level of nesting: list items are spliced in,
    everything else (including tuples) is kept as-is."""
    flat = []
    for item in terms:
        if isinstance(item, list):
            flat += item
        else:
            flat.append(item)
    return flat
class LookupModule(object):
    """Lookup plugin that expands inventory host patterns into hostnames."""

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
        # Guard clause: this plugin only works as a loop, where the runner
        # (and therefore its inventory) is supplied by Ansible.
        if 'runner' not in kwargs:
            raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
        self.host_list = kwargs['runner'].inventory.host_list

    def run(self, terms, inject=None, **kwargs):
        # Normalize templated terms, then match them against the inventory.
        patterns = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        if not isinstance(patterns, list):
            raise errors.AnsibleError("with_inventory_hostnames expects a list")
        return flatten(inventory.Inventory(self.host_list).list_hosts(patterns))
|
larsoner/mne-python | refs/heads/master | examples/inverse/plot_mne_cov_power.py | 15 | """
===================================================================
Compute source power estimate by projecting the covariance with MNE
===================================================================
We can apply the MNE inverse operator to a covariance matrix to obtain
an estimate of source power. This is computationally more efficient than first
estimating the source timecourses and then computing their power. This
code is based on the code from :footcite:`Sabbagh2020` and has been useful to
correct for individual field spread using source localization in the context of
predictive modeling.
References
----------
.. footbibliography::
"""
# Author: Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Luke Bloy <luke.bloy@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse_cov
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
###############################################################################
# Compute empty-room covariance
# -----------------------------
# First we compute an empty-room covariance, which captures noise from the
# sensors and environment.
# Use the first 60 s of the empty-room recording: enough data for a stable
# covariance estimate while keeping computation cheap.
raw_empty_room_fname = op.join(
    data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_empty_room.crop(0, 60)
# Mirror the task recording's bad channel and SSP projectors so the noise
# covariance is estimated in the same (projected) channel space.
raw_empty_room.info['bads'] = ['MEG 2443']
raw_empty_room.info['projs'] = raw.info['projs']
# Passing several methods lets MNE pick the best estimator ('shrunk'
# regularizes the sample covariance) -- see compute_raw_covariance docs.
noise_cov = mne.compute_raw_covariance(
    raw_empty_room, method=['empirical', 'shrunk'])
del raw_empty_room  # free memory; only the covariance is needed below
###############################################################################
# Epoch the data
# --------------
raw.info['bads'] = ['MEG 2443', 'EEG 053']
raw.load_data().filter(4, 12)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
tmin, tmax = -0.2, 0.5
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw.copy().filter(4, 12), events, event_id, tmin, tmax,
proj=True, picks=('meg', 'eog'), baseline=None,
reject=reject, preload=True)
del raw
###############################################################################
# Compute and plot covariances
# ----------------------------
# In addition to the empty-room covariance above, we compute two additional
# covariances:
#
# 1. Baseline covariance, which captures signals not of interest in our
# analysis (e.g., sensor noise, environmental noise, physiological
# artifacts, and also resting-state-like brain activity / "noise").
# 2. Data covariance, which captures our activation of interest (in addition
# to noise sources).
base_cov = mne.compute_covariance(
epochs, tmin=-0.2, tmax=0, method=['shrunk', 'empirical'], rank=None,
verbose=True)
data_cov = mne.compute_covariance(
epochs, tmin=0., tmax=0.2, method=['shrunk', 'empirical'], rank=None,
verbose=True)
fig_noise_cov = mne.viz.plot_cov(noise_cov, epochs.info, show_svd=False)
fig_base_cov = mne.viz.plot_cov(base_cov, epochs.info, show_svd=False)
fig_data_cov = mne.viz.plot_cov(data_cov, epochs.info, show_svd=False)
###############################################################################
# We can also look at the covariances using topomaps, here we just show the
# baseline and data covariances, followed by the data covariance whitened
# by the baseline covariance:
evoked = epochs.average().pick('meg')
evoked.drop_channels(evoked.info['bads'])
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
noise_cov.plot_topomap(evoked.info, 'grad', title='Noise')
data_cov.plot_topomap(evoked.info, 'grad', title='Data')
data_cov.plot_topomap(evoked.info, 'grad', noise_cov=noise_cov,
title='Whitened data')
###############################################################################
# Apply inverse operator to covariance
# ------------------------------------
# Finally, we can construct an inverse using the empty-room noise covariance:
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
###############################################################################
# Project our data and baseline covariance to source space:
stc_data = apply_inverse_cov(data_cov, evoked.info, inverse_operator,
nave=len(epochs), method='dSPM', verbose=True)
stc_base = apply_inverse_cov(base_cov, evoked.info, inverse_operator,
nave=len(epochs), method='dSPM', verbose=True)
###############################################################################
# And visualize power is relative to the baseline:
# sphinx_gallery_thumbnail_number = 9
stc_data /= stc_base
brain = stc_data.plot(subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='percent', lims=(50, 90, 98)))
|
pkilambi/ceilometer | refs/heads/master | ceilometer/hardware/pollsters/disk.py | 9 | #
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
    """Base disk pollster producing byte-valued gauge samples."""

    CACHE_KEY = 'disk'

    def generate_one_sample(self, host, c_data):
        """Build one sample for *host* from cached (value, metadata, extra).

        The resource id defaults to the hostname and gets a ".device" suffix
        when the metadata names a device.
        """
        volume, meta, extra = c_data
        resource = extra.get('resource_id') or host.hostname
        device = meta.get('device')
        if device:
            resource = "%s.%s" % (resource, device)
        return util.make_sample_from_host(host,
                                          name=self.IDENTIFIER,
                                          sample_type=sample.TYPE_GAUGE,
                                          unit='B',
                                          volume=volume,
                                          res_metadata=meta,
                                          extra=extra,
                                          resource_id=resource)
class DiskTotalPollster(_Base):
    # Gauge: total disk size in bytes.
    IDENTIFIER = 'disk.size.total'
class DiskUsedPollster(_Base):
    # Gauge: used disk size in bytes.
    IDENTIFIER = 'disk.size.used'
|
Vivek-anand-jain/Implementation-of-BLUE-in-ns-3 | refs/heads/master | src/sixlowpan/bindings/callbacks_list.py | 80 | callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
gaccob/gres | refs/heads/master | dep/google/protobuf/message_factory.py | 228 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import reflection
class MessageFactory(object):
  """Factory for creating Proto2 messages from descriptors in a pool."""

  def __init__(self):
    """Initializes a new factory with an empty class cache."""
    self._classes = {}

  def GetPrototype(self, descriptor):
    """Builds a proto2 message class based on the passed in descriptor.

    Passing a descriptor with a fully qualified name matching a previous
    invocation will cause the same class to be returned.

    Args:
      descriptor: The descriptor to build from.

    Returns:
      A class describing the passed in descriptor.
    """
    full_name = descriptor.full_name
    if full_name in self._classes:
      return self._classes[full_name]
    new_class = reflection.GeneratedProtocolMessageType(
        descriptor.name.encode('ascii', 'ignore'),
        (message.Message,),
        {'DESCRIPTOR': descriptor})
    self._classes[full_name] = new_class
    # Eagerly build classes for all message-typed fields so that nested
    # messages are usable as soon as the parent class exists.
    for field in descriptor.fields:
      if field.message_type:
        self.GetPrototype(field.message_type)
    return new_class
# Module-level singletons shared by GetMessages(): every added file proto
# lands in _DB, descriptors are resolved through _POOL, and message classes
# are built (and cached) by _FACTORY.
_DB = descriptor_database.DescriptorDatabase()
_POOL = descriptor_pool.DescriptorPool(_DB)
_FACTORY = MessageFactory()
def GetMessages(file_protos):
  """Builds a dictionary of all the messages available in a set of files.

  Args:
    file_protos: A sequence of file protos to build messages out of.

  Returns:
    A dictionary containing all the message types in the files mapping the
    fully qualified name to a Message subclass for the descriptor.
  """
  # Register every file first so that cross-file references resolve when the
  # descriptors are looked up below.
  for proto in file_protos:
    _DB.Add(proto)
  return dict(
      (desc.full_name, _FACTORY.GetPrototype(desc))
      for proto in file_protos
      for desc in _GetAllDescriptors(proto.message_type, proto.package))
def _GetAllDescriptors(desc_protos, package):
  """Gets all levels of nested message types as a flattened list of descriptors.

  Args:
    desc_protos: The descriptor protos to process.
    package: The package where the protos are defined.

  Yields:
    Each message descriptor for each nested type.
  """
  for proto in desc_protos:
    full_name = '%s.%s' % (package, proto.name)
    yield _POOL.FindMessageTypeByName(full_name)
    # Recurse into nested message types, using this message's full name as
    # the "package" prefix for its children.
    for nested in _GetAllDescriptors(proto.nested_type, full_name):
      yield nested
|
project-magpie/enigma2-openpli | refs/heads/master | lib/python/Plugins/Extensions/GraphMultiEPG/GraphMultiEpg.py | 2 | from skin import parseColor, parseFont, parseSize
from Components.config import config, ConfigClock, ConfigInteger, ConfigSubsection, ConfigYesNo, ConfigSelection, ConfigSelectionNumber
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.ActionMap import HelpableActionMap
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
from Components.EpgList import Rect
from Components.Sources.Event import Event
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.TimerList import TimerList
from Components.Renderer.Picon import getPiconName
from Components.Sources.ServiceEvent import ServiceEvent
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.EventView import EventViewEPGSelect
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEntry import TimerEntry
from Screens.EpgSelection import EPGSelection
from Screens.TimerEdit import TimerSanityConflict, TimerEditList
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from ServiceReference import ServiceReference, isPlayableForCur
from Tools.LoadPixmap import LoadPixmap
from Tools.Alternatives import CompareWithAlternatives
from Tools import Notifications
from enigma import eEPGCache, eListbox, gFont, eListboxPythonMultiContent, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER,\
RT_VALIGN_CENTER, RT_WRAP, BT_SCALE, BT_KEEP_ASPECT_RATIO, eSize, eRect, eTimer, getBestPlayableServiceReference, loadPNG
from GraphMultiEpgSetup import GraphMultiEpgSetup
from time import localtime, time, strftime
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
from Tools.BoundFunction import boundFunction
# Maximum number of vertical time-marker pixmaps the skin provides
# ("timeline0" .. "timeline5").
MAX_TIMELINES = 6

# Persistent user settings for the Graphical Multi EPG.
config.misc.graph_mepg = ConfigSubsection()
config.misc.graph_mepg.prev_time = ConfigClock(default = time())
# Visible time window in minutes (60..300, changed with keys 1-6).
config.misc.graph_mepg.prev_time_period = ConfigInteger(default = 120, limits = (60, 300))
# Relative adjustment applied to the skin's event font size.
config.misc.graph_mepg.ev_fontsize = ConfigSelectionNumber(default = 0, stepwidth = 1, min = -8, max = 8, wraparound = True)
config.misc.graph_mepg.items_per_page = ConfigSelectionNumber(min = 3, max = 40, stepwidth = 1, default = 6, wraparound = True)
config.misc.graph_mepg.items_per_page_listscreen = ConfigSelectionNumber(min = 3, max = 60, stepwidth = 1, default = 12, wraparound = True)
# True = start in "list" (compact) mode instead of normal mode.
config.misc.graph_mepg.default_mode = ConfigYesNo(default = False)
config.misc.graph_mepg.overjump = ConfigYesNo(default = True)
config.misc.graph_mepg.center_timeline = ConfigYesNo(default = False)
config.misc.graph_mepg.servicetitle_mode = ConfigSelection(default = "picon+servicename", choices = [
    ("servicename", _("Service name")),
    ("picon", _("Picon")),
    ("picon+servicename", _("Picon and service name")) ])
# Rounding interval (seconds) used when jumping to a specific time.
config.misc.graph_mepg.roundTo = ConfigSelection(default = "900", choices = [("900", _("%d minutes") % 15), ("1800", _("%d minutes") % 30), ("3600", _("%d minutes") % 60)])
config.misc.graph_mepg.OKButton = ConfigSelection(default = "info", choices = [("info", _("Show detailed event info")), ("zap", _("Zap to selected channel"))])

# Text alignment flag combinations offered for event and service labels.
possibleAlignmentChoices = [
    ( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER ) , _("left")),
    ( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER ) , _("centered")),
    ( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER ) , _("right")),
    ( str(RT_HALIGN_LEFT | RT_VALIGN_CENTER | RT_WRAP) , _("left, wrapped")),
    ( str(RT_HALIGN_CENTER | RT_VALIGN_CENTER | RT_WRAP) , _("centered, wrapped")),
    ( str(RT_HALIGN_RIGHT | RT_VALIGN_CENTER | RT_WRAP) , _("right, wrapped"))]
config.misc.graph_mepg.event_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
config.misc.graph_mepg.servicename_alignment = ConfigSelection(default = possibleAlignmentChoices[0][0], choices = possibleAlignmentChoices)
config.misc.graph_mepg.extension_menu = ConfigYesNo(default = True)

# Module-global current display mode; toggled by GraphMultiEPG.swapMode().
listscreen = config.misc.graph_mepg.default_mode.value
class EPGList(HTMLComponent, GUIComponent):
def __init__(self, selChangedCB = None, timer = None, time_epoch = 120, overjump_empty = True):
    """Grid-style multi-service EPG listbox content.

    selChangedCB: optional callback invoked whenever the selection changes.
    timer: the RecordTimer instance used to draw record/zap clock icons.
    time_epoch: width of the visible time window in minutes.
    overjump_empty: skip services that have no events when navigating.
    """
    GUIComponent.__init__(self)
    self.cur_event = None
    self.cur_service = None
    self.offs = 0                       # page offset in multiples of time_epoch
    self.timer = timer
    self.last_time = time()
    self.onSelChanged = [ ]
    if selChangedCB is not None:
        self.onSelChanged.append(selChangedCB)
    self.l = eListboxPythonMultiContent()
    self.l.setBuildFunc(self.buildEntry)
    self.setOverjump_Empty(overjump_empty)
    self.epgcache = eEPGCache.getInstance()
    # Timer-state icons; indexed in buildEntry() by the timer type codes
    # returned from RecordTimer.isInTimer().
    self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_add.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_pre.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_prepost.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/epgclock_post.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_add.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_pre.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_prepost.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zapclock_post.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_add.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_pre.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_prepost.png')),
        LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, 'skin_default/icons/zaprecclock_post.png')) ]
    self.time_base = None
    self.time_epoch = time_epoch
    self.list = None                    # [(service, service_name, events, picon), ...]
    self.select_rect = None
    self.event_rect = None
    self.service_rect = None
    self.picon_size = None
    self.currentlyPlaying = None
    self.showPicon = False
    self.showServiceTitle = True
    # Event/service background pixmaps; loaded lazily in setItemsPerPage().
    self.nowEvPix = None
    self.othEvPix = None
    self.selEvPix = None
    self.recEvPix = None
    self.curSerPix = None
    # Default colours, fonts and paddings; overridden by skin attributes
    # in applySkin().
    self.foreColor = 0xffffff
    self.foreColorSelected = 0xffc000
    self.borderColor = 0x464445
    self.backColor = 0x595959
    self.backColorSelected = 0x808080
    self.foreColorService = 0xffffff
    self.foreColorServiceSelected = 0xffffff
    self.backColorService = 0x000000
    self.backColorServiceSelected = 0x508050
    self.borderColorService = 0x000000
    self.foreColorNow = 0xffffff
    self.backColorNow = 0x505080
    self.foreColorRec = 0xffffff
    self.backColorRec = 0x805050
    self.serviceFont = gFont("Regular", 20)
    self.entryFontName = "Regular"
    self.entryFontSize = 18
    self.listHeight = None
    self.listWidth = None
    self.serviceBorderWidth = 1
    self.serviceNamePadding = 0
    self.eventBorderWidth = 1
    self.eventNamePadding = 0
def applySkin(self, desktop, screen):
    """Consume EPGList-specific skin attributes, pass the rest to the base.

    Recognised attributes override the colour/font/padding defaults set in
    __init__; unknown ones are forwarded to GUIComponent.applySkin().
    """
    if self.skinAttributes is not None:
        attribs = [ ]
        for (attrib, value) in self.skinAttributes:
            if attrib == "EntryForegroundColor":
                self.foreColor = parseColor(value).argb()
            elif attrib == "EntryForegroundColorSelected":
                self.foreColorSelected = parseColor(value).argb()
            elif attrib == "EntryBackgroundColor":
                self.backColor = parseColor(value).argb()
            elif attrib == "EntryBackgroundColorSelected":
                self.backColorSelected = parseColor(value).argb()
            elif attrib == "EntryBorderColor":
                self.borderColor = parseColor(value).argb()
            elif attrib == "EntryFont":
                font = parseFont(value, ((1,1),(1,1)) )
                self.entryFontName = font.family
                self.entryFontSize = font.pointSize
            elif attrib == "ServiceForegroundColor" or attrib == "ServiceNameForegroundColor":
                self.foreColorService = parseColor(value).argb()
            elif attrib == "ServiceForegroundColorSelected":
                self.foreColorServiceSelected = parseColor(value).argb()
            elif attrib == "ServiceBackgroundColor" or attrib == "ServiceNameBackgroundColor":
                self.backColorService = parseColor(value).argb()
            elif attrib == "ServiceBackgroundColorSelected":
                self.backColorServiceSelected = parseColor(value).argb()
            # NOTE(review): "ServiceNameBackgroundColor" is already consumed by
            # the ServiceBackgroundColor branch above, so the second test here
            # can never match — this was most likely meant to read
            # "ServiceNameBackgroundColorRecording". Confirm against the skin
            # attribute documentation before changing.
            elif attrib == "ServiceBackgroundColorRecording" or attrib == "ServiceNameBackgroundColor":
                self.backColorRec = parseColor(value).argb()
            elif attrib == "ServiceForegroundColorRecording":
                self.foreColorRec = parseColor(value).argb()
            elif attrib == "ServiceBorderColor":
                self.borderColorService = parseColor(value).argb()
            elif attrib == "ServiceFont":
                self.serviceFont = parseFont(value, ((1,1),(1,1)) )
            elif attrib == "EntryBackgroundColorNow":
                self.backColorNow = parseColor(value).argb()
            elif attrib == "EntryForegroundColorNow":
                self.foreColorNow = parseColor(value).argb()
            elif attrib == "ServiceBorderWidth":
                self.serviceBorderWidth = int(value)
            elif attrib == "ServiceNamePadding":
                self.serviceNamePadding = int(value)
            elif attrib == "EventBorderWidth":
                self.eventBorderWidth = int(value)
            elif attrib == "EventNamePadding":
                self.eventNamePadding = int(value)
            else:
                attribs.append((attrib,value))
        self.skinAttributes = attribs
    self.l.setFont(0, self.serviceFont)
    self.setEventFontsize()
    rc = GUIComponent.applySkin(self, desktop, screen)
    # now we know our size and can safely set items per page
    self.listHeight = self.instance.size().height()
    self.listWidth = self.instance.size().width()
    self.setItemsPerPage()
    return rc
def isSelectable(self, service, service_name, events, picon):
    """A row can only be selected when it actually carries events."""
    return bool(events)
def setShowServiceMode(self, value):
    """Enable picon and/or service-name display from the config value string."""
    self.showServiceTitle, self.showPicon = ("servicename" in value, "picon" in value)
    self.recalcEntrySize()
    # Re-select the current entry so the clipping region is refreshed.
    self.selEntry(0)
def setOverjump_Empty(self, overjump_empty):
    """Enable/disable skipping of services without events during navigation."""
    if overjump_empty:
        self.l.setSelectableFunc(self.isSelectable)
    else:
        self.l.setSelectableFunc(None)

def setEpoch(self, epoch):
    """Change the visible window width (minutes) and refill from page 0."""
    self.offs = 0
    self.time_epoch = epoch
    self.fillMultiEPG(None) # refill

def setCurrentlyPlaying(self, serviceref):
    # Remember the currently playing service so buildEntry() can highlight it.
    self.currentlyPlaying = serviceref
def getEventFromId(self, service, eventid):
    """Resolve an event id to a full EPG event via the cache, or None."""
    if self.epgcache is None or eventid is None:
        return None
    return self.epgcache.lookupEventId(service.ref, eventid)
def getIndexFromService(self, serviceref):
    """Return the row index of serviceref (honouring alternatives), else None.

    Fixes the `for x in range(len(...))` anti-idiom and hoists the
    loop-invariant serviceref.toString() call out of the loop.
    """
    if serviceref is not None:
        refstr = serviceref.toString()
        for index, entry in enumerate(self.list):
            if CompareWithAlternatives(entry[0], refstr):
                return index
    return None
def moveToService(self, serviceref):
    """Move the selection to the given service; fall back to the first row."""
    index = self.getIndexFromService(serviceref)
    self.setCurrentIndex(index if index is not None else 0)
def setCurrentIndex(self, index):
    # Move the listbox selection to an absolute row (no-op before creation).
    if self.instance is not None:
        self.instance.moveSelectionTo(index)

def moveTo(self, dir):
    # Move the listbox selection relatively (eListbox.pageUp/pageDown/...).
    if self.instance is not None:
        self.instance.moveSelection(dir)
def getCurrent(self):
    """Return (event, service) for the current selection.

    event is the fully-resolved EPG event or None when no event is
    selected; service is a ServiceReference (or None when there is no
    current service at all).

    Cleanup: removed the unused local `old_service` and the redundant
    `not len(events)` test (`not events` already covers it).
    """
    if self.cur_service is None:
        return (None, None)
    events = self.cur_service[2]        # cur_service = (service, service_name, events, picon)
    refstr = self.cur_service[0]
    if self.cur_event is None or not events:
        return (None, ServiceReference(refstr))
    event = events[self.cur_event]      # (event_id, event_title, begin_time, duration)
    service = ServiceReference(refstr)
    # Resolve the compact tuple into the full event object via the EPG cache.
    return (self.getEventFromId(service, event[0]), service)
def connectSelectionChanged(self, func):
    """Register func to be called whenever the selection changes.

    Bug fix: the original definition was missing the `self` parameter, so
    any call on an instance raised a TypeError (func was bound as self).
    """
    if func not in self.onSelChanged:
        self.onSelChanged.append(func)
def disconnectSelectionChanged(self, func):
    """Unregister a selection-changed callback.

    Bug fix: the original definition was missing the `self` parameter, so
    any call on an instance raised a TypeError.
    """
    self.onSelChanged.remove(func)
def serviceChanged(self):
    """Listbox moved to another service row: choose its best-matching event."""
    if self.l.getCurrentSelection():
        self.findBestEvent()
def findBestEvent(self):
    """Pick the event on the newly selected service closest to the last
    viewed time (never earlier than "now"), then update the selection clip.
    """
    old_service = self.cur_service  #(service, service_name, events, picon)
    cur_service = self.cur_service = self.l.getCurrentSelection()
    time_base = self.getTimeBase()
    now = time()
    if old_service and self.cur_event is not None:
        events = old_service[2]
        cur_event = events[self.cur_event]  #(event_id, event_title, begin_time, duration)
        # Only adopt the old event's start time if last_time fell outside it.
        if self.last_time < cur_event[2] or cur_event[2]+cur_event[3] < self.last_time:
            self.last_time = cur_event[2]
    if now > self.last_time:
        self.last_time = now
    if cur_service:
        self.cur_event = None
        events = cur_service[2]
        if events and len(events):
            # Default to the first event, then look for one covering last_time.
            self.cur_event = idx = 0
            for event in events: #iterate all events
                if event[2] <= self.last_time and event[2]+event[3] > self.last_time:
                    self.cur_event = idx
                    break
                idx += 1
    self.selEntry(0)
def selectionChanged(self):
    """Notify every registered selection-changed callback."""
    for callback in self.onSelChanged:
        if callback is not None:
            callback()
GUI_WIDGET = eListbox
def setItemsPerPage(self):
    """Resize the listbox/item height for the configured rows per page and
    (re)load the event background pixmaps from the current skin.
    """
    global listscreen
    if self.listHeight > 0:
        # Integer division: the row height is rounded down to whole pixels.
        if listscreen:
            itemHeight = self.listHeight / config.misc.graph_mepg.items_per_page_listscreen.getValue()
        else:
            itemHeight = self.listHeight / config.misc.graph_mepg.items_per_page.getValue()
    else:
        itemHeight = 54 # some default (270/5)
    if listscreen:
        self.instance.resize(eSize(self.listWidth, itemHeight * config.misc.graph_mepg.items_per_page_listscreen.getValue()))
    else:
        self.instance.resize(eSize(self.listWidth, itemHeight * config.misc.graph_mepg.items_per_page.getValue()))
    self.l.setItemHeight(itemHeight)
    # Background pixmaps used by buildEntry(); may be None if the skin
    # does not provide them (solid colours are used as a fallback).
    self.nowEvPix = loadPNG(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/CurrentEvent.png'))
    self.othEvPix = loadPNG(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/OtherEvent.png'))
    self.selEvPix = loadPNG(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/SelectedEvent.png'))
    self.recEvPix = loadPNG(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/RecordingEvent.png'))
    self.curSerPix = loadPNG(resolveFilename(SCOPE_CURRENT_SKIN, 'epg/CurrentService.png'))
def setEventFontsize(self):
    # Apply the skin's event font plus the user's relative size adjustment.
    self.l.setFont(1, gFont(self.entryFontName, self.entryFontSize + config.misc.graph_mepg.ev_fontsize.getValue()))

def postWidgetCreate(self, instance):
    """Wire up the eListbox widget once it exists."""
    instance.setWrapAround(True)
    instance.selectionChanged.get().append(self.serviceChanged)
    instance.setContent(self.l)
    self.l.setSelectionClip(eRect(0, 0, 0, 0), False)

def preWidgetRemove(self, instance):
    # Mirror of postWidgetCreate: detach callback and content before teardown.
    instance.selectionChanged.get().remove(self.serviceChanged)
    instance.setContent(None)
def recalcEntrySize(self):
    """Recompute the service-label and event-area rectangles and picon size
    from the current item size and display mode.
    """
    esize = self.l.getItemSize()
    width = esize.width()
    height = esize.height()
    if self.showServiceTitle:
        # Service column takes 20% of the row when the name is shown.
        w = width / 10 * 2;
    else: # if self.showPicon: # this must be set if showServiceTitle is None
        w = 2 * height - 2 * self.serviceBorderWidth # FIXME: could do better...
    self.service_rect = Rect(0, 0, w, height)
    self.event_rect = Rect(w, 0, width - w, height)
    # Picon is drawn 2:1 inside the service column, clipped to the column.
    piconHeight = height - 2 * self.serviceBorderWidth
    piconWidth = 2 * piconHeight # FIXME: could do better...
    if piconWidth > w - 2 * self.serviceBorderWidth:
        piconWidth = w - 2 * self.serviceBorderWidth
    self.picon_size = eSize(piconWidth, piconHeight)
def calcEntryPosAndWidthHelper(self, stime, duration, start, end, width):
    """Map the interval [stime, stime+duration) within [start, end) onto
    pixel coordinates, clipped to [0, width). Returns (xpos, ewidth).
    """
    span = end - start
    left_px = (stime - start) * width / span
    right_px = (stime + duration - start) * width / span
    ewidth = right_px - left_px
    if left_px < 0:
        # Event begins before the visible window: clip on the left.
        ewidth += left_px
        left_px = 0
    if left_px + ewidth > width:
        # Event ends after the visible window: clip on the right.
        ewidth = width - left_px
    return left_px, ewidth
def calcEntryPosAndWidth(self, event_rect, time_base, time_epoch, ev_start, ev_duration):
    """Like the helper, but returns the position offset into event_rect."""
    window_end = time_base + time_epoch * 60
    xpos, width = self.calcEntryPosAndWidthHelper(ev_start, ev_duration, time_base, window_end, event_rect.width())
    return event_rect.left() + xpos, width
def buildEntry(self, service, service_name, events, picon):
    """Build the multi-content entry list for one service row.

    Called by eListboxPythonMultiContent for every visible row with the
    tuple stored in self.list. Draws the service column (background, picon
    and/or name) followed by one box per event, with record/zap clock icons.
    """
    r1 = self.service_rect
    r2 = self.event_rect
    selected = self.cur_service[0] == service
    # Picon and Service name
    if CompareWithAlternatives(service, self.currentlyPlaying and self.currentlyPlaying.toString()):
        serviceForeColor = self.foreColorServiceSelected
        serviceBackColor = self.backColorServiceSelected
        bgpng = self.curSerPix or self.nowEvPix
        currentservice = True
    else:
        serviceForeColor = self.foreColorService
        serviceBackColor = self.backColorService
        bgpng = self.othEvPix
        currentservice = False
    res = [ None ]
    if bgpng is not None:    # bacground for service rect
        res.append(MultiContentEntryPixmapAlphaTest(
                pos = (r1.x + self.serviceBorderWidth, r1.y + self.serviceBorderWidth),
                size = (r1.w - 2 * self.serviceBorderWidth, r1.h - 2 * self.serviceBorderWidth),
                png = bgpng,
                flags = BT_SCALE))
    else:
        res.append(MultiContentEntryText(
                pos  = (r1.x, r1.y),
                size = (r1.w, r1.h),
                font = 0, flags = RT_HALIGN_LEFT | RT_VALIGN_CENTER,
                text = "",
                color = serviceForeColor, color_sel = serviceForeColor,
                backcolor = serviceBackColor, backcolor_sel = serviceBackColor))
    displayPicon = None
    if self.showPicon:
        if picon is None: # go find picon and cache its location
            picon = getPiconName(service)
            curIdx = self.l.getCurrentSelectionIndex()
            self.list[curIdx] = (service, service_name, events, picon)
        piconWidth = self.picon_size.width()
        piconHeight = self.picon_size.height()
        if picon != "":
            displayPicon = loadPNG(picon)
        if displayPicon is not None:
            res.append(MultiContentEntryPixmapAlphaTest(
                pos = (r1.x + self.serviceBorderWidth, r1.y + self.serviceBorderWidth),
                size = (piconWidth, piconHeight),
                png = displayPicon,
                backcolor = None, backcolor_sel = None, flags = BT_SCALE | BT_KEEP_ASPECT_RATIO))
        elif not self.showServiceTitle:
            # no picon so show servicename anyway in picon space
            namefont = 1
            namefontflag = int(config.misc.graph_mepg.servicename_alignment.value)
            namewidth = piconWidth
            piconWidth = 0
    else:
        piconWidth = 0
    if self.showServiceTitle: # we have more space so reset parms
        namefont = 0
        namefontflag = int(config.misc.graph_mepg.servicename_alignment.value)
        namewidth = r1.w - piconWidth
    # NOTE(review): if both showServiceTitle and showPicon were False,
    # namefont/namewidth would be unbound here — presumably impossible given
    # the servicetitle_mode config choices; verify before refactoring.
    if self.showServiceTitle or displayPicon is None:
        res.append(MultiContentEntryText(
            pos = (r1.x + piconWidth + self.serviceBorderWidth + self.serviceNamePadding,
                r1.y + self.serviceBorderWidth),
            size = (namewidth - 2 * (self.serviceBorderWidth + self.serviceNamePadding),
                r1.h - 2 * self.serviceBorderWidth),
            font = namefont, flags = namefontflag,
            text = service_name,
            color = serviceForeColor, color_sel = serviceForeColor,
            backcolor = None, backcolor_sel = None))
    # Events for service
    backColorSel = self.backColorSelected
    if events:
        start = self.time_base + self.offs * self.time_epoch * 60
        end = start + self.time_epoch * 60
        left = r2.x
        top = r2.y
        width = r2.w
        height = r2.h
        now = time()
        for ev in events:  #(event_id, event_title, begin_time, duration)
            stime = ev[2]
            duration = ev[3]
            xpos, ewidth = self.calcEntryPosAndWidthHelper(stime, duration, start, end, width)
            rec = self.timer.isInTimer(ev[0], stime, duration, service)
            # event box background
            foreColorSelected = foreColor = self.foreColor
            if stime <= now and now < stime + duration:
                backColor = self.backColorNow
                if isPlayableForCur(ServiceReference(service).ref):
                    foreColor = self.foreColorNow
                    foreColorSelected = self.foreColorSelected
            else:
                backColor = self.backColor
            # Pick the background pixmap by priority:
            # selected > recording > running now > current service > other.
            if selected and self.select_rect.x == xpos + left and self.selEvPix:
                bgpng = self.selEvPix
                backColorSel = None
            elif rec is not None and rec[1][-1] in (2, 12):
                bgpng = self.recEvPix
                foreColor = self.foreColorRec
                backColor = self.backColorRec
            elif stime <= now and now < stime + duration:
                bgpng = self.nowEvPix
            elif currentservice:
                bgpng = self.curSerPix or self.othEvPix
                backColor = self.backColorServiceSelected
            else:
                bgpng = self.othEvPix
            if bgpng is not None:
                res.append(MultiContentEntryPixmapAlphaTest(
                    pos = (left + xpos + self.eventBorderWidth, top + self.eventBorderWidth),
                    size = (ewidth - 2 * self.eventBorderWidth, height - 2 * self.eventBorderWidth),
                    png = bgpng,
                    flags = BT_SCALE))
            else:
                res.append(MultiContentEntryText(
                    pos = (left + xpos, top), size = (ewidth, height),
                    font = 1, flags = int(config.misc.graph_mepg.event_alignment.value),
                    text = "", color = None, color_sel = None,
                    backcolor = backColor, backcolor_sel = backColorSel))
            # event text
            evX = left + xpos + self.eventBorderWidth + self.eventNamePadding
            evY = top + self.eventBorderWidth
            evW = ewidth - 2 * (self.eventBorderWidth + self.eventNamePadding)
            evH = height - 2 * self.eventBorderWidth
            if evW > 0:
                res.append(MultiContentEntryText(
                    pos = (evX, evY),
                    size = (evW, evH),
                    font = 1,
                    flags = int(config.misc.graph_mepg.event_alignment.value),
                    text = ev[1],
                    color = foreColor,
                    color_sel = foreColorSelected))
            # recording icons
            if rec is not None:
                # One 21x21 clock icon per timer part, right-aligned; stop
                # when the event box runs out of horizontal space.
                for i in range(len(rec[1])):
                    if ewidth < (i + 1) * 22:
                        break
                    res.append(MultiContentEntryPixmapAlphaTest(
                        pos = (left + xpos + ewidth - (i + 1) * 22, top + height - 22), size = (21, 21),
                        png = self.clocks[rec[1][len(rec[1]) - 1 - i]]))
    else:
        # No events at all: just draw the selection background if selected.
        if selected and self.selEvPix:
            res.append(MultiContentEntryPixmapAlphaTest(
                pos = (r2.x + self.eventBorderWidth, r2.y + self.eventBorderWidth),
                size = (r2.w - 2 * self.eventBorderWidth, r2.h - 2 * self.eventBorderWidth),
                png = self.selEvPix,
                flags = BT_SCALE))
    return res
def selEntry(self, dir, visible = True):
    """Move the event selection and update the selection clipping rect.

    dir: 0 = reselect current, +/-1 = next/prev event (paging at the edge),
    +/-2 = next/prev page, +/-3 = next/prev day.
    Returns True when the page changed (caller must realign the timeline).
    """
    cur_service = self.cur_service  #(service, service_name, events, picon)
    self.recalcEntrySize()
    valid_event = self.cur_event is not None
    if cur_service:
        update = True
        entries = cur_service[2]
        if dir == 0: #current
            update = False
        elif dir == +1: #next
            if valid_event and self.cur_event + 1 < len(entries):
                self.cur_event += 1
            else:
                # Past the last event on this page: advance one page.
                self.offs += 1
                self.fillMultiEPG(None) # refill
                return True
        elif dir == -1: #prev
            if valid_event and self.cur_event - 1 >= 0:
                self.cur_event -= 1
            elif self.offs > 0:
                self.offs -= 1
                self.fillMultiEPG(None) # refill
                return True
        elif dir == +2: #next page
            self.offs += 1
            self.fillMultiEPG(None) # refill
            return True
        elif dir == -2: #prev
            if self.offs > 0:
                self.offs -= 1
                self.fillMultiEPG(None) # refill
                return True
        elif dir == +3: #next day
            self.offs += 60 * 24 / self.time_epoch
            self.fillMultiEPG(None) # refill
            return True
        elif dir == -3: #prev day
            self.offs -= 60 * 24 / self.time_epoch
            if self.offs < 0:
                self.offs = 0;
            self.fillMultiEPG(None) # refill
            return True
    if cur_service and valid_event:
        entry = entries[self.cur_event] #(event_id, event_title, begin_time, duration)
        time_base = self.time_base + self.offs*self.time_epoch * 60
        xpos, width = self.calcEntryPosAndWidth(self.event_rect, time_base, self.time_epoch, entry[2], entry[3])
        # NOTE(review): `self.event_rect.height` (no parentheses) stores the
        # bound method, not the value — unlike `.h` on the next line. Only
        # select_rect.x is ever read (in buildEntry), so this is currently
        # harmless, but it looks like it was meant to be `.h` / `.height()`.
        self.select_rect = Rect(xpos ,0, width, self.event_rect.height)
        self.l.setSelectionClip(eRect(xpos, 0, width, self.event_rect.h), visible and update)
    else:
        self.select_rect = self.event_rect
        self.l.setSelectionClip(eRect(self.event_rect.x, self.event_rect.y, self.event_rect.w, self.event_rect.h), False)
    self.selectionChanged()
    return False
def fillMultiEPG(self, services, stime = None):
    """Query the EPG cache and rebuild self.list.

    services: sequence of ServiceReferences for an initial fill, or None to
    refill the existing services at the current page offset.
    stime: optional new time base (unix timestamp).
    """
    if stime is not None:
        self.time_base = int(stime)
    if services is None:
        # Refill: reuse the cached service strings (and picon paths, idx 3).
        time_base = self.time_base + self.offs * self.time_epoch * 60
        test = [ (service[0], 0, time_base, self.time_epoch) for service in self.list ]
        serviceList = self.list
        piconIdx = 3
    else:
        self.cur_event = None
        self.cur_service = None
        test = [ (service.ref.toString(), 0, self.time_base, self.time_epoch) for service in services ]
        serviceList = services
        piconIdx = 0
    test.insert(0, 'XRnITBD') #return record, service ref, service name, event id, event title, begin time, duration
    epg_data = [] if self.epgcache is None else self.epgcache.lookupEvent(test)
    self.list = [ ]
    tmp_list = None
    service = ""
    sname = ""
    serviceIdx = 0
    # lookupEvent returns rows grouped by service; collect each group into
    # one (service, name, events, picon) tuple.
    for x in epg_data:
        if service != x[0]:
            if tmp_list is not None:
                picon = None if piconIdx == 0 else serviceList[serviceIdx][piconIdx]
                self.list.append((service, sname, tmp_list[0][0] is not None and tmp_list or None, picon))
                serviceIdx += 1
            service = x[0]
            sname = x[1]
            tmp_list = [ ]
        tmp_list.append((x[2], x[3], x[4], x[5])) #(event_id, event_title, begin_time, duration)
    # Flush the final group.
    if tmp_list and len(tmp_list):
        picon = None if piconIdx == 0 else serviceList[serviceIdx][piconIdx]
        self.list.append((service, sname, tmp_list[0][0] is not None and tmp_list or None, picon))
        serviceIdx += 1
    self.l.setList(self.list)
    self.findBestEvent()
def getEventRect(self):
    """Event-area rectangle in screen coordinates (listbox x-offset applied)."""
    rc = self.event_rect
    return Rect( rc.left() + (self.instance and self.instance.position().x() or 0), rc.top(), rc.width(), rc.height() )

def getServiceRect(self):
    """Service-column rectangle in screen coordinates."""
    rc = self.service_rect
    return Rect( rc.left() + (self.instance and self.instance.position().x() or 0), rc.top(), rc.width(), rc.height() )

def getTimeEpoch(self):
    # Width of the visible window in minutes.
    return self.time_epoch

def getTimeBase(self):
    # Left edge of the visible window (unix time), page offset applied.
    return self.time_base + (self.offs * self.time_epoch * 60)

def resetOffset(self):
    # Jump back to the first page relative to the current time base.
    self.offs = 0
class TimelineText(HTMLComponent, GUIComponent):
def __init__(self):
    """Single-row multi-content list rendering the date and hour marks."""
    GUIComponent.__init__(self)
    self.l = eListboxPythonMultiContent()
    self.l.setSelectionClip(eRect(0, 0, 0, 0))
    self.l.setItemHeight(25);
    # Defaults; may be overridden by skin attributes in applySkin().
    self.foreColor = 0xffc000
    self.backColor = 0x000000
    # Cached window parameters so setEntries() can skip redundant rebuilds.
    self.time_base = 0
    self.time_epoch = 0
    self.font = gFont("Regular", 20)
GUI_WIDGET = eListbox
def applySkin(self, desktop, screen):
    """Consume colour/font skin attributes; forward the rest to the base."""
    if self.skinAttributes is not None:
        attribs = [ ]
        for (attrib, value) in self.skinAttributes:
            if attrib == "foregroundColor":
                self.foreColor = parseColor(value).argb()
            elif attrib == "backgroundColor":
                self.backColor = parseColor(value).argb()
            elif attrib == "font":
                self.font = parseFont(value,  ((1, 1), (1, 1)) )
            else:
                attribs.append((attrib,value))
        self.skinAttributes = attribs
    self.l.setFont(0, self.font)
    return GUIComponent.applySkin(self, desktop, screen)
def postWidgetCreate(self, instance):
    # Attach the multi-content source once the eListbox widget exists.
    instance.setContent(self.l)

def setDateFormat(self, value):
    """Choose the date format for the leftmost cell from the service-title
    mode string ("servicename" gets the long form, picon-only the short).

    NOTE(review): when value contains neither "servicename" nor "picon",
    self.datefmt stays unset and setEntries() would raise AttributeError —
    presumably the servicetitle_mode choices guarantee one of them; verify.
    """
    if "servicename" in value:
        self.datefmt = _("%A %d %B")
    elif "picon" in value:
        self.datefmt = _("%d-%m")
def setEntries(self, l, timeline_now, time_lines, force):
    """Render the timeline header and position the vertical marker pixmaps.

    l: the EPGList providing the geometry and time window.
    timeline_now: pixmap marking the current wall-clock time.
    time_lines: the MAX_TIMELINES vertical line pixmaps.
    force: rebuild even if the time window is unchanged.
    """
    event_rect = l.getEventRect()
    time_epoch = l.getTimeEpoch()
    time_base = l.getTimeBase()
    if event_rect is None or time_epoch is None or time_base is None:
        return
    eventLeft = event_rect.left()
    res = [ None ]
    # Note: event_rect and service_rect are relative to the timeline_text position
    #       while the time lines are relative to the GraphEPG screen position!
    if self.time_base != time_base or self.time_epoch != time_epoch or force:
        service_rect = l.getServiceRect()
        itemHeight = self.l.getItemSize().height()
        # One label per 30 min for windows up to 3h, else per 60 min.
        time_steps = 60 if time_epoch > 180 else 30
        num_lines = time_epoch / time_steps
        timeStepsCalc = time_steps * 60
        incWidth = event_rect.width() / num_lines
        if int(config.misc.graph_mepg.center_timeline.value):
            tlMove = incWidth / 2
            tlFlags = RT_HALIGN_CENTER | RT_VALIGN_CENTER
        else:
            tlMove = 0
            tlFlags = RT_HALIGN_LEFT | RT_VALIGN_CENTER
        # Date label above the service column.
        res.append( MultiContentEntryText(
            pos = (0, 0),
            size = (service_rect.width(), itemHeight),
            font = 0, flags = RT_HALIGN_LEFT | RT_VALIGN_CENTER,
            text = strftime(self.datefmt, localtime(time_base)),
            color = self.foreColor, color_sel = self.foreColor,
            backcolor = self.backColor, backcolor_sel = self.backColor) )
        xpos = 0 # eventLeft
        # Hour/half-hour labels plus the matching vertical marker lines.
        for x in range(0, num_lines):
            res.append( MultiContentEntryText(
                pos = (service_rect.width() + xpos-tlMove, 0),
                size = (incWidth, itemHeight),
                font = 0, flags = tlFlags,
                text = strftime("%H:%M", localtime( time_base + x*timeStepsCalc )),
                color = self.foreColor, color_sel = self.foreColor,
                backcolor = self.backColor, backcolor_sel = self.backColor) )
            line = time_lines[x]
            old_pos = line.position
            line.setPosition(xpos + eventLeft, old_pos[1])
            line.visible = True
            xpos += incWidth
        # Hide any leftover marker lines from a wider previous window.
        for x in range(num_lines, MAX_TIMELINES):
            time_lines[x].visible = False
        self.l.setList([res])
        self.time_base = time_base
        self.time_epoch = time_epoch
    now = time()
    # Place (or hide) the "now" marker inside the visible window.
    if now >= time_base and now < (time_base + time_epoch * 60):
        xpos = int((((now - time_base) * event_rect.width()) / (time_epoch * 60)) - (timeline_now.instance.size().width() / 2))
        old_pos = timeline_now.position
        new_pos = (xpos + eventLeft, old_pos[1])
        if old_pos != new_pos:
            timeline_now.setPosition(new_pos[0], new_pos[1])
        timeline_now.visible = True
    else:
        timeline_now.visible = False
class GraphMultiEPG(Screen, HelpableScreen):
EMPTY = 0
ADD_TIMER = 1
REMOVE_TIMER = 2
ZAP = 1
def __init__(self, session, services, zapFunc=None, bouquetChangeCB=None, bouquetname=""):
    """Graphical multi-service EPG screen.

    services: ServiceReferences of the bouquet to display.
    zapFunc: callback used to zap to a service.
    bouquetChangeCB: callback(direction, self) to switch bouquets.
    bouquetname: used as the window title when non-empty.
    """
    Screen.__init__(self, session)
    self.bouquetChangeCB = bouquetChangeCB
    # Start at "now" minus the allowed history, rounded down to the
    # configured interval.
    now = time() - config.epg.histminutes.getValue() * 60
    self.ask_time = now - now % int(config.misc.graph_mepg.roundTo.getValue())
    self["key_red"] = Button("")
    self["key_green"] = Button("")
    global listscreen
    if listscreen:
        self["key_yellow"] = Button(_("Normal mode"))
        self.skinName="GraphMultiEPGList"
    else:
        self["key_yellow"] = Button(_("List mode"))
    self["key_blue"] = Button(_("Goto"))
    self.key_green_choice = self.EMPTY
    self.key_red_choice = self.EMPTY
    self["timeline_text"] = TimelineText()
    self["Service"] = ServiceEvent()
    self["Event"] = Event()
    # Vertical time-marker pixmaps, provided by the skin as timeline0..5.
    self.time_lines = [ ]
    for x in range(0, MAX_TIMELINES):
        pm = Pixmap()
        self.time_lines.append(pm)
        self["timeline%d"%(x)] = pm
    self["timeline_now"] = Pixmap()
    self.services = services
    self.zapFunc = zapFunc
    if bouquetname != "":
        Screen.setTitle(self, bouquetname)
    self["list"] = EPGList( selChangedCB = self.onSelectionChanged,
                timer = self.session.nav.RecordTimer,
                time_epoch = config.misc.graph_mepg.prev_time_period.value,
                overjump_empty = config.misc.graph_mepg.overjump.value)
    HelpableScreen.__init__(self)
    self["okactions"] = HelpableActionMap(self, "OkCancelActions",
        {
            "cancel": (self.closeScreen,   _("Exit EPG")),
            "ok":     (self.eventSelected, _("Zap to selected channel, or show detailed event info (depends on configuration)"))
        }, -1)
    self["okactions"].csel = self
    self["epgactions"] = HelpableActionMap(self, "EPGSelectActions",
        {
            "timerAdd":    (self.timerAdd,       _("Add/remove change timer for current event")),
            "info":        (self.infoKeyPressed, _("Show detailed event info")),
            "red":         (self.zapTo,          _("Zap to selected channel")),
            "yellow":      (self.swapMode,       _("Switch between normal mode and list mode")),
            "blue":        (self.enterDateTime,  _("Goto specific date/time")),
            "menu":        (self.furtherOptions, _("Further Options")),
            "nextBouquet": (self.nextBouquet,    _("Show bouquet selection menu")),
            "prevBouquet": (self.prevBouquet,    _("Show bouquet selection menu")),
            "nextService": (self.nextPressed,    _("Goto next page of events")),
            "prevService": (self.prevPressed,    _("Goto previous page of events")),
            "preview":     (self.preview,        _("Preview selected channel")),
            "nextDay":     (self.nextDay,        _("Goto next day of events")),
            "prevDay":     (self.prevDay,        _("Goto previous day of events"))
        }, -1)
    self["epgactions"].csel = self
    self["inputactions"] = HelpableActionMap(self, "InputActions",
        {
            "left":  (self.leftPressed,  _("Go to previous event")),
            "right": (self.rightPressed, _("Go to next event")),
            "1":     (self.key1,         _("Set time window to 1 hour")),
            "2":     (self.key2,         _("Set time window to 2 hours")),
            "3":     (self.key3,         _("Set time window to 3 hours")),
            "4":     (self.key4,         _("Set time window to 4 hours")),
            "5":     (self.key5,         _("Set time window to 5 hours")),
            "6":     (self.key6,         _("Set time window to 6 hours")),
            "7":     (self.prevPage,     _("Go to previous page of service")),
            "9":     (self.nextPage,     _("Go to next page of service")),
            "8":     (self.toTop,        _("Go to first service")),
            "0":     (self.toEnd,        _("Go to last service"))
        }, -1)
    self["inputactions"].csel = self
    # Keep the "now" marker moving while the screen is open.
    self.updateTimelineTimer = eTimer()
    self.updateTimelineTimer.callback.append(self.moveTimeLines)
    self.updateTimelineTimer.start(60 * 1000)
    self.onLayoutFinish.append(self.onCreate)
    self.previousref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
def prevPage(self):
    """Scroll the service list up by one page."""
    self["list"].moveTo(eListbox.pageUp)

def nextPage(self):
    """Scroll the service list down by one page."""
    self["list"].moveTo(eListbox.pageDown)

def toTop(self):
    """Jump to the first service in the list."""
    self["list"].moveTo(eListbox.moveTop)

def toEnd(self):
    """Jump to the last service in the list."""
    self["list"].moveTo(eListbox.moveEnd)

def prevPressed(self):
    """Go to the previous page of events (see key help: 'Goto previous page of events')."""
    self.updEvent(-2)

def nextPressed(self):
    """Go to the next page of events."""
    self.updEvent(+2)

def leftPressed(self):
    """Select the previous event."""
    self.updEvent(-1)

def rightPressed(self):
    """Select the next event."""
    self.updEvent(+1)

def prevDay(self):
    """Go to the previous day of events."""
    self.updEvent(-3)

def nextDay(self):
    """Go to the next day of events."""
    self.updEvent(+3)
def updEvent(self, dir, visible = True):
    """Move the event selection by direction code *dir* (see the key
    handlers above for the codes: +-1 event, +-2 page, +-3 day) and
    redraw the time lines if the visible window shifted."""
    ret = self["list"].selEntry(dir, visible)
    if ret:
        self.moveTimeLines(True)

def updEpoch(self, mins):
    """Set the visible time window to *mins* minutes and persist the choice."""
    self["list"].setEpoch(mins)
    config.misc.graph_mepg.prev_time_period.value = mins
    self.moveTimeLines()

def key1(self):
    """Show a 1-hour time window."""
    self.updEpoch(60)

def key2(self):
    """Show a 2-hour time window."""
    self.updEpoch(120)

def key3(self):
    """Show a 3-hour time window."""
    self.updEpoch(180)

def key4(self):
    """Show a 4-hour time window."""
    self.updEpoch(240)

def key5(self):
    """Show a 5-hour time window."""
    self.updEpoch(300)

def key6(self):
    """Show a 6-hour time window."""
    self.updEpoch(360)
def nextBouquet(self):
    """Step to the next bouquet via the injected callback, if any."""
    if self.bouquetChangeCB:
        self.bouquetChangeCB(1, self)

def prevBouquet(self):
    """Step to the previous bouquet via the injected callback, if any."""
    if self.bouquetChangeCB:
        self.bouquetChangeCB(-1, self)

def enterDateTime(self):
    """Ask the user for a date/time and jump the EPG window there."""
    t = localtime(time())
    config.misc.graph_mepg.prev_time.value = [t.tm_hour, t.tm_min]
    self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.graph_mepg.prev_time)
def onDateTimeInputClosed(self, ret):
    """Callback of the date/time dialog: refill the grid at the chosen time.

    The chosen time is clamped so it never lies further back than the
    configured EPG history, then rounded down to the configured interval.
    """
    if len(ret) > 1:
        if ret[0]:
            now = time() - config.epg.histminutes.getValue() * 60
            self.ask_time = ret[1] if ret[1] >= now else now
            self.ask_time = self.ask_time - self.ask_time % int(config.misc.graph_mepg.roundTo.getValue())
            l = self["list"]
            l.resetOffset()
            l.fillMultiEPG(None, self.ask_time)
            self.moveTimeLines(True)
def showSetup(self):
    """Open the Graphical Multi EPG setup screen."""
    self.session.openWithCallback(self.onSetupClose, GraphMultiEpgSetup)

def onSetupClose(self, ignore = -1):
    """Re-apply every configurable view option after the setup screen closes."""
    l = self["list"]
    l.setItemsPerPage()
    l.setEventFontsize()
    l.setEpoch(config.misc.graph_mepg.prev_time_period.value)
    l.setOverjump_Empty(config.misc.graph_mepg.overjump.value)
    l.setShowServiceMode(config.misc.graph_mepg.servicetitle_mode.value)
    # Realign the window start to the rounding interval, allowing for the
    # configured amount of EPG history.
    now = time() - config.epg.histminutes.getValue() * 60
    self.ask_time = now - now % int(config.misc.graph_mepg.roundTo.getValue())
    self["timeline_text"].setDateFormat(config.misc.graph_mepg.servicetitle_mode.value)
    l.fillMultiEPG(None, self.ask_time)
    self.moveTimeLines(True)

def closeScreen(self):
    """Zap back to the previously playing service, save config and close."""
    self.zapFunc(None, zapback = True)
    config.misc.graph_mepg.save()
    self.close(False)
def furtherOptions(self):
    """Show a menu of extra actions (event plugins, timer overview, setup)."""
    menu = []
    text = _("Select action")
    event = self["list"].getCurrent()[0]
    if event:
        # Offer every EVENTINFO plugin that accepts a 'selectedevent'
        # argument.  NOTE(review): func_code is Python-2-only; on Python 3
        # this would need __code__.
        menu = [(p.name, boundFunction(self.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO) \
                if 'selectedevent' in p.__call__.func_code.co_varnames]
        if menu:
            text += _(": %s") % event.getEventName()
    menu.append((_("Timer Overview"), self.openTimerOverview))
    menu.append((_("Setup menu"), self.showSetup))
    if len(menu) == 1:
        # Only one choice: run it directly instead of opening a ChoiceBox.
        menu and menu[0][1]()
    elif len(menu) > 1:
        def boxAction(choice):
            if choice:
                choice[1]()
        self.session.openWithCallback(boxAction, ChoiceBox, title=text, list=menu)

def runPlugin(self, plugin):
    """Invoke an EVENTINFO *plugin* with the current list selection."""
    event = self["list"].getCurrent()
    plugin(session=self.session, selectedevent=event)

def openTimerOverview(self):
    """Open the timer list screen."""
    self.session.open(TimerEditList)
def infoKeyPressed(self):
    """Show the detailed event view for the current selection."""
    cur = self["list"].getCurrent()
    event = cur[0]
    service = cur[1]
    if event is not None:
        self.session.open(EventViewEPGSelect, event, service, self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)

def openSimilarList(self, eventid, refstr):
    """List events similar to *eventid* on the service *refstr*."""
    self.session.open(EPGSelection, refstr, None, eventid)

def openSingleServiceEPG(self):
    """Open the classic single-service EPG for the selected service."""
    ref = self["list"].getCurrent()[1].ref.toString()
    if ref:
        self.session.open(EPGSelection, ref)

def openMultiServiceEPG(self):
    """Open the multi-service EPG for the current service set."""
    if self.services:
        self.session.openWithCallback(self.doRefresh, EPGSelection, self.services, self.zapFunc, None, self.bouquetChangeCB)

def setServices(self, services):
    """Replace the displayed service list and rebuild the view."""
    self.services = services
    self.onCreate()
def doRefresh(self, answer):
    """Resync selection and 'now playing' marker after a sub-screen closes."""
    serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    l = self["list"]
    l.moveToService(serviceref)
    l.setCurrentlyPlaying(serviceref)
    self.moveTimeLines()

def onCreate(self):
    """Initial fill of the EPG grid once the screen layout is finished."""
    serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    l = self["list"]
    l.setShowServiceMode(config.misc.graph_mepg.servicetitle_mode.value)
    self["timeline_text"].setDateFormat(config.misc.graph_mepg.servicetitle_mode.value)
    l.fillMultiEPG(self.services, self.ask_time)
    l.moveToService(serviceref)
    l.setCurrentlyPlaying(serviceref)
    self.moveTimeLines()
def eventViewCallback(self, setEvent, setService, val):
    """Feed the event viewer with the previous/next event (*val* direction).

    Recurses past services that have no event data until an event is
    found or the service under the cursor no longer changes.
    """
    l = self["list"]
    old = l.getCurrent()
    self.updEvent(val, False)
    cur = l.getCurrent()
    if cur[0] is None and cur[1].ref != old[1].ref:
        self.eventViewCallback(setEvent, setService, val)
    else:
        setService(cur[1])
        setEvent(cur[0])

def preview(self):
    """Tune to the selected channel without leaving the EPG screen."""
    ref = self["list"].getCurrent()[1]
    if ref:
        self.zapFunc(ref.ref, preview = True)
        self["list"].setCurrentlyPlaying(ref.ref)
        self["list"].l.invalidate()
def zapTo(self):
    """Zap to the selected channel; zapping the same channel twice
    (when not previewing) saves the config and closes the screen."""
    if self.zapFunc and self.key_red_choice == self.ZAP:
        ref = self["list"].getCurrent()[1]
        if ref:
            from Components.ServiceEventTracker import InfoBarCount
            # With more than one infobar active, the zap acts as a preview.
            preview = InfoBarCount > 1
            self.zapFunc(ref.ref, preview)
            if self.previousref and self.previousref == ref.ref and not preview:
                config.misc.graph_mepg.save()
                self.close(True)
            self.previousref = ref.ref
            self["list"].setCurrentlyPlaying(ref.ref)
            self["list"].l.invalidate()

def swapMode(self):
    """Toggle between graphical and list layout, then reopen the screen."""
    global listscreen
    listscreen = not listscreen
    self.close(None)

def eventSelected(self):
    """OK button: show event info or zap, depending on configuration."""
    if config.misc.graph_mepg.OKButton.value == "info":
        self.infoKeyPressed()
    else:
        self.zapTo()
def removeTimer(self, timer):
    """Delete *timer* (suppressing its after-event action) and reset the green key."""
    timer.afterEvent = AFTEREVENT.NONE
    self.session.nav.RecordTimer.removeEntry(timer)
    self["key_green"].setText(_("Add timer"))
    self.key_green_choice = self.ADD_TIMER

def disableTimer(self, timer):
    """Disable *timer* and reset the green key."""
    timer.disable()
    self.session.nav.RecordTimer.timeChanged(timer)
    self["key_green"].setText(_("Add timer"))
    self.key_green_choice = self.ADD_TIMER
def timerAdd(self):
    """Green key: add a timer for the selected event, or manage an
    existing timer for it (delete/edit/disable/overview)."""
    cur = self["list"].getCurrent()
    event = cur[0]
    if event is None:
        return
    eventid = event.getEventId()
    serviceref = cur[1]
    # Compare only the first 11 ':'-separated fields of the service
    # reference so differing trailing data does not break the match.
    refstr = ':'.join(serviceref.ref.toString().split(':')[:11])
    for timer in self.session.nav.RecordTimer.timer_list:
        if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
            # A timer already exists for this event: offer management actions.
            menu = [(_("Delete timer"), "delete"),(_("Edit timer"), "edit")]
            buttons = ["red", "green"]
            if not timer.isRunning():
                menu.append((_("Disable timer"), "disable"))
                buttons.append("yellow")
            menu.append((_("Timer Overview"), "timereditlist"))
            def timerAction(choice):
                if choice is not None:
                    if choice[1] == "delete":
                        self.removeTimer(timer)
                    elif choice[1] == "edit":
                        self.session.open(TimerEntry, timer)
                    elif choice[1] == "disable":
                        self.disableTimer(timer)
                    elif choice[1] == "timereditlist":
                        self.session.open(TimerEditList)
            self.session.openWithCallback(timerAction, ChoiceBox, title=_("Select action for timer %s:") % event.getEventName(), list=menu, keys=buttons)
            break
    else:
        # No existing timer: open the timer editor with a fresh entry.
        newEntry = RecordTimerEntry(serviceref, checkOldTimers = True, *parseEvent(event))
        self.session.openWithCallback(self.finishedTimerAdd, TimerEntry, newEntry)
def finishedTimerAdd(self, answer):
    """Callback of the timer editor: register the new timer and resolve
    simple conflicts by nudging begin/end times by 30 seconds."""
    print "finished add"
    if answer[0]:
        entry = answer[1]
        simulTimerList = self.session.nav.RecordTimer.record(entry)
        if simulTimerList is not None:
            # Conflicts: try to let conflicting timers auto-extend first.
            for x in simulTimerList:
                if x.setAutoincreaseEnd(entry):
                    self.session.nav.RecordTimer.timeChanged(x)
            simulTimerList = self.session.nav.RecordTimer.record(entry)
            if simulTimerList is not None:
                if not entry.repeated and not config.recording.margin_before.value and not config.recording.margin_after.value and len(simulTimerList) > 1:
                    # Back-to-back recordings: shave 30s off the touching edge.
                    change_time = False
                    conflict_begin = simulTimerList[1].begin
                    conflict_end = simulTimerList[1].end
                    if conflict_begin == entry.end:
                        entry.end -= 30
                        change_time = True
                    elif entry.begin == conflict_end:
                        entry.begin += 30
                        change_time = True
                    if change_time:
                        simulTimerList = self.session.nav.RecordTimer.record(entry)
                if simulTimerList is not None:
                    # Still conflicting: let the user resolve it.
                    self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
        self["key_green"].setText(_("Change timer"))
        self.key_green_choice = self.REMOVE_TIMER
    else:
        self["key_green"].setText(_("Add timer"))
        self.key_green_choice = self.ADD_TIMER
        print "Timeredit aborted"

def finishSanityCorrection(self, answer):
    """Callback of the conflict dialog: retry the normal add path."""
    self.finishedTimerAdd(answer)
def onSelectionChanged(self):
    """Keep the event info pane and the red/green colour keys in sync
    with the current list selection."""
    cur = self["list"].getCurrent()
    event = cur[0]
    self["Event"].newEvent(event)
    if cur[1] is None or cur[1].getServiceName() == "":
        # No usable service under the cursor: blank both colour keys.
        if self.key_green_choice != self.EMPTY:
            self["key_green"].setText("")
            self.key_green_choice = self.EMPTY
        if self.key_red_choice != self.EMPTY:
            self["key_red"].setText("")
            self.key_red_choice = self.EMPTY
        return
    servicerefref = cur[1].ref
    self["Service"].newService(servicerefref)
    if self.key_red_choice != self.ZAP:
        self["key_red"].setText(_("Zap"))
        self.key_red_choice = self.ZAP
    if not event:
        if self.key_green_choice != self.EMPTY:
            self["key_green"].setText("")
            self.key_green_choice = self.EMPTY
        return
    # Green key reflects whether a timer already exists for this event.
    eventid = event.getEventId()
    refstr = ':'.join(servicerefref.toString().split(':')[:11])
    isRecordEvent = False
    for timer in self.session.nav.RecordTimer.timer_list:
        if timer.eit == eventid and ':'.join(timer.service_ref.ref.toString().split(':')[:11]) == refstr:
            isRecordEvent = True
            break
    if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
        self["key_green"].setText(_("Change timer"))
        self.key_green_choice = self.REMOVE_TIMER
    elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER:
        self["key_green"].setText(_("Add timer"))
        self.key_green_choice = self.ADD_TIMER
def moveTimeLines(self, force=False):
    """Redraw the time scale, re-arming the timer to fire on the next full minute."""
    self.updateTimelineTimer.start((60 - (int(time()) % 60)) * 1000) #keep syncronised
    self["timeline_text"].setEntries(self["list"], self["timeline_now"], self.time_lines, force)
    self["list"].l.invalidate() # not needed when the zPosition in the skin is correct! ?????
|
mdanielwork/intellij-community | refs/heads/master | python/testData/docstrings/numpySectionBlockBreaksOnDoubleEmptyLine.py | 53 | def func():
"""
Parameters
----------
x
First line
Second line
Line after single break
Not included
"""
|
bd339/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/wptserve/constants.py | 141 | from . import utils
# Extension -> MIME type lookup.  invert_dict flips the
# {content_type: [extensions]} table written below into
# {extension: content_type}.
content_types = utils.invert_dict({"text/html": ["htm", "html"],
                                   "application/json": ["json"],
                                   "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
                                   "application/xml": ["xml"],
                                   "application/x-xpinstall": ["xpi"],
                                   "text/javascript": ["js"],
                                   "text/css": ["css"],
                                   "text/plain": ["txt", "md"],
                                   "image/svg+xml": ["svg"],
                                   "image/gif": ["gif"],
                                   "image/jpeg": ["jpg", "jpeg"],
                                   "image/png": ["png"],
                                   "image/bmp": ["bmp"],
                                   "text/event-stream": ["event_stream"],
                                   "text/cache-manifest": ["manifest"],
                                   "video/mp4": ["mp4", "m4v"],
                                   "audio/mp4": ["m4a"],
                                   "audio/mpeg": ["mp3"],
                                   "video/webm": ["webm"],
                                   "audio/webm": ["weba"],
                                   "video/ogg": ["ogg", "ogv"],
                                   "audio/ogg": ["oga"],
                                   "audio/x-wav": ["wav"],
                                   "text/vtt": ["vtt"],})
# HTTP status codes: {code: (reason phrase, long description)}.
response_codes = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),

    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),

    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),

    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),

    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
|
neudesk/neucloud | refs/heads/master | openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/views.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instance snapshots.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images_and_snapshots.snapshots \
import forms as project_forms
class CreateView(forms.ModalFormView):
    """Modal workflow for creating a snapshot of a Nova instance."""
    form_class = project_forms.CreateSnapshot
    template_name = 'project/images_and_snapshots/snapshots/create.html'
    success_url = reverse_lazy("horizon:project:images_and_snapshots:index")

    @memoized.memoized_method
    def get_object(self):
        """Return the Nova server being snapshotted (cached per request).

        On failure, redirect to the instances index with an error message.
        """
        try:
            return api.nova.server_get(self.request,
                                       self.kwargs["instance_id"])
        except Exception:
            redirect = reverse('horizon:project:instances:index')
            exceptions.handle(self.request,
                              _("Unable to retrieve instance."),
                              redirect=redirect)

    def get_initial(self):
        # Pre-populate the snapshot form with the instance id from the URL.
        return {"instance_id": self.kwargs["instance_id"]}

    def get_context_data(self, **kwargs):
        # Expose the instance to the template alongside the form context.
        context = super(CreateView, self).get_context_data(**kwargs)
        context['instance'] = self.get_object()
        return context
|
wanderknight/tushare | refs/heads/master | tushare/stock/reference.py | 2 | # -*- coding:utf-8 -*-
"""
投资参考数据接口
Created on 2015/03/21
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
from tushare.util import dateu as dt
import pandas as pd
import time
import lxml.html
from lxml import etree
import re
import json
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def profit_data(year=2014, top=25,
                retry_count=3, pause=0.001):
    """
    Fetch dividend/distribution plan data.

    Parameters
    --------
    year : year to query
    top : number of most recent announcements to return (default 25);
          pass the string 'all' to fetch every page
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    returns
    -------
    DataFrame
        code : stock code
        name : stock name
        year : distribution year
        report_date : announcement date
        divi : cash dividend per 10 shares
        shares : bonus/transferred shares per 10 shares
    """
    # BUG FIX: the original tested `top <= 25` first, which raises
    # TypeError on Python 3 when top == 'all' (str vs int comparison),
    # making the 'all' branch unreachable.  Check the string case first.
    if top == 'all':
        ct._write_head()
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df
    elif isinstance(top, int):
        if top <= 25:
            df, pages = _dist_cotent(year, 0, retry_count, pause)
            return df.head(top)
        ct._write_head()
        # 25 rows per page; round the needed page count up.
        allPages = top/25+1 if top%25>0 else top/25
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        if int(allPages) < int(pages):
            pages = allPages
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df.head(top)
    else:
        print(ct.TOP_PARAS_MSG)
def _fun_divi(x):
    """Extract the cash dividend (per 10 shares) from a Chinese plan string.

    Parses the number between '分红' (dividend) and '元' (yuan);
    returns 0 when no dividend figure is present.
    """
    if ct.PY3:
        reg = re.compile(r'分红(.*?)元', re.UNICODE)
        res = reg.findall(x)
        return 0 if len(res)<1 else float(res[0])
    else:
        # Python 2: build the pattern from explicit unicode fragments and
        # only attempt the match on unicode input.
        if isinstance(x, unicode):
            s1 = unicode('分红','utf-8')
            s2 = unicode('元','utf-8')
            reg = re.compile(r'%s(.*?)%s'%(s1, s2), re.UNICODE)
            res = reg.findall(x)
            return 0 if len(res)<1 else float(res[0])
        else:
            return 0
def _fun_into(x):
    """Extract total bonus shares from a Chinese plan string.

    Sums the figures after '转增' (transfer-increase) and '送股'
    (bonus shares), each read up to the following '股' (shares);
    missing components count as 0.
    """
    if ct.PY3:
        reg1 = re.compile(r'转增(.*?)股', re.UNICODE)
        reg2 = re.compile(r'送股(.*?)股', re.UNICODE)
        res1 = reg1.findall(x)
        res2 = reg2.findall(x)
        res1 = 0 if len(res1)<1 else float(res1[0])
        res2 = 0 if len(res2)<1 else float(res2[0])
        return res1 + res2
    else:
        # Python 2: same extraction, but via explicit unicode fragments.
        if isinstance(x, unicode):
            s1 = unicode('转增','utf-8')
            s2 = unicode('送股','utf-8')
            s3 = unicode('股','utf-8')
            reg1 = re.compile(r'%s(.*?)%s'%(s1, s3), re.UNICODE)
            reg2 = re.compile(r'%s(.*?)%s'%(s2, s3), re.UNICODE)
            res1 = reg1.findall(x)
            res2 = reg2.findall(x)
            res1 = 0 if len(res1)<1 else float(res1[0])
            res2 = 0 if len(res2)<1 else float(res2[0])
            return res1 + res2
        else:
            return 0
def _dist_cotent(year, pageNo, retry_count, pause):
    """Scrape one 163.com page of distribution-plan data.

    For page 0 the total page count (parsed from the pager widget) is
    returned alongside the DataFrame.  Raises IOError after
    *retry_count* failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            if pageNo > 0:
                ct._write_console()
            html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                     ct.PAGES['163dp'], year, pageNo))
            res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            df = pd.read_html(sarr, skiprows=[0])[0]
            df = df.drop(df.columns[0], axis=1)
            df.columns = rv.DP_163_COLS
            # Derive numeric dividend/bonus columns from the textual plan.
            df['divi'] = df['plan'].map(_fun_divi)
            df['shares'] = df['plan'].map(_fun_into)
            df = df.drop('plan', axis=1)
            df['code'] = df['code'].astype(object)
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            pages = []
            if pageNo == 0:
                # Second-to-last pager link holds the last page number.
                page = html.xpath('//div[@class=\"mod_pages\"]/a')
                if len(page)>1:
                    asr = page[len(page)-2]
                    pages = asr.xpath('text()')
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, pages[0] if len(pages)>0 else 0
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def forecast_data(year, quarter):
    """
    Fetch earnings pre-announcement (forecast) data.

    Parameters
    --------
    year : int, e.g. 2014
    quarter : int, one of 1, 2, 3, 4
    Note: the data is scraped from the website page by page, so speed
    depends on the current network connection.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        type : kind of change (e.g. expected increase, expected loss)
        report_date : announcement date
        pre_eps : EPS of the same period last year
        range : expected change range
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_forecast_data(year, quarter, 1, pd.DataFrame())
        df = pd.DataFrame(data, columns=ct.FORECAST_COLS)
        df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df
def _get_forecast_data(year, quarter, pageNo, dataArr):
    """Recursively scrape Sina forecast pages, following 'next page' links.

    Accumulates rows into *dataArr* and returns it when the last page is
    reached.  On error the exception is printed and None is returned.
    """
    ct._write_console()
    try:
        html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                                ct.PAGES['fd'], year, quarter, pageNo,
                                                ct.PAGE_NUM[1]))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        # '--' marks missing values on the page; treat them as zero.
        sarr = sarr.replace('--', '0')
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr)[0]
        df = df.drop([4, 5, 8], axis=1)
        df.columns = ct.FORECAST_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage)>0:
            pageNo = re.findall(r'\d+',nextPage[0])[0]
            return _get_forecast_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)
def xsg_data(year=None, month=None,
             retry_count=3, pause=0.001):
    """
    Fetch restricted-share (lock-up) release data.

    Parameters
    --------
    year : year, defaults to the current year
    month : release month, defaults to the current month
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        code : stock code
        name : stock name
        date : release date
        count : released amount (10k shares)
        ratio : percentage of total shares
    """
    year = dt.get_year() if year is None else year
    month = dt.get_month() if month is None else month
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
                                     ct.PAGES['emxsg'], year, month))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
        except Exception as e:
            print(e)
        else:
            # Strip the 3-character wrapper around the payload.
            da = lines[3:len(lines)-3]
            # Renamed from `list` so the builtin is not shadowed.
            rows = []
            for row in da.split('","'):
                rows.append([data for data in row.split(',')])
            df = pd.DataFrame(rows)
            df = df[[1, 3, 4, 5, 6]]
            for col in [5, 6]:
                df[col] = df[col].astype(float)
            df[5] = df[5]/10000
            df[6] = df[6]*100
            df[5] = df[5].map(ct.FORMAT)
            df[6] = df[6].map(ct.FORMAT)
            df.columns = rv.XSG_COLS
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def fund_holdings(year, quarter,
                  retry_count=3, pause=0.001):
    """
    Fetch mutual-fund shareholding data.

    Parameters
    --------
    year : e.g. 2014
    quarter : one of 1, 2, 3, 4
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        code : stock code
        name : stock name
        date : report date
        nums : number of holding funds
        nlast : change versus the previous period
        count : shares held by funds (10k)
        clast : change versus the previous period
        amount : market value of fund holdings
        ratio : percentage of the float
    """
    start,end = rv.QUARTS_DIC[str(quarter)]
    if quarter == 1:
        # Q1 report window starts in the previous year.
        start = start % str(year-1)
        end = end%year
    else:
        start, end = start%year, end%year
    ct._write_head()
    df, pages = _holding_cotent(start, end, 0, retry_count, pause)
    for idx in range(1, pages):
        df = df.append(_holding_cotent(start, end, idx, retry_count, pause),
                       ignore_index=True)
    return df
def _holding_cotent(start, end, pageNo, retry_count, pause):
    """Scrape one 163.com page of fund holdings.

    For page 0 the total page count is returned alongside the DataFrame;
    raises IOError after *retry_count* failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        if pageNo>0:
            ct._write_console()
        try:
            request = Request(rv.FUND_HOLDS_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                     ct.PAGES['163fh'], ct.PAGES['163fh'],
                     pageNo, start, end, _random(5)))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            # '--' marks missing values; treat them as zero.
            lines = lines.replace('--', '0')
            lines = json.loads(lines)
            data = lines['list']
            df = pd.DataFrame(data)
            df = df.drop(['CODE', 'ESYMBOL', 'EXCHANGE', 'NAME', 'RN', 'SHANGQIGUSHU',
                          'SHANGQISHIZHI', 'SHANGQISHULIANG'], axis=1)
            for col in ['GUSHU', 'GUSHUBIJIAO', 'SHIZHI', 'SCSTC27']:
                df[col] = df[col].astype(float)
            # Rescale: ratio to percent, share/value figures to 10k units.
            df['SCSTC27'] = df['SCSTC27']*100
            df['GUSHU'] = df['GUSHU']/10000
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO']/10000
            df['SHIZHI'] = df['SHIZHI']/10000
            df['GUSHU'] = df['GUSHU'].map(ct.FORMAT)
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO'].map(ct.FORMAT)
            df['SHIZHI'] = df['SHIZHI'].map(ct.FORMAT)
            df['SCSTC27'] = df['SCSTC27'].map(ct.FORMAT)
            df.columns = rv.FUND_HOLDS_COLS
            df = df[['code', 'name', 'date', 'nums', 'nlast', 'count',
                     'clast', 'amount', 'ratio']]
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, int(lines['pagecount'])
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def new_stocks(retry_count=3, pause=0.001):
    """
    Fetch IPO (new stock listing) data.

    Parameters
    --------
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        code : stock code
        name : stock name
        ipo_date : online subscription date
        issue_date : listing date
        amount : shares issued (10k)
        markets : shares issued online (10k)
        price : issue price (CNY)
        pe : issue P/E ratio
        limit : individual subscription cap (10k shares)
        funds : funds raised (100M CNY)
        ballot : online lottery winning rate (%)
    """
    data = pd.DataFrame()
    ct._write_head()
    df = _newstocks(data, 1, retry_count,
                    pause)
    return df
def _newstocks(data, pageNo, retry_count, pause):
    """Recursively scrape Sina IPO pages until no 'next page' link remains."""
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
                         ct.PAGES['newstock'], pageNo))
            res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            sarr = sarr.replace('<font color="red">*</font>', '')
            sarr = '<table>%s</table>'%sarr
            df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
            df = df.drop([df.columns[idx] for idx in [1, 12, 13, 14]], axis=1)
            df.columns = rv.NEW_STOCKS_COLS
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))
            # Keep fetching while the pager still offers a 'next page' link.
            res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
            tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
            hasNext = True if tag in res else False
            data = data.append(df, ignore_index=True)
            pageNo += 1
            if hasNext:
                data = _newstocks(data, pageNo, retry_count, pause)
        except Exception as ex:
            print(ex)
        else:
            return data
def sh_margins(start=None, end=None, retry_count=3, pause=0.001):
    """
    Fetch the Shanghai Stock Exchange margin-trading summary.

    Parameters
    --------
    start : string, YYYY-MM-DD; defaults to one year ago today
    end : string, YYYY-MM-DD; defaults to today
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        opDate : trading date
        rzye : margin-loan balance (CNY)
        rzmre : margin buying amount (CNY)
        rqyl : short-sale volume balance
        rqylje : short-sale balance amount (CNY)
        rqmcl : shares sold short that day
        rzrqjyzl : total margin balance (CNY)
    """
    start = du.today_last_year() if start is None else start
    end = du.today() if end is None else end
    if du.diff_day(start, end) < 0:
        return None
    # The SSE query expects compact YYYYMMDD dates.
    start, end = start.replace('-', ''), end.replace('-', '')
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_hz(data, start=start, end=end,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_hz(data, start=None, end=None,
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    """Recursively page through the SSE margin summary query (5 pages per batch)."""
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            tail = rv.MAR_SH_HZ_TAIL_URL%(pageNo,
                                          beginPage, endPage)
            if pageNo == '':
                # First call: no paging tail yet.
                pageNo = 6
                tail = ''
            else:
                pageNo += 5
            beginPage = pageNo
            endPage = pageNo + 4
            url = rv.MAR_SH_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                                    ct.PAGES['qmd'], _random(5),
                                    start, end, tail,
                                    _random())
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(url, ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            # Strip the JSONP wrapper before parsing.
            lines = lines[19:-1]
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_HZ_COLS)
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                data = _sh_hz(data, start=start, end=end, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sh_margin_details(date='', symbol='',
                      start='', end='',
                      retry_count=3, pause=0.001):
    """
    Fetch per-security Shanghai margin-trading details.

    Parameters
    --------
    date : string, YYYY-MM-DD; single day to query (default '')
    symbol : string, 6-digit security code, e.g. 600848 (default '')
    start : string, YYYY-MM-DD (default '')
    end : string, YYYY-MM-DD (default '')
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        opDate : trading date
        stockCode : security code
        securityAbbr : security short name
        rzye : margin-loan balance (CNY)
        rzmre : margin buying amount (CNY)
        rzche : margin repayment amount (CNY)
        rqyl : short-sale volume balance
        rqmcl : shares sold short that day
        rqchl : shares returned that day
    """
    date = date if date == '' else date.replace('-', '')
    start = start if start == '' else start.replace('-', '')
    end = end if end == '' else end.replace('-', '')
    # A start/end range overrides the single-day parameter.
    if (start != '') & (end != ''):
        date = ''
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_mx(data, date=date, start=start,
                end=end, symbol=symbol,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_mx(data, date='', start='', end='',
           symbol='',
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    """Recursively page through the SSE per-security margin detail query."""
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            tail = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'%(pageNo,
                                                                                    beginPage, endPage)
            if pageNo == '':
                # First call: no paging tail yet.
                pageNo = 6
                tail = ''
            else:
                pageNo += 5
            beginPage = pageNo
            endPage = pageNo + 4
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(rv.MAR_SH_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                                           ct.PAGES['qmd'], _random(5), date,
                                           symbol, start, end, tail,
                                           _random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            # Strip the JSONP wrapper before parsing.
            lines = lines[19:-1]
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            if pagecount == 0:
                return data
            if pageNo == 6:
                # First batch: report the total row count to the console.
                ct._write_tips(lines['pageHelp'].get('total'))
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_MX_COLS)
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                data = _sh_mx(data, start=start, end=end, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margins(start=None, end=None, retry_count=3, pause=0.001):
    """
    Fetch the Shenzhen Stock Exchange margin-trading summary.

    Parameters
    --------
    start : string, YYYY-MM-DD; defaults to one week ago today
    end : string, YYYY-MM-DD; defaults to today
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        opDate : trading date (index)
        rzmre : margin buying amount (CNY)
        rzye : margin-loan balance (CNY)
        rqmcl : shares sold short that day
        rqyl : short-sale volume balance
        rqye : short-sale balance (CNY)
        rzrqye : total margin balance (CNY)
    """
    data = pd.DataFrame()
    if start is None and end is None:
        end = du.today()
        start = du.day_last_week()
    if start is None or end is None:
        ct._write_msg(rv.MAR_SZ_HZ_MSG2)
        return None
    try:
        # Business days only; the exchange publishes one page per day.
        date_range = pd.date_range(start=start, end=end, freq='B')
        if len(date_range)>261:
            # More than ~one trading year requested: refuse.
            ct._write_msg(rv.MAR_SZ_HZ_MSG)
        else:
            ct._write_head()
            for date in date_range:
                data = data.append(_sz_hz(str(date.date()), retry_count, pause) )
    # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception so only real errors are reported as bad input.
    except Exception:
        ct._write_msg(ct.DATA_INPUT_ERROR_MSG)
    else:
        return data
def _sz_hz(date='', retry_count=3, pause=0.001):
    """Fetch the SZSE margin summary for a single *date* (YYYY-MM-DD).

    Returns an empty DataFrame when the exchange has no page for that
    day; raises IOError after *retry_count* failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()
        try:
            request = Request(rv.MAR_SZ_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
                                                ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout = 10).read()
            # A tiny response means no data was published for this date.
            if len(lines) <= 200:
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_HZ_COLS
            df['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margin_details(date='', retry_count=3, pause=0.001):
    """
    Fetch per-security Shenzhen margin-trading details.

    Parameters
    --------
    date : string, YYYY-MM-DD (default '')
    retry_count : int, default 3
        number of retries on network failure
    pause : int, default 0
        seconds to sleep between repeated requests

    Return
    ------
    DataFrame
        opDate : trading date
        stockCode : security code
        securityAbbr : security short name
        rzmre : margin buying amount (CNY)
        rzye : margin-loan balance (CNY)
        rqmcl : shares sold short that day
        rqyl : short-sale volume balance
        rqye : short-sale balance (CNY)
        rzrqye : total margin balance (CNY)
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
                                                ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout = 10).read()
            # A tiny response means no data was published for this date.
            if len(lines) <= 200:
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_MX_COLS
            df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))
            df['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
|
mahyarABS/raspi | refs/heads/master | i2c-sensors/bitify/python/sensors/bmp085.py | 3 | import bitify.python.utils.i2cutils as I2CUtils
import smbus
import time
class BMP085(object):
    '''
    Simple BMP085 implementation
    Datasheet: http://www.adafruit.com/datasheets/BMP085_DataSheet_Rev.1.0_01July2008.pdf

    Reads the sensor's 22-byte factory calibration block once at
    construction time, then converts raw readings into temperature
    (degrees C) and pressure (hPa) with the integer algorithm from the
    datasheet.

    NOTE(review): this module targets Python 2 (see the `print`
    statement in the __main__ block below), so the `/` operators in
    calculate() perform floor division on int operands, which is what
    the datasheet algorithm expects — confirm before porting to
    Python 3.
    '''
    # I2C register address and size of the factory calibration block.
    CALIB_BLOCK_ADDRESS = 0xAA
    CALIB_BLOCK_SIZE = 22
    def __init__(self, bus, address, name, oss=3):
        '''
        Constructor

        bus -- an smbus.SMBus instance to talk to the sensor on
        address -- I2C address of the sensor (0x77 in the demo below)
        name -- free-form identifier for this sensor instance
        oss -- oversampling setting, 0-3 (3 = highest resolution,
               longest conversion time)
        '''
        self.bus = bus
        self.address = address
        self.name = name
        # Factory calibration coefficients; read once here and reused
        # for every conversion in calculate().
        self.calibration = I2CUtils.i2c_read_block(bus, address, BMP085.CALIB_BLOCK_ADDRESS, BMP085.CALIB_BLOCK_SIZE)
        self.oss = oss
        # Conversion delays in seconds to wait after triggering a
        # temperature / pressure measurement.
        self.temp_wait_period = 0.004
        self.pressure_wait_period = 0.0255 # Conversion time
    def twos_compliment(self, val):
        # Interpret a 16-bit value as a signed two's-complement number.
        if (val >= 0x8000):
            return -((0xffff - val) + 1)
        else:
            return val
    def get_word(self, array, index, twos):
        # Combine two consecutive bytes (big-endian) into a 16-bit word,
        # sign-extending it when `twos` is true.
        val = (array[index] << 8) + array[index + 1]
        if twos:
            return self.twos_compliment(val)
        else:
            return val
    def calculate(self):
        # The sensor has a block of factory set calibration values we need to read
        # these are then used in a length calculation to get the temperature and pressure
        # copy these into convenience variables
        ac1 = self.get_word(self.calibration, 0, True)
        ac2 = self.get_word(self.calibration, 2, True)
        ac3 = self.get_word(self.calibration, 4, True)
        ac4 = self.get_word(self.calibration, 6, False)
        ac5 = self.get_word(self.calibration, 8, False)
        ac6 = self.get_word(self.calibration, 10, False)
        b1 = self.get_word(self.calibration, 12, True)
        b2 = self.get_word(self.calibration, 14, True)
        mb = self.get_word(self.calibration, 16, True)
        mc = self.get_word(self.calibration, 18, True)
        md = self.get_word(self.calibration, 20, True)
        oss = self.oss
        # This code is a direct translation from the datasheet
        # and should be optimised for real world use
        # Read raw temperature
        I2CUtils.i2c_write_byte(self.bus, self.address, 0xF4, 0x2E)  # Tell the sensor to take a temperature reading
        time.sleep(self.temp_wait_period)  # Wait for the conversion to take place
        temp_raw = I2CUtils.i2c_read_word_signed(self.bus, self.address, 0xF6)
        I2CUtils.i2c_write_byte(self.bus, self.address, 0xF4, 0x34 + (self.oss << 6))  # Tell the sensor to take a pressure reading
        time.sleep(self.pressure_wait_period)  # Wait for the conversion to take place
        # Assemble the 19-bit raw pressure from three byte registers,
        # dropping the unused low bits according to the oversampling.
        pressure_raw = ((I2CUtils.i2c_read_byte(self.bus, self.address, 0xF6) << 16) \
            + (I2CUtils.i2c_read_byte(self.bus, self.address, 0xF7) << 8) \
            + (I2CUtils.i2c_read_byte(self.bus, self.address, 0xF8))) >> (8 - self.oss)
        # Calculate temperature
        x1 = ((temp_raw - ac6) * ac5) / 32768
        x2 = (mc * 2048) / (x1 + md)
        b5 = x1 + x2
        t = (b5 + 8) / 16
        # Now calculate the pressure
        b6 = b5 - 4000
        x1 = (b2 * (b6 * b6 >> 12)) >> 11
        x2 = ac2 * b6 >> 11
        x3 = x1 + x2
        b3 = (((ac1 * 4 + x3) << oss) + 2) >> 2
        x1 = (ac3 * b6) >> 13
        x2 = (b1 * (b6 * b6 >> 12)) >> 16
        x3 = ((x1 + x2) + 2) >> 2
        b4 = ac4 * (x3 + 32768) >> 15
        b7 = (pressure_raw - b3) * (50000 >> oss)
        if (b7 < 0x80000000):
            p = (b7 * 2) / b4
        else:
            p = (b7 / b4) * 2
        x1 = (p >> 8) * (p >> 8)
        x1 = (x1 * 3038) >> 16
        x2 = (-7357 * p) >> 16
        p = p + ((x1 + x2 + 3791) >> 4)
        # Scale to degrees Celsius and hPa (datasheet gives 0.1 C and Pa).
        return(t / 10., p / 100.)
    def read_pressure(self):
        '''Return the current pressure in hPa (triggers a full conversion).'''
        (temperature, pressure) = self.calculate()
        return pressure
    def read_temperature(self):
        '''Return the current temperature in degrees C (triggers a full conversion).'''
        (temperature, pressure) = self.calculate()
        return temperature
    def read_temperature_and_pressure(self):
        '''Return a (temperature_C, pressure_hPa) tuple from one conversion.'''
        return self.calculate()
if __name__ == "__main__":
    # Quick manual smoke test: read one sample from a sensor at the
    # default BMP085 address. Python 2 only (print statement).
    bus = smbus.SMBus(I2CUtils.i2c_raspberry_pi_bus_number())
    bmp085 = BMP085(bus, 0x77 , "BMP085")
    print bmp085.read_temperature_and_pressure()
|
petteyg/intellij-community | refs/heads/master | python/testData/quickFixes/AddMethodQuickFixTest/addMethodFromInstance.py | 83 | class A:
def __init__(self):
self.x = 1
a = A()
a.<caret><warning descr="Unresolved attribute reference 'y' for class 'A'">y</warning>()
|
alsrgv/tensorflow | refs/heads/master | tensorflow/contrib/opt/python/training/nadam_optimizer_test.py | 16 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Nadam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import nadam_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def nadam_update_numpy(param,
                       g_t,
                       t,
                       m,
                       v,
                       alpha=0.001,
                       beta1=0.9,
                       beta2=0.999,
                       epsilon=1e-8):
  """Reference NumPy implementation of a single Nadam step.

  Returns the updated ``(param, m, v)`` triple for 1-based timestep ``t``.
  """
  # Bias-corrected learning rate for this timestep.
  step = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and its square.
  m_new = beta1 * m + (1 - beta1) * g_t
  v_new = beta2 * v + (1 - beta2) * g_t * g_t
  # Nesterov-style lookahead momentum term.
  m_nesterov = (1 - beta1) * g_t + beta1 * m_new
  updated = param - step * m_nesterov / (np.sqrt(v_new) + epsilon)
  return updated, m_new, v_new
class NadamOptimizerTest(test.TestCase):
  """Checks NadamOptimizer against the NumPy reference implementation
  above, for dense and sparse gradients, with both classic and resource
  variables, over half/float32/float64 dtypes."""

  def doTestSparse(self, use_resource=False):
    """Run 3 Nadam steps on sparse (IndexedSlices) gradients and compare
    variables against nadam_update_numpy after every step."""
    # need to use a larger value of epsilon here so that
    # np.sqrt(v_t) + epsilon doesn't get rounded to 0 when
    # the dtype is half and np.sqrt(v_t) = 0, as is the case
    # when the gradient is 0
    sparse_epsilon = 1e-7
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        # Only indices 0 and 2 receive gradient updates (index 1 has a
        # zero gradient in the dense arrays above).
        grads0_np_indices = np.array([0, 2], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np[grads0_np_indices]),
            constant_op.constant(grads0_np_indices), constant_op.constant([3]))
        grads1_np_indices = np.array([0, 2], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np[grads1_np_indices]),
            constant_op.constant(grads1_np_indices), constant_op.constant([3]))
        opt = nadam_optimizer.NadamOptimizer(epsilon=sparse_epsilon)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Nadam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0,
                                               epsilon=sparse_epsilon)
          var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1,
                                               epsilon=sparse_epsilon)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testSparse(self):
    """Sparse gradients with classic variables."""
    self.doTestSparse(use_resource=False)
  def testResourceSparse(self):
    """Sparse gradients with resource variables."""
    self.doTestSparse(use_resource=True)
  def doTestBasic(self, use_resource=False):
    """Run 3 Nadam steps on dense gradients and compare variables
    against nadam_update_numpy after every step."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = nadam_optimizer.NadamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Nadam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = nadam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = nadam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testBasic(self):
    """Dense gradients with classic variables."""
    self.doTestBasic(use_resource=False)
  def testResourceBasic(self):
    """Dense gradients with resource variables."""
    self.doTestBasic(use_resource=True)
if __name__ == "__main__":
  # Run every test case in this module through the TensorFlow test runner.
  test.main()
|
Xeralux/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/learn_runner_lib.py | 42 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run and tune an Experiment (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
@@run
@@tune
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_runner import * # pylint: disable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# No extra names are allowed beyond those brought in by the wildcard
# import above.
_allowed_symbols = []
# Strip module attributes that are neither documented nor whitelisted,
# keeping the public API limited to the documented symbols.
remove_undocumented(__name__, _allowed_symbols)
|
cfelton/gizflo | refs/heads/master | setup.py | 2 |
from setuptools import setup, find_packages
from gizflo import __version__
# The README is probably a little too long for the
# pipy stuff.
# Short summary used as the package description; the full README is too
# long for this field (see comment above).
desc = \
"""
Automated FPGA toolflow for MyHDL modules.
"""
# Standard setuptools packaging metadata for the gizflo distribution.
setup(name='gizflo',
      version=__version__,
      author="Christopher Felton",
      author_email="chris.felton@gmail.com",
      license="LGPL",
      description="automated #fpga toolflow for #myhdl modules",
      keywords="myhdl FPGA tools",
      url="http://github.com/cfelton/gizflo",
      packages=find_packages(),
      long_description=desc,
      )
|
yandy/sea | refs/heads/master | sea/utils.py | 2 | import sys
from threading import Lock
def import_string(import_name):
    """Import an object given its dotted path.

    Accepts both ``package.module.attr`` and ``package.module:attr``
    notation.  Returns the imported module (when the whole path is a
    module) or the named attribute of a module; raises ImportError when
    the target cannot be resolved.
    """
    dotted = str(import_name).replace(':', '.')
    try:
        __import__(dotted)
    except ImportError:
        # A bare module name that fails to import is a genuine error;
        # otherwise the last segment may name an attribute, not a module.
        if '.' not in dotted:
            raise
    else:
        return sys.modules[dotted]
    module_path, attr_name = dotted.rsplit('.', 1)
    module = __import__(module_path, None, None, [attr_name])
    try:
        return getattr(module, attr_name)
    except AttributeError as e:
        raise ImportError(e)
class cached_property:
    """Thread-safe descriptor that caches the wrapped function's result.

    The first access computes the value under a lock and stores it in
    the instance ``__dict__``; since this is a non-data descriptor,
    later accesses read the cached entry directly.
    """

    def __init__(self, func, name=None):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')
        self.name = name or func.__name__
        self.lock = Lock()

    def __get__(self, instance, cls=None):
        with self.lock:
            if instance is None:
                # Accessed on the class itself: expose the descriptor.
                return self
            cache = instance.__dict__
            if self.name not in cache:
                cache[self.name] = self.func(instance)
            return cache[self.name]
class Singleton(type):
    """Metaclass that turns each class using it into a singleton.

    The first instantiation of a class is memoised in ``_instances``;
    every later call returns that same object (constructor arguments of
    later calls are ignored).
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
def logger_has_level_handler(logger):
    """Check if there is a handler in the logging chain that will handle the
    given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.

    Walks from ``logger`` up through its ancestors, stopping where
    propagation is disabled, and returns True as soon as a handler whose
    level is low enough is found.
    """
    threshold = logger.getEffectiveLevel()
    node = logger
    while node:
        for handler in node.handlers:
            if handler.level <= threshold:
                return True
        if not node.propagate:
            # Records never travel past this logger, so stop searching.
            return False
        node = node.parent
    return False
|
joke2k/faker | refs/heads/master | faker/providers/person/ja_JP/__init__.py | 1 | from collections import OrderedDict
from operator import itemgetter
from .. import Provider as PersonProvider
class Provider(PersonProvider):
    """Japanese (ja_JP) person-name provider.

    Every name is stored as a (kanji, katakana reading, romanized) triple
    so that the three written forms of a generated person stay mutually
    consistent.  Flat tuples derived from the pairs are kept for backwards
    compatibility with older Faker releases.
    """
    # link: http://dic.nicovideo.jp/a/日本人の名前一覧
    # link: http://www.meijiyasuda.co.jp/enjoy/ranking/
    # Each entry: (kanji, katakana reading, romanized form).
    first_name_female_pairs = (
        ('明美', 'アケミ', 'Akemi'),
        ('あすか', 'アスカ', 'Asuka'),
        ('香織', 'カオリ', 'Kaori'),
        ('加奈', 'カナ', 'Kana'),
        ('くみ子', 'クミコ', 'Kumiko'),
        ('さゆり', 'サユリ', 'Sayuri'),
        ('知実', 'サトミ', 'Satomi'),
        ('千代', 'チヨ', 'Chiyo'),
        ('直子', 'ナオコ', 'Naoko'),
        ('七夏', 'ナナミ', 'Nanami'),
        ('花子', 'ハナコ', 'Hanako'),
        ('春香', 'ハルカ', 'Haruka'),
        ('真綾', 'マアヤ', 'Maaya'),
        ('舞', 'マイ', 'Mai'),
        ('美加子', 'ミカコ', 'Mikako'),
        ('幹', 'ミキ', 'Miki'),
        ('桃子', 'モモコ', 'Momoko'),
        ('結衣', 'ユイ', 'Yui'),
        ('裕美子', 'ユミコ', 'Yumiko'),
        ('陽子', 'ヨウコ', 'Yoko'),
        ('里佳', 'リカ', 'Rika'),
    )
    # for backwards compatibility
    first_names_female = tuple(map(itemgetter(0), first_name_female_pairs))
    first_kana_names_female = tuple(map(itemgetter(1), first_name_female_pairs))
    first_romanized_names_female = tuple(map(itemgetter(2), first_name_female_pairs))
    # Each entry: (kanji, katakana reading, romanized form).
    first_name_male_pairs = (
        ('晃', 'アキラ', 'Akira'),
        ('篤司', 'アツシ', 'Atsushi'),
        ('治', 'オサム', 'Osamu'),
        ('和也', 'カズヤ', 'Kazuya'),
        ('京助', 'キョウスケ', 'Kyosuke'),
        ('健一', 'ケンイチ', 'Kenichi'),
        ('修平', 'シュウヘイ', 'Shohei'),
        ('翔太', 'ショウタ', 'Shota'),
        ('淳', 'ジュン', 'Jun'),
        ('聡太郎', 'ソウタロウ', 'Sotaro'),
        ('太一', 'タイチ', 'Taichi'),
        ('太郎', 'タロウ', 'Taro'),
        ('拓真', 'タクマ', 'Takuma'),
        ('翼', 'ツバサ', 'Tsubasa'),
        ('智也', 'トモヤ', 'Tomoya'),
        ('直樹', 'ナオキ', 'Naoki'),
        ('直人', 'ナオト', 'Naoto'),
        ('英樹', 'ヒデキ', 'Hideki'),
        ('浩', 'ヒロシ', 'Hiroshi'),
        ('学', 'マナブ', 'Manabu'),
        ('充', 'ミツル', 'Mituru'),
        ('稔', 'ミノル', 'Minoru'),
        ('裕樹', 'ユウキ', 'Yuki'),
        ('裕太', 'ユウタ', 'Yuta'),
        ('康弘', 'ヤスヒロ', 'Yasuhiro'),
        ('陽一', 'ヨウイチ', 'Yoichi'),
        ('洋介', 'ヨウスケ', 'Yosuke'),
        ('亮介', 'リョウスケ', 'Ryosuke'),
        ('涼平', 'リョウヘイ', 'Ryohei'),
        ('零', 'レイ', 'Rei'),
    )
    # for backwards compatibility
    first_names_male = tuple(map(itemgetter(0), first_name_male_pairs))
    first_kana_names_male = tuple(map(itemgetter(1), first_name_male_pairs))
    first_romanized_names_male = tuple(map(itemgetter(2), first_name_male_pairs))
    # for backwards compatibility
    first_names = first_names_male + first_names_female
    first_kana_names = first_kana_names_male + first_kana_names_female
    first_romanized_names = first_romanized_names_male \
        + first_romanized_names_female
    first_name_pairs = first_name_male_pairs + first_name_female_pairs
    # Surnames weighted by frequency: keys are (kanji, katakana,
    # romanized) triples, values are relative weights used by
    # random_element for realistic sampling.
    last_name_pairs = OrderedDict((
        (("佐藤", "サトウ", "Sato"), 366803),
        (("鈴木", "スズキ", "Suzuki"), 321135),
        (("高橋", "タカハシ", "Takahashi"), 266782),
        (("田中", "タナカ", "Tanaka"), 245821),
        (("伊藤", "イトウ", "Ito"), 203357),
        (("渡辺", "ワタナベ", "Watanabe"), 200504),
        (("山本", "ヤマモト", "Yamamoto"), 200134),
        (("中村", "ナカムラ", "Nakamura"), 195219),
        (("小林", "コバヤシ", "Kobayashi"), 191819),
        (("加藤", "カトウ", "Kato"), 160283),
        (("吉田", "ヨシダ", "Yoshida"), 154461),
        (("山田", "ヤマダ", "Yamada"), 151675),
        (("佐々木", "ササキ", "Sasaki"), 135927),
        (("山口", "ヤマグチ", "Yamaguchi"), 119501),
        (("松本", "マツモト", "Matsumoto"), 116490),
        (("井上", "イノウエ", "Inoue"), 111287),
        (("木村", "キムラ", "Kimura"), 107446),
        (("林", "ハヤシ", "Hayashi"), 101826),
        (("斎藤", "サイトウ", "Saito"), 101774),
        (("清水", "シミズ", "Shimizu"), 97826),
        (("山崎", "ヤマザキ", "Yamazaki"), 90781),
        (("阿部", "アベ", "Abe"), 86833),
        (("森", "モリ", "Mori"), 86507),
        (("池田", "イケダ", "Ikeda"), 84860),
        (("橋本", "ハシモト", "Hashimoto"), 82836),
        (("山下", "ヤマシタ", "Yamashita"), 80588),
        (("石川", "イシカワ", "Ishikawa"), 77471),
        (("中島", "ナカジマ", "Nakajima"), 74106),
        (("前田", "マエダ", "Maeda"), 72930),
        (("藤田", "フジタ", "Fujita"), 72375),
        (("後藤", "ゴトウ", "Goto"), 71629),
        (("小川", "オガワ", "Ogawa"), 71179),
        (("岡田", "オカダ", "Okada"), 70347),
        (("長谷川", "ハセガワ", "Hasegawa"), 69201),
        (("村上", "ムラカミ", "Murakami"), 68606),
        (("近藤", "コンドウ", "Kondo"), 68297),
        (("石井", "イシイ", "Ishii"), 67079),
        (("遠藤", "エンドウ", "Endo"), 62620),
        (("斉藤", "サイトウ", "Saito"), 62540),
        (("坂本", "サカモト", "Sakamoto"), 62308),
        (("青木", "アオキ", "Aoki"), 59516),
        (("藤井", "フジイ", "Fujii"), 59204),
        (("西村", "ニシムラ", "Nishimura"), 58821),
        (("福田", "フクダ", "Fukuda"), 58714),
        (("太田", "オオタ", "Ota"), 58439),
        (("三浦", "ミウラ", "Miura"), 58006),
        (("藤原", "フジワラ", "Fujiwara"), 57742),
        (("松田", "マツダ", "Matsuda"), 55883),
        (("岡本", "オカモト", "Okamoto"), 55539),
        (("中川", "ナカガワ", "Nakagawa"), 55221),
    ))
    # for backwards compatibility only. use the pairs instead
    last_names = tuple(map(itemgetter(0), last_name_pairs))
    last_kana_names = tuple(map(itemgetter(1), last_name_pairs))
    last_romanized_names = tuple(map(itemgetter(2), last_name_pairs))
    # Kanji names: family name first, as customary in Japanese.
    formats_male = (
        '{{last_name}} {{first_name_male}}',
    )
    formats_female = (
        '{{last_name}} {{first_name_female}}',
    )
    formats = formats_male + formats_female
    # Katakana names: family name first as well.
    kana_formats_male = (
        '{{last_kana_name}} {{first_kana_name_male}}',
    )
    kana_formats_female = (
        '{{last_kana_name}} {{first_kana_name_female}}',
    )
    kana_formats = kana_formats_male + kana_formats_female
    # Romanized names: given name first, Western order.
    romanized_formats_male = (
        '{{first_romanized_name_male}} {{last_romanized_name}}',
    )
    romanized_formats_female = (
        '{{first_romanized_name_female}} {{last_romanized_name}}',
    )
    romanized_formats = romanized_formats_male + romanized_formats_female
    def first_name_pair(self):
        """
        @example ('明美', 'アケミ', 'Akemi')
        """
        return self.random_element(self.first_name_pairs)
    def first_name_male_pair(self):
        """
        @example ('晃', 'アキラ', 'Akira')
        """
        return self.random_element(self.first_name_male_pairs)
    def first_name_female_pair(self):
        """
        @example ('明美', 'アケミ', 'Akemi')
        """
        return self.random_element(self.first_name_female_pairs)
    def last_name_pair(self):
        """
        @example ('佐藤', 'サトウ', 'Sato')
        """
        return self.random_element(self.last_name_pairs)
    def first_name(self):
        """
        @example '明美'
        """
        return self.first_name_pair()[0]
    def first_name_male(self):
        """
        @example '晃'
        """
        return self.first_name_male_pair()[0]
    def first_name_female(self):
        """
        @example '明美'
        """
        return self.first_name_female_pair()[0]
    def last_name(self):
        """
        @example '佐藤'
        """
        return self.last_name_pair()[0]
    def first_kana_name(self):
        """
        @example 'アケミ'
        """
        return self.first_name_pair()[1]
    def first_kana_name_male(self):
        """
        @example 'アキラ'
        """
        return self.first_name_male_pair()[1]
    def first_kana_name_female(self):
        """
        @example 'アケミ'
        """
        return self.first_name_female_pair()[1]
    def last_kana_name(self):
        """
        @example 'サトウ'
        """
        return self.last_name_pair()[1]
    def first_romanized_name(self):
        """
        @example 'Akemi'
        """
        return self.first_name_pair()[2]
    def first_romanized_name_male(self):
        """
        @example 'Akira'
        """
        return self.first_name_male_pair()[2]
    def first_romanized_name_female(self):
        """
        @example 'Akemi'
        """
        return self.first_name_female_pair()[2]
    def last_romanized_name(self):
        """
        @example 'Sato'
        """
        return self.last_name_pair()[2]
    def kana_name(self):
        """
        @example 'サトウ アケミ'
        """
        pattern = self.random_element(self.kana_formats)
        return self.generator.parse(pattern)
    def kana_name_male(self):
        """
        @example 'サトウ アキラ'
        """
        pattern = self.random_element(self.kana_formats_male)
        return self.generator.parse(pattern)
    def kana_name_female(self):
        """
        @example 'サトウ アケミ'
        """
        pattern = self.random_element(self.kana_formats_female)
        return self.generator.parse(pattern)
    def romanized_name(self):
        """
        @example 'Akemi Sato'
        """
        pattern = self.random_element(self.romanized_formats)
        return self.generator.parse(pattern)
    def romanized_name_male(self):
        """
        @example 'Akira Sato'
        """
        pattern = self.random_element(self.romanized_formats_male)
        return self.generator.parse(pattern)
    def romanized_name_female(self):
        """
        @example 'Akemi Sato'
        """
        pattern = self.random_element(self.romanized_formats_female)
        return self.generator.parse(pattern)
|
pimutils/todoman | refs/heads/main | docs/source/conf.py | 1 | #!/usr/bin/env python3
#
# Todoman documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 15 22:10:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import todoman
from todoman.configuration import CONFIG_SPEC
from todoman.configuration import NO_DEFAULT
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Generate confspec.rst ----------------------------------------------
def confspec_rst():
    """Yield reStructuredText lines documenting every configuration option.

    Iterates CONFIG_SPEC in alphabetical order and emits, for each option,
    a cross-reference anchor, an ``object`` directive with its description,
    and ``:type:``/``:default:`` fields.
    """
    for name, type_, default, description, _validation in sorted(CONFIG_SPEC):
        # Render the default the way it should appear in the docs.
        if default == NO_DEFAULT:
            shown_default = "None, this field is mandatory."
        elif isinstance(default, str):
            shown_default = f'``"{default}"``'
        else:
            shown_default = f"``{default}``"
        yield f"\n.. _main-{name}:"
        yield f"\n\n.. object:: {name}\n"
        yield " " + "\n ".join(line for line in description.splitlines())
        yield "\n\n"
        # A tuple spec means "one of several types"; document the first.
        if isinstance(type_, tuple):
            type_name = type_[0].__name__
        else:
            type_name = type_.__name__
        yield f" :type: {type_name}"
        yield f"\n :default: {shown_default}\n"
# Render the generated configuration reference into a temporary file
# that the documentation sources include at build time.
with open("confspec.tmp", "w") as file_:
    file_.writelines(confspec_rst())
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_click.ext",
"sphinx.ext.autodoc",
"sphinx_autorun",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Todoman"
copyright = "2015-2020, Hugo Osvaldo Barrera"
author = "Hugo Osvaldo Barrera <hugo@barrera.io>, et al"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = todoman.__version__
# The full version, including alpha/beta/rc tags.
release = todoman.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"github_user": "pimutils",
"github_repo": "todoman",
"travis_button": "true",
"github_banner": "true",
"github_button": "false",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "Todomandoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: dict = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"Todoman.tex",
"Todoman Documentation",
"Hugo Osvaldo Barrera",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"man",
"todo",
"a simple, standards-based, cli todo manager",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Todoman",
"Todoman Documentation",
author,
"Todoman",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
yjmade/odoo | refs/heads/8.0 | openerp/tools/appdirs.py | 376 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
# True when running under Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
    # Keep the name `unicode` usable on Python 3, where str is unicode.
    unicode = str
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the per-user data directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "roaming" (Windows only) selects the roaming AppData directory, which
        is synchronized across machines on roaming-profile networks.

    Typical results:
        Mac OS X:  ~/Library/Application Support/<AppName>
        Unix:      $XDG_DATA_HOME/<AppName>, or ~/.local/share/<AppName>
        Windows:   roaming or local AppData\<AppAuthor>\<AppName>
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        folder = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(folder))
        if appname:
            path = os.path.join(path, appauthor, appname)
    elif sys.platform == "darwin":
        path = os.path.expanduser("~/Library/Application Support/")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honor $XDG_DATA_HOME, fall back to ~/.local/share.
        base = os.getenv("XDG_DATA_HOME",
                         os.path.expanduser("~/.local/share"))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return the shared (site-wide) data directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "multipath" (*nix only): return every entry of $XDG_DATA_DIRS joined
        with os.pathsep instead of just the first one.

    Typical results:
        Mac OS X:  /Library/Application Support/<AppName>
        Unix:      /usr/local/share/<AppName> or /usr/share/<AppName>
        Windows:   common AppData\<AppAuthor>\<AppName>
    WARNING: on Vista+ "C:\ProgramData" is a hidden *system* directory.
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            path = os.path.join(path, appauthor, appname)
    elif sys.platform == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS; only the first entry is used
        # unless multipath is requested.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        candidates = [os.path.expanduser(entry.rstrip(os.sep))
                      for entry in raw.split(os.pathsep)]
        if appname:
            suffix = os.path.join(appname, version) if version else appname
            candidates = [os.sep.join([entry, suffix])
                          for entry in candidates]
        return os.pathsep.join(candidates) if multipath else candidates[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the per-user config directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "roaming" (Windows only) selects the roaming AppData directory.

    Typical results:
        Mac OS X / Windows:  same as user_data_dir
        Unix:                $XDG_CONFIG_HOME/<AppName> or
                             ~/.config/<AppName>
    """
    if sys.platform not in ("win32", "darwin"):
        # XDG spec: honor $XDG_CONFIG_HOME, fall back to ~/.config.
        base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(base, appname) if appname else base
    else:
        # Windows and Mac make no data/config distinction.
        path = user_data_dir(appname, appauthor, None, roaming)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return the shared (site-wide) config directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "multipath" (*nix only): return every entry of $XDG_CONFIG_DIRS joined
        with os.pathsep instead of just the first one.

    Typical results:
        Mac OS X / Windows:  same as site_data_dir
        Unix:                /etc/xdg/<AppName> or each
                             $XDG_CONFIG_DIRS[i]/<AppName>
    WARNING: on Vista+ "C:\\ProgramData" is a hidden *system* directory.
    """
    if sys.platform in ("win32", "darwin"):
        # Windows and Mac make no data/config distinction.
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path
    # XDG default for $XDG_CONFIG_DIRS; only the first entry is used
    # unless multipath is requested.
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    candidates = [os.path.expanduser(entry.rstrip(os.sep))
                  for entry in raw.split(os.pathsep)]
    if appname:
        suffix = os.path.join(appname, version) if version else appname
        candidates = [os.sep.join([entry, suffix]) for entry in candidates]
    if multipath:
        return os.pathsep.join(candidates)
    return candidates[0]
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the per-user cache directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "opinion" (boolean): when False, do not append "Cache" to the base app
        data dir on Windows (where there is no dedicated cache location).

    Typical results:
        Mac OS X:  ~/Library/Caches/<AppName>
        Unix:      $XDG_CACHE_HOME/<AppName> or ~/.cache/<AppName>
        Windows:   local AppData\<AppAuthor>\<AppName>\Cache
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            path = os.path.join(path, appauthor, appname)
        # OPINION: Windows has no per-app cache convention, so carve out a
        # "Cache" subdirectory of the local app data dir.
        if opinion:
            path = os.path.join(path, "Cache")
    elif sys.platform == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honor $XDG_CACHE_HOME, fall back to ~/.cache.
        base = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the per-user log directory for this application.

    "appname" is the application name; if None, the bare system directory
        is returned.
    "appauthor" (Windows only) is the owning company/author; defaults to
        appname.
    "version" is an optional version path element, appended only when
        appname is given.
    "opinion" (boolean): when False, do not append "Logs" (Windows) or
        "log" (Unix) to the base directory.

    Typical results:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      ~/.cache/<AppName>/log (or under $XDG_CACHE_HOME)
        Windows:   local AppData\<AppAuthor>\<AppName>\Logs
    """
    if sys.platform == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Logs'),
            appname)
        if appname and version:
            path = os.path.join(path, version)
        return path
    # On Windows and Unix the base helper already folds `version` into the
    # path, so it must not be appended a second time below.
    if sys.platform == "win32":
        path = user_data_dir(appname, appauthor, version)
        suffix = "Logs"
    else:
        path = user_cache_dir(appname, appauthor, version)
        suffix = "log"
    if opinion:
        path = os.path.join(path, suffix)
    return path
class AppDirs(object):
    """Convenience wrapper for getting application dirs.

    Captures appname/appauthor/version/roaming/multipath once and exposes
    each directory kind as a read-only property delegating to the
    module-level functions.
    """
    def __init__(self, appname, appauthor=None, version=None,
                 roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        """Per-user data directory."""
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        """Shared (site-wide) data directory."""
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        """Per-user config directory."""
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        """Shared (site-wide) config directory.

        BUG FIX: this property previously delegated to site_data_dir(),
        so it returned the *data* directory (e.g. /usr/local/share/<app>
        on Unix) instead of the config directory (e.g. /etc/xdg/<app>).
        Fixed upstream in appdirs 1.4.0.
        """
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version,
                               multipath=self.multipath)

    @property
    def user_cache_dir(self):
        """Per-user cache directory."""
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        """Per-user log directory."""
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.

    NOTE(review): `_winreg` is the Python 2 module name (it became
    `winreg` in Python 3); this helper is Windows-only.
    """
    import _winreg
    # Map the CSIDL constant name to the value name used under the
    # "Shell Folders" registry key; KeyError for unsupported constants.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
    # `dir`/`type` shadow builtins; kept as-is (doc-only change).
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder via the pywin32 shell bindings.

    Windows-only; requires the `win32com` (pywin32) package.
    """
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)
        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api unavailable: keep the long (unicode) path.
                pass
    except UnicodeError:
        # Undecodable path: fall back to the original byte string.
        pass
    return dir
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder via ctypes and SHGetFolderPathW.

    Windows-only; used when pywin32 is not installed.
    """
    import ctypes
    # Numeric CSIDL values from shlobj.h; KeyError for unsupported names.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2
    return buf.value
if sys.platform == "win32":
    # Pick the best available folder-lookup backend at import time:
    # pywin32 if installed, else ctypes, else the registry fallback.
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            import ctypes
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            _get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
    # Smoke test: print every directory kind for a sample app, with and
    # without the optional `version` / `appauthor` arguments.
    appname = "MyApp"
    appauthor = "MyCompany"
    props = ("user_data_dir", "site_data_dir",
             "user_config_dir", "site_config_dir",
             "user_cache_dir", "user_log_dir")
    print("-- app dirs (with optional 'version')")
    dirs = AppDirs(appname, appauthor, version="1.0")
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))
    print("\n-- app dirs (without optional 'version')")
    dirs = AppDirs(appname, appauthor)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))
    print("\n-- app dirs (without optional 'appauthor')")
    dirs = AppDirs(appname)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))
|
AndroidOpenDevelopment/android_external_chromium_org | refs/heads/lp | third_party/libaddressinput/chromium/tools/require_fields.py | 111 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import urllib
from sys import exit as sys_exit
# Derived from region_data_constants.cc.
# Two-letter region codes whose address metadata should be queried.
_COUNTRIES = [
    'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR', 'AS',
    'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH',
    'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY',
    'BZ', 'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN',
    'CO', 'CR', 'CS', 'CV', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', 'DO',
    'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FM',
    'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL', 'GM',
    'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN',
    'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IS', 'IT',
    'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KR', 'KW',
    'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV',
    'LY', 'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MN', 'MO',
    'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA',
    'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OM',
    'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT',
    'PW', 'PY', 'QA', 'RE', 'RO', 'RS', 'RU', 'RW', 'SA', 'SB', 'SC', 'SE',
    'SG', 'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV',
    'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO',
    'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UM', 'US', 'UY', 'UZ', 'VA',
    'VC', 'VE', 'VG', 'VI', 'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW'
]
# Address-metadata endpoint; '%s' is filled with a region code above.
_I18N_URL = 'https://i18napis.appspot.com/address/data/%s'
def main():
    """Print the required address fields ('require') for each country.

    Fetches per-country address metadata from the i18n API and prints
    '<CC>: <required-fields>' for every country that declares them.
    Returns 0 on success, 1 as soon as any URL fails to load.

    NOTE(review): Python 2 code (print statement, urllib.urlopen).
    """
    for country in _COUNTRIES:
        url = _I18N_URL % country
        try:
            data = json.load(urllib.urlopen(url))
        except Exception as e:
            # `e` is intentionally unused; the URL is the useful context.
            print 'Error: could not load %s' % url
            return 1
        if 'require' in data:
            print '%s: %s' % (country, data['require'])
    return 0
if __name__ == '__main__':
    sys_exit(main())
|
Maximilian-Reuter/SickRage | refs/heads/master | lib/unidecode/x0ad.py | 253 | data = (
'gwan', # 0x00
'gwanj', # 0x01
'gwanh', # 0x02
'gwad', # 0x03
'gwal', # 0x04
'gwalg', # 0x05
'gwalm', # 0x06
'gwalb', # 0x07
'gwals', # 0x08
'gwalt', # 0x09
'gwalp', # 0x0a
'gwalh', # 0x0b
'gwam', # 0x0c
'gwab', # 0x0d
'gwabs', # 0x0e
'gwas', # 0x0f
'gwass', # 0x10
'gwang', # 0x11
'gwaj', # 0x12
'gwac', # 0x13
'gwak', # 0x14
'gwat', # 0x15
'gwap', # 0x16
'gwah', # 0x17
'gwae', # 0x18
'gwaeg', # 0x19
'gwaegg', # 0x1a
'gwaegs', # 0x1b
'gwaen', # 0x1c
'gwaenj', # 0x1d
'gwaenh', # 0x1e
'gwaed', # 0x1f
'gwael', # 0x20
'gwaelg', # 0x21
'gwaelm', # 0x22
'gwaelb', # 0x23
'gwaels', # 0x24
'gwaelt', # 0x25
'gwaelp', # 0x26
'gwaelh', # 0x27
'gwaem', # 0x28
'gwaeb', # 0x29
'gwaebs', # 0x2a
'gwaes', # 0x2b
'gwaess', # 0x2c
'gwaeng', # 0x2d
'gwaej', # 0x2e
'gwaec', # 0x2f
'gwaek', # 0x30
'gwaet', # 0x31
'gwaep', # 0x32
'gwaeh', # 0x33
'goe', # 0x34
'goeg', # 0x35
'goegg', # 0x36
'goegs', # 0x37
'goen', # 0x38
'goenj', # 0x39
'goenh', # 0x3a
'goed', # 0x3b
'goel', # 0x3c
'goelg', # 0x3d
'goelm', # 0x3e
'goelb', # 0x3f
'goels', # 0x40
'goelt', # 0x41
'goelp', # 0x42
'goelh', # 0x43
'goem', # 0x44
'goeb', # 0x45
'goebs', # 0x46
'goes', # 0x47
'goess', # 0x48
'goeng', # 0x49
'goej', # 0x4a
'goec', # 0x4b
'goek', # 0x4c
'goet', # 0x4d
'goep', # 0x4e
'goeh', # 0x4f
'gyo', # 0x50
'gyog', # 0x51
'gyogg', # 0x52
'gyogs', # 0x53
'gyon', # 0x54
'gyonj', # 0x55
'gyonh', # 0x56
'gyod', # 0x57
'gyol', # 0x58
'gyolg', # 0x59
'gyolm', # 0x5a
'gyolb', # 0x5b
'gyols', # 0x5c
'gyolt', # 0x5d
'gyolp', # 0x5e
'gyolh', # 0x5f
'gyom', # 0x60
'gyob', # 0x61
'gyobs', # 0x62
'gyos', # 0x63
'gyoss', # 0x64
'gyong', # 0x65
'gyoj', # 0x66
'gyoc', # 0x67
'gyok', # 0x68
'gyot', # 0x69
'gyop', # 0x6a
'gyoh', # 0x6b
'gu', # 0x6c
'gug', # 0x6d
'gugg', # 0x6e
'gugs', # 0x6f
'gun', # 0x70
'gunj', # 0x71
'gunh', # 0x72
'gud', # 0x73
'gul', # 0x74
'gulg', # 0x75
'gulm', # 0x76
'gulb', # 0x77
'guls', # 0x78
'gult', # 0x79
'gulp', # 0x7a
'gulh', # 0x7b
'gum', # 0x7c
'gub', # 0x7d
'gubs', # 0x7e
'gus', # 0x7f
'guss', # 0x80
'gung', # 0x81
'guj', # 0x82
'guc', # 0x83
'guk', # 0x84
'gut', # 0x85
'gup', # 0x86
'guh', # 0x87
'gweo', # 0x88
'gweog', # 0x89
'gweogg', # 0x8a
'gweogs', # 0x8b
'gweon', # 0x8c
'gweonj', # 0x8d
'gweonh', # 0x8e
'gweod', # 0x8f
'gweol', # 0x90
'gweolg', # 0x91
'gweolm', # 0x92
'gweolb', # 0x93
'gweols', # 0x94
'gweolt', # 0x95
'gweolp', # 0x96
'gweolh', # 0x97
'gweom', # 0x98
'gweob', # 0x99
'gweobs', # 0x9a
'gweos', # 0x9b
'gweoss', # 0x9c
'gweong', # 0x9d
'gweoj', # 0x9e
'gweoc', # 0x9f
'gweok', # 0xa0
'gweot', # 0xa1
'gweop', # 0xa2
'gweoh', # 0xa3
'gwe', # 0xa4
'gweg', # 0xa5
'gwegg', # 0xa6
'gwegs', # 0xa7
'gwen', # 0xa8
'gwenj', # 0xa9
'gwenh', # 0xaa
'gwed', # 0xab
'gwel', # 0xac
'gwelg', # 0xad
'gwelm', # 0xae
'gwelb', # 0xaf
'gwels', # 0xb0
'gwelt', # 0xb1
'gwelp', # 0xb2
'gwelh', # 0xb3
'gwem', # 0xb4
'gweb', # 0xb5
'gwebs', # 0xb6
'gwes', # 0xb7
'gwess', # 0xb8
'gweng', # 0xb9
'gwej', # 0xba
'gwec', # 0xbb
'gwek', # 0xbc
'gwet', # 0xbd
'gwep', # 0xbe
'gweh', # 0xbf
'gwi', # 0xc0
'gwig', # 0xc1
'gwigg', # 0xc2
'gwigs', # 0xc3
'gwin', # 0xc4
'gwinj', # 0xc5
'gwinh', # 0xc6
'gwid', # 0xc7
'gwil', # 0xc8
'gwilg', # 0xc9
'gwilm', # 0xca
'gwilb', # 0xcb
'gwils', # 0xcc
'gwilt', # 0xcd
'gwilp', # 0xce
'gwilh', # 0xcf
'gwim', # 0xd0
'gwib', # 0xd1
'gwibs', # 0xd2
'gwis', # 0xd3
'gwiss', # 0xd4
'gwing', # 0xd5
'gwij', # 0xd6
'gwic', # 0xd7
'gwik', # 0xd8
'gwit', # 0xd9
'gwip', # 0xda
'gwih', # 0xdb
'gyu', # 0xdc
'gyug', # 0xdd
'gyugg', # 0xde
'gyugs', # 0xdf
'gyun', # 0xe0
'gyunj', # 0xe1
'gyunh', # 0xe2
'gyud', # 0xe3
'gyul', # 0xe4
'gyulg', # 0xe5
'gyulm', # 0xe6
'gyulb', # 0xe7
'gyuls', # 0xe8
'gyult', # 0xe9
'gyulp', # 0xea
'gyulh', # 0xeb
'gyum', # 0xec
'gyub', # 0xed
'gyubs', # 0xee
'gyus', # 0xef
'gyuss', # 0xf0
'gyung', # 0xf1
'gyuj', # 0xf2
'gyuc', # 0xf3
'gyuk', # 0xf4
'gyut', # 0xf5
'gyup', # 0xf6
'gyuh', # 0xf7
'geu', # 0xf8
'geug', # 0xf9
'geugg', # 0xfa
'geugs', # 0xfb
'geun', # 0xfc
'geunj', # 0xfd
'geunh', # 0xfe
'geud', # 0xff
)
|
fast-project/fast-lib | refs/heads/master | vendor/yaml-cpp/test/gmock-1.7.0/test/gmock_output_test.py | 986 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Mocking Framework.
SYNOPSIS
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
# Path to the built gmock_output_test_ binary, resolved by the test utils.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
# Disable stack traces and timing so the output is deterministic.
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
# The golden (expected-output) file lives next to this script.
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # \r\n (Windows) is matched before a lone \r (old Mac).
  return re.sub(r'\r\n?', '\n', s)
def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""
  # Applied in order; each pattern strips one kind of report line.
  for pattern in (r'.*gtest_main.*\n',
                  r'\[.*\d+ tests.*\n',
                  r'\[.* test environment .*\n',
                  r'\[=+\] \d+ tests .* ran.*',
                  r'.* FAILED TESTS\n'):
    output = re.sub(pattern, '', output)
  return output
def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
    output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\:')
  return location_re.sub('FILE:#:', output)
def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""
  # The marker is a fixed string, so plain replace() suffices.
  return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""
  # Any '@<word>' token is treated as an address and canonicalized.
  return re.compile(r'@\w+').sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""
  # Drops '(used in test Foo.Bar) ' annotations, trailing space included.
  return re.compile(r'\(used in test .+\) ').sub('', output)
def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""
  # Each '(used in test Foo.Bar)' annotation yields one test name, e.g.
  # 'FooTest.Bar', in order of appearance.
  return [match.group(1)
          for match in re.finditer(r'\(used in test (.+)\)', output)]
def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """
  normalized = ToUnixLineEnding(output)
  normalized = RemoveReportHeaderAndFooter(normalized)
  normalized = NormalizeErrorMarker(normalized)
  normalized = RemoveLocations(normalized)
  normalized = RemoveMemoryAddresses(normalized)
  # Leaky-test names are harvested before being stripped from the text.
  return (RemoveTestNamesOfLeakedMocks(normalized),
          GetLeakyTests(normalized))
def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string.

  Args:
    cmd: the command and its arguments, as a list (see COMMAND above).
  """
  return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky tests.

  Args:
    cmd: the shell command.
  """
  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  raw_output = GetShellCommandOutput(cmd)
  return GetNormalizedOutputAndLeakyTests(raw_output)
class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares normalized gmock_output_test_ output against the golden file."""
  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    # Read as binary so the bytes compare exactly against the normalized
    # (UNIX-line-ending) output string.
    golden_file = open(GOLDEN_PATH, 'rb')
    golden = golden_file.read()
    golden_file.close()
    # The normalized output should match the golden file.
    self.assertEquals(golden, output)
    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
                       'GMockOutputTest.CatchesLeakedMocks'],
                      leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
gmock_test_utils.Main()
|
JetChars/vim | refs/heads/master | vim/bundle/python-mode/pymode/libs2/rope/contrib/fixsyntax.py | 8 | import rope.base.codeanalyze
import rope.base.evaluate
from rope.base import exceptions
from rope.base import libutils
from rope.base import utils
from rope.base import worder
from rope.base.codeanalyze import ArrayLinesAdapter, LogicalLineFinder
class FixSyntax(object):
    """Parses (possibly syntactically broken) source code into a PyModule.

    When parsing fails, the offending logical line is commented out
    (replaced with ``pass``) via `_Commenter`, up to `maxfixes` times,
    before giving up.

    NOTE(review): Python 2 syntax (``except ..., e``); not Python 3
    compatible as written.
    """
    def __init__(self, project, code, resource, maxfixes=1):
        self.project = project
        self.code = code
        self.resource = resource
        self.maxfixes = maxfixes
    @utils.saveit
    def get_pymodule(self):
        """Get a `PyModule`"""
        msg = None
        code = self.code
        tries = 0
        while True:
            try:
                # Prefer the resource's own (cached) module when the code
                # on disk is identical to what we were given.
                if tries == 0 and self.resource is not None and \
                   self.resource.read() == code:
                    return self.project.get_pymodule(self.resource,
                                                     force_errors=True)
                return libutils.get_string_module(
                    self.project, code, resource=self.resource,
                    force_errors=True)
            except exceptions.ModuleSyntaxError, e:
                # Remember only the first error for the final message.
                if msg is None:
                    msg = '%s:%s %s' % (e.filename, e.lineno, e.message_)
                if tries < self.maxfixes:
                    tries += 1
                    self.commenter.comment(e.lineno)
                    code = '\n'.join(self.commenter.lines)
                else:
                    raise exceptions.ModuleSyntaxError(
                        e.filename, e.lineno,
                        'Failed to fix error: {}'.format(msg))
    @property
    @utils.saveit
    def commenter(self):
        # Lazily-built, cached helper that comments out broken lines.
        return _Commenter(self.code)
    def pyname_at(self, offset):
        """Resolve the PyName at `offset`, tolerating syntax fixes."""
        pymodule = self.get_pymodule()
        def old_pyname():
            # Evaluate the primary expression textually, against the
            # scope at the offset's line in the original code.
            word_finder = worder.Worder(self.code, True)
            expression = word_finder.get_primary_at(offset)
            expression = expression.replace('\\\n', ' ').replace('\n', ' ')
            lineno = self.code.count('\n', 0, offset)
            scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
            return rope.base.evaluate.eval_str(scope, expression)
        new_code = pymodule.source_code
        def new_pyname():
            # Translate the offset into the fixed code and evaluate there.
            newoffset = self.commenter.transfered_offset(offset)
            return rope.base.evaluate.eval_location(pymodule, newoffset)
        if new_code.startswith(self.code[:offset + 1]):
            return new_pyname()
        result = old_pyname()
        if result is None:
            return new_pyname()
        return result
class _Commenter(object):
def __init__(self, code):
self.code = code
self.lines = self.code.split('\n')
self.lines.append('\n')
self.origs = range(len(self.lines) + 1)
self.diffs = [0] * (len(self.lines) + 1)
def comment(self, lineno):
start = _logical_start(self.lines, lineno, check_prev=True) - 1
# using self._get_stmt_end() instead of self._get_block_end()
# to lower commented lines
end = self._get_stmt_end(start)
indents = _get_line_indents(self.lines[start])
if 0 < start:
last_lineno = self._last_non_blank(start - 1)
last_line = self.lines[last_lineno]
if last_line.rstrip().endswith(':'):
indents = _get_line_indents(last_line) + 4
self._set(start, ' ' * indents + 'pass')
for line in range(start + 1, end + 1):
self._set(line, self.lines[start])
self._fix_incomplete_try_blocks(lineno, indents)
def transfered_offset(self, offset):
lineno = self.code.count('\n', 0, offset)
diff = sum(self.diffs[:lineno])
return offset + diff
def _last_non_blank(self, start):
while start > 0 and self.lines[start].strip() == '':
start -= 1
return start
def _get_block_end(self, lineno):
end_line = lineno
base_indents = _get_line_indents(self.lines[lineno])
for i in range(lineno + 1, len(self.lines)):
if _get_line_indents(self.lines[i]) >= base_indents:
end_line = i
else:
break
return end_line
def _get_stmt_end(self, lineno):
base_indents = _get_line_indents(self.lines[lineno])
for i in range(lineno + 1, len(self.lines)):
if _get_line_indents(self.lines[i]) <= base_indents:
return i - 1
return lineno
def _fix_incomplete_try_blocks(self, lineno, indents):
block_start = lineno
last_indents = indents
while block_start > 0:
block_start = rope.base.codeanalyze.get_block_start(
ArrayLinesAdapter(self.lines), block_start) - 1
if self.lines[block_start].strip().startswith('try:'):
indents = _get_line_indents(self.lines[block_start])
if indents > last_indents:
continue
last_indents = indents
block_end = self._find_matching_deindent(block_start)
line = self.lines[block_end].strip()
if not (line.startswith('finally:') or
line.startswith('except ') or
line.startswith('except:')):
self._insert(block_end, ' ' * indents + 'finally:')
self._insert(block_end + 1, ' ' * indents + ' pass')
def _find_matching_deindent(self, line_number):
indents = _get_line_indents(self.lines[line_number])
current_line = line_number + 1
while current_line < len(self.lines):
line = self.lines[current_line]
if not line.strip().startswith('#') and not line.strip() == '':
# HACK: We should have used logical lines here
if _get_line_indents(self.lines[current_line]) <= indents:
return current_line
current_line += 1
return len(self.lines) - 1
def _set(self, lineno, line):
self.diffs[self.origs[lineno]] += len(line) - len(self.lines[lineno])
self.lines[lineno] = line
    def _insert(self, lineno, line):
        """Insert *line* before *lineno*, keeping diffs/origs consistent.

        Note: ``self.origs[lineno]`` must be read before the ``insert``
        calls shift the mapping; ``+ 1`` accounts for the added newline.
        """
        self.diffs[self.origs[lineno]] += len(line) + 1
        self.origs.insert(lineno, self.origs[lineno])
        self.lines.insert(lineno, line)
def _logical_start(lines, lineno, check_prev=False):
    """Return the first line of the logical line containing *lineno*.

    With ``check_prev`` set, nearby previous lines are probed first so a
    *lineno* that falls inside a multi-line statement resolves to that
    statement's start.
    """
    logical_finder = LogicalLineFinder(ArrayLinesAdapter(lines))
    if check_prev:
        prev = lineno - 1
        while prev > 0:
            start, end = logical_finder.logical_line_in(prev)
            # lineno lies inside the logical line starting at `start`.
            if end is None or start <= lineno < end:
                return start
            if start <= prev:
                break
            prev -= 1
    return logical_finder.logical_line_in(lineno)[0]
def _get_line_indents(line):
    """Return the indentation width of *line* (delegates to rope)."""
    return rope.base.codeanalyze.count_line_indents(line)
|
vtesin/sklearn_tutorial | refs/heads/master | examples/plot_iris_projections.py | 3 | """
Iris Projections
----------------
This code generates the Iris projection example plots found in the tutorial
"""
from itertools import cycle
import pylab as pl
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
def plot_2D(data, target, target_names):
    """Scatter-plot the first two columns of *data*, one colour per class.

    ``target`` holds integer class ids; ``target_names`` supplies the
    legend labels (class id i uses target_names[i]).
    """
    color_cycle = cycle('rgbcmykw')
    pl.figure()
    for class_id, (color, label) in enumerate(zip(color_cycle, target_names)):
        mask = target == class_id
        pl.plot(data[mask, 0], data[mask, 1], 'o', c=color, label=label)
    pl.legend(target_names)
#----------------------------------------------------------------------
# Load iris data
iris = load_iris()
X, y = iris.data, iris.target

#----------------------------------------------------------------------
# First figure: PCA
# Project the 4-D iris measurements onto 2 whitened principal components.
pca = PCA(n_components=2, whiten=True).fit(X)
X_pca = pca.transform(X)
plot_2D(X_pca, iris.target, iris.target_names)

#----------------------------------------------------------------------
# Second figure: Kmeans labels
# Cluster the projected data (fixed seed for reproducible cluster labels)
# and colour points by cluster instead of true class.
from sklearn.cluster import KMeans
from numpy.random import RandomState
rng = RandomState(42)
kmeans = KMeans(3, random_state=rng).fit(X_pca)
plot_2D(X_pca, kmeans.labels_, ["c0", "c1", "c2"])

pl.show()
|
miltonruelas/cursotecnico | refs/heads/7.0 | branch/account_einvoice/__openerp__.py | 4 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest for the abstract electronic-invoice support.
{
    # Human-readable module name and version.
    "name": "Electronic Invoice",
    "version": "0.1",
    "description": """
Manage the electronic invoice
=============================
The management of electronic invoice integrate the invoices with digital signatures and certificates usually in a PKI infastructure with xml messages to a webservices to generate and validate the electronic invoices.
Key Features
------------
* Add support to manage the webservices communication to generate and validate a electronic invoice
* Generate a abstract model to manage electronic invoices from several countries
    """,
    "author": "Cubic ERP",
    "website": "http://cubicERP.com",
    "category": "Financial",
    # base_pki provides the digital-signature infrastructure this module builds on.
    "depends": [
        "base_pki",
        "account",
        ],
    # Security rules, workflow and view definitions loaded on install.
    "data":[
        "security/account_einvoice_security.xml",
        "security/ir.model.access.csv",
        "account_einvoice_workflow.xml",
        "account_einvoice_view.xml",
        "account_view.xml",
        ],
    "demo_xml": [],
    "active": False,
    "installable": True,
    "certificate" : "",
}
fedspendingtransparency/data-act-broker-backend | refs/heads/development | dataactvalidator/health_check.py | 1 | import logging
from flask import Flask
from dataactcore.config import CONFIG_SERVICES
from dataactcore.logging import configure_logging
from dataactcore.utils.jsonResponse import JsonResponse
logger = logging.getLogger(__name__)
def create_app():
    """Build and configure the validator's Flask application."""
    app = Flask(__name__.split('.')[0])
    app.debug = CONFIG_SERVICES['debug']
    app.config.from_object(__name__)

    @app.route("/", methods=["GET"])
    def test_app():
        """Confirm server running."""
        return "Validator is running"

    JsonResponse.debugMode = app.debug
    return app
def run_app():
    """Create the Flask app and serve it on the configured host/port."""
    create_app().run(
        threaded=True,
        host=CONFIG_SERVICES['validator_host'],
        port=CONFIG_SERVICES['validator_port'])
if __name__ == "__main__":
    # Set up logging before the server starts handling requests.
    configure_logging()
    run_app()
|
ironstein1994/IMU-based-pen-mini-project-semester-5 | refs/heads/master | reference material/MPU 6050 documents/DMP/motion_driver-5.1.2/simple_apps/msp430/motion-driver-client/motion-driver-client.py | 2 | #!/usr/bin/python
# motion-driver-client.py
# A PC application for use with Motion Driver.
# Copyright 2012 InvenSense, Inc. All Rights Reserved.
import serial, sys, time, string, pygame
from ponycube import *
# Sensor sensitivities
# Scale factors for converting raw sensor words to physical units.
# ACCEL_SENS: LSB per g; GYRO_SENS: LSB per deg/s; QUAT_SENS: 2**30
# (quaternion components arrive in q30 fixed point). Values presumably
# match the MPU-6xxx DMP output configuration — TODO confirm against the
# firmware settings.
ACCEL_SENS = 16384.0
GYRO_SENS = 16.375
QUAT_SENS = 1073741824.0

# Tap direction enums
TAP_X_UP = 1
TAP_X_DOWN = 2
TAP_Y_UP = 3
TAP_Y_DOWN = 4
TAP_Z_UP = 5
TAP_Z_DOWN = 6

# Orientation bits
ORIENTATION_X_UP = 0x01
ORIENTATION_X_DOWN = 0x02
ORIENTATION_Y_UP = 0x04
ORIENTATION_Y_DOWN = 0x08
ORIENTATION_Z_UP = 0x10
ORIENTATION_Z_DOWN = 0x20
ORIENTATION_FLIP = 0x40
ORIENTATION_ALL = 0x3F

# Android orientation enums
ANDROID_PORTRAIT = 0
ANDROID_LANDSCAPE = 1
ANDROID_R_PORTRAIT = 2
ANDROID_R_LANDSCAPE = 3
class motion_driver_packet_reader:
    """Reads Motion Driver packets from a serial port and dispatches them.

    Packets start with '$' followed by a one-byte type code; the payload
    length depends on the type. Parsed packets are handed to the
    registered quaternion/debug/data delegates (no-op delegates are used
    when none are given). Python 2 code.
    """

    def __init__(self, port, quat_delegate=None, debug_delegate=None, data_delegate=None ):
        # 115200 baud with short timeouts so read()/write() never block long.
        self.s = serial.Serial(port,115200)
        self.s.setTimeout(0.1)
        self.s.setWriteTimeout(0.2)

        if quat_delegate:
            self.quat_delegate = quat_delegate
        else:
            self.quat_delegate = empty_packet_delegate()

        if debug_delegate:
            self.debug_delegate = debug_delegate
        else:
            self.debug_delegate = empty_packet_delegate()

        if data_delegate:
            self.data_delegate = data_delegate
        else:
            self.data_delegate = empty_packet_delegate()

        self.packets = []
        self.length = 0
        self.previous = None

    def read(self):
        """Read at most one packet from the port and dispatch it."""
        NUM_BYTES = 23
        MAX_PACKET_TYPES = 8
        p = None
        if self.s.inWaiting():
            c = self.s.read(1)
            if ord(c) == ord('$'):
                # Found the start of a valid packet (maybe).
                c = self.s.read(1)
                if ord(c) < MAX_PACKET_TYPES:
                    d = None
                    p = None
                    # Payload size varies by packet type (see data_packet).
                    if ord(c) == 0 or ord(c) == 1:
                        rs = self.s.read(6)
                        d = data_packet(ord(c),rs)
                    elif ord(c) == 2:
                        rs = self.s.read(16)
                        p = quat_packet(rs)
                        self.quat_delegate.dispatch(p)
                        # Currently, we don't print quaternion data (it's really
                        # meant for the cube display only. If you'd like to
                        # change this behavior, uncomment the following line.
                        #
                        # d = data_packet(ord(c),rs)
                    elif ord(c) == 3:
                        rs = self.s.read(2)
                        d = data_packet(ord(c),rs)
                    elif ord(c) == 4:
                        rs = self.s.read(1)
                        d = data_packet(ord(c),rs)
                    elif ord(c) == 5:
                        rs = self.s.read(8)
                        d = data_packet(ord(c),rs)
                    elif ord(c) == 6:
                        rs = self.s.read(4)
                        d = data_packet(ord(c),rs)

                    if d != None:
                        self.data_delegate.dispatch(d)
                else:
                    print "invalid packet type.."

    def write(self,a):
        # Forward raw bytes (e.g. key presses) to the device.
        self.s.write(a)

    def close(self):
        self.s.close()

    def write_log(self,fname):
        # NOTE(review): self.packets is never appended to above, so this
        # writes an empty log — confirm intended behaviour.
        f = open(fname,'w')
        for p in self.packets:
            f.write(p.logfile_line())
        f.close()
# =========== PACKET DELEGATES ==========
class packet_delegate(object):
    """Base class for packet handlers; subclasses override loop/dispatch."""

    def loop(self,event):
        # Called once per main-loop iteration with the current pygame event.
        print "generic packet_delegate loop w/event",event

    def dispatch(self,p):
        # Called with each parsed packet.
        print "generic packet_delegate dispatched",p
class empty_packet_delegate(packet_delegate):
    """No-op delegate used when no handler is registered."""

    def loop(self, event):
        """Ignore the main-loop tick."""
        pass

    def dispatch(self, p):
        """Discard the packet."""
        pass
class cube_packet_viewer (packet_delegate):
    """Renders the most recent quaternion packet as a rotating cube."""

    def __init__(self):
        self.screen = Screen(480,400,scale=1.5)
        self.cube = Cube(30,60,10)
        self.q = Quaternion(1,0,0,0)
        self.previous = None # previous quaternion
        self.latest = None # latest packet (get in dispatch, use in loop)

    def loop(self,event):
        # Redraw only when a new quaternion packet has arrived since the
        # last frame, then clear it so each packet is drawn once.
        packet = self.latest
        if packet:
            q = packet.to_q().normalized()
            self.cube.erase(self.screen)
            self.cube.draw(self.screen,q)
            pygame.display.flip()
            self.latest = None

    def dispatch(self,p):
        if isinstance(p,quat_packet):
            self.latest = p
class debug_packet_viewer (packet_delegate):
    """Prints each debug packet's text as it arrives."""

    def loop(self, event):
        pass

    def dispatch(self, p):
        assert isinstance(p, debug_packet)
        p.display()
class data_packet_viewer (packet_delegate):
    """Prints each data packet (accel/gyro/tap/...) as it arrives."""

    def loop(self, event):
        pass

    def dispatch(self, p):
        assert isinstance(p, data_packet)
        p.display()
# =============== PACKETS =================
# For 16-bit signed integers.
def two_bytes(d1, d2):
    """Combine two byte characters into a signed 16-bit big-endian integer."""
    value = (ord(d1) << 8) + ord(d2)
    # Apply two's-complement wrap for the negative half of the range.
    if value > 32767:
        value -= 65536
    return value
# For 32-bit signed integers.
def four_bytes(d1, d2, d3, d4):
    """Combine four byte characters into a signed 32-bit big-endian integer.

    Fix: the original tested ``d > 2147483648``, which left the boundary
    value 0x80000000 positive; two's-complement wrap must trigger at
    ``d >= 2**31`` so it maps to -2147483648.
    """
    d = ord(d1)*(1<<24) + ord(d2)*(1<<16) + ord(d3)*(1<<8) + ord(d4)
    if d >= 2147483648:
        d -= 4294967296
    return d
class debug_packet (object):
    """Holds a debug packet whose payload is a NUL-padded ASCII string."""

    # body of packet is a debug string
    def __init__(self, l):
        # Bytes 3..20 carry the message; drop NUL padding characters.
        chars = [c for c in l[3:21] if ord(c) != 0]
        self.s = "".join(chars)

    def display(self):
        sys.stdout.write(self.s)
class data_packet (object):
    """Decodes a typed Motion Driver payload into physical values.

    Type codes: 0 accel (g), 1 gyro (deg/s), 2 quaternion (unit, q30),
    3 tap, 4 Android screen orientation, 5 pedometer, 6 misc/self-test.
    Python 2 code (print statements, str payloads).
    """

    def __init__(self, type, l):
        self.data = [0,0,0,0]
        self.type = type
        if self.type == 0:      # accel
            self.data[0] = two_bytes(l[0],l[1]) / ACCEL_SENS
            self.data[1] = two_bytes(l[2],l[3]) / ACCEL_SENS
            self.data[2] = two_bytes(l[4],l[5]) / ACCEL_SENS

        elif self.type == 1:    # gyro
            self.data[0] = two_bytes(l[0],l[1]) / GYRO_SENS
            self.data[1] = two_bytes(l[2],l[3]) / GYRO_SENS
            self.data[2] = two_bytes(l[4],l[5]) / GYRO_SENS

        elif self.type == 2:    # quaternion
            self.data[0] = four_bytes(l[0],l[1],l[2],l[3]) / QUAT_SENS
            self.data[1] = four_bytes(l[4],l[5],l[6],l[7]) / QUAT_SENS
            self.data[2] = four_bytes(l[8],l[9],l[10],l[11]) / QUAT_SENS
            self.data[3] = four_bytes(l[12],l[13],l[14],l[15]) / QUAT_SENS

        elif self.type == 3:    # tap
            # data[0]: direction enum (TAP_*), data[1]: tap count.
            self.data[0] = ord(l[0])
            self.data[1] = ord(l[1])

        elif self.type == 4:    # Android orient
            self.data[0] = ord(l[0])

        elif self.type == 5:    # pedometer
            # data[0]: step count, data[1]: walk time in milliseconds.
            self.data[0] = four_bytes(l[0],l[1],l[2],l[3])
            self.data[1] = four_bytes(l[4],l[5],l[6],l[7])

        elif self.type == 6:    # misc
            # 't' marks a self-test result; data[1] == 7 means all passed.
            self.data[0] = ord(l[0])
            if self.data[0] == ord('t'):
                # test event
                self.data[1] = ord(l[1])

        else:   # unsupported
            pass

    def display(self):
        """Print a human-readable line for this packet (stdout)."""
        if self.type == 0:
            print 'accel: %7.3f %7.3f %7.3f' % \
                (self.data[0], self.data[1], self.data[2])
        elif self.type == 1:
            print 'gyro: %9.5f %9.5f %9.5f' % \
                (self.data[0], self.data[1], self.data[2])
        elif self.type == 2:
            print 'quat: %7.4f %7.4f %7.4f %7.4f' % \
                (self.data[0], self.data[1], self.data[2], self.data[3])
        elif self.type == 3:
            if self.data[0] == TAP_X_UP:
                s = "+ X"
            elif self.data[0] == TAP_X_DOWN:
                s = "- X"
            elif self.data[0] == TAP_Y_UP:
                s = "+ Y"
            elif self.data[0] == TAP_Y_DOWN:
                s = "- Y"
            elif self.data[0] == TAP_Z_UP:
                s = "+ Z"
            elif self.data[0] == TAP_Z_DOWN:
                s = "- Z"
            print 'Detected %s-axis tap x%d' % (s, self.data[1])
        elif self.type == 4:
            if self.data[0] == ANDROID_PORTRAIT:
                s = "Portrait"
            elif self.data[0] == ANDROID_LANDSCAPE:
                s = "Landscape"
            elif self.data[0] == ANDROID_R_PORTRAIT:
                s = "Reverse portrait"
            elif self.data[0] == ANDROID_R_LANDSCAPE:
                s = "Reverse landscape"
            print 'Screen orientation: %s' % s
        elif self.type == 5:
            print 'Walked %d steps over %d milliseconds.' % \
                (self.data[0], self.data[1])
        elif self.type == 6:
            if self.data[0] == ord('t'):
                if self.data[1] == 7:
                    print 'Self test passed.'
                else:
                    print 'Self test failed.'
            pass
        else:
            print 'what?'
class quat_packet (object):
    """Decodes a 16-byte q30 quaternion payload (w, x, y, z)."""

    def __init__(self, l):
        self.l = l
        # Each component is a big-endian signed 32-bit q30 fixed-point value.
        self.q0 = four_bytes(l[0],l[1],l[2],l[3]) / QUAT_SENS
        self.q1 = four_bytes(l[4],l[5],l[6],l[7]) / QUAT_SENS
        self.q2 = four_bytes(l[8],l[9],l[10],l[11]) / QUAT_SENS
        self.q3 = four_bytes(l[12],l[13],l[14],l[15]) / QUAT_SENS

    def display_raw(self):
        # Dump the raw payload bytes for debugging.
        l = self.l
        print "".join(
            [ str(ord(l[0])), " "] + \
            [ str(ord(l[1])), " "] + \
            [ str(ord(a)).ljust(4) for a in
                [ l[2], l[3], l[4], l[5], l[6], l[7], l[8], l[9], l[10] ] ] + \
            [ str(ord(a)).ljust(4) for a in
                [ l[8], l[9], l[10] , l[11], l[12], l[13]] ]
            )

    def display(self):
        if 1:
            print "qs " + " ".join([str(s).ljust(15) for s in
                [ self.q0, self.q1, self.q2, self.q3 ]])

    def to_q(self):
        """Return the packet as a ponycube Quaternion."""
        return Quaternion(self.q0, self.q1, self.q2, self.q3)
# =============== MAIN ======================
if __name__ == "__main__":

    # Expect a single COM-port number argument (1-based on the CLI,
    # converted to pyserial's 0-based index).
    if len(sys.argv) == 2:
        comport = int(sys.argv[1]) - 1
    else:
        print "usage: " + sys.argv[0] + " port"
        sys.exit(-1)

    pygame.init()
    viewer = cube_packet_viewer()
    debug = debug_packet_viewer()
    data = data_packet_viewer()

    reader = motion_driver_packet_reader(comport,
                quat_delegate = viewer,
                debug_delegate = debug,
                data_delegate = data)

    while 1:
        event = pygame.event.poll()
        # TODO: Allow exit via keystroke.
        if event.type == pygame.QUIT:
            # NOTE(review): cube_packet_viewer defines no close(); this
            # probably should be reader.close() — confirm before relying
            # on clean shutdown.
            viewer.close()
            break
        if event.type == pygame.KEYDOWN:
            # Forward key presses to the device as single-character commands.
            reader.write(pygame.key.name(event.key))

        reader.read()
        viewer.loop(event)
        debug.loop(event)
        data.loop(event)

        # TODO: If system load is too high, increase this sleep time.
        pygame.time.delay(0)
|
shujaatak/UAV_MissionPlanner | refs/heads/master | Lib/site-packages/numpy/oldnumeric/linear_algebra.py | 102 | """Backward compatible with LinearAlgebra from Numeric
"""
# This module is a lite version of the linalg.py module in SciPy which contains
# high-level Python interface to the LAPACK library. The lite version
# only accesses the following LAPACK functions: dgesv, zgesv, dgeev,
# zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, dpotrf.
__all__ = ['LinAlgError', 'solve_linear_equations',
'inverse', 'cholesky_decomposition', 'eigenvalues',
'Heigenvalues', 'generalized_inverse',
'determinant', 'singular_value_decomposition',
'eigenvectors', 'Heigenvectors',
'linear_least_squares'
]
from numpy.core import transpose
import numpy.linalg as linalg
# Linear equations
# Re-export numpy.linalg's exception under the old Numeric name.
LinAlgError = linalg.LinAlgError
def solve_linear_equations(a, b):
    """Solve ``a . x = b`` (Numeric-era wrapper for numpy.linalg.solve)."""
    solution = linalg.solve(a, b)
    return solution
# Matrix inversion
def inverse(a):
    """Matrix inverse (Numeric-era wrapper for numpy.linalg.inv)."""
    result = linalg.inv(a)
    return result
# Cholesky decomposition
def cholesky_decomposition(a):
    """Lower-triangular Cholesky factor (wrapper for numpy.linalg.cholesky)."""
    factor = linalg.cholesky(a)
    return factor
# Eigenvalues
def eigenvalues(a):
    """Eigenvalues of a general matrix (wrapper for numpy.linalg.eigvals)."""
    values = linalg.eigvals(a)
    return values
def Heigenvalues(a, UPLO='L'):
    """Eigenvalues of a Hermitian matrix (wrapper for numpy.linalg.eigvalsh)."""
    values = linalg.eigvalsh(a, UPLO)
    return values
# Eigenvectors
def eigenvectors(A):
    """Eigenvalues and eigenvectors of *A*.

    Returned in the old Numeric convention: eigenvectors are the ROWS of
    the second result (numpy.linalg.eig returns them as columns).
    """
    values, columns = linalg.eig(A)
    return values, transpose(columns)
def Heigenvectors(A):
    """Eigenvalues and row-wise eigenvectors of a Hermitian matrix.

    Wraps numpy.linalg.eigh and transposes so eigenvectors are rows
    (old Numeric convention).
    """
    values, columns = linalg.eigh(A)
    return values, transpose(columns)
# Generalized inverse
def generalized_inverse(a, rcond = 1.e-10):
    """Moore-Penrose pseudo-inverse (wrapper for numpy.linalg.pinv)."""
    result = linalg.pinv(a, rcond)
    return result
# Determinant
def determinant(a):
    """Matrix determinant (wrapper for numpy.linalg.det)."""
    value = linalg.det(a)
    return value
# Linear Least Squares
def linear_least_squares(a, b, rcond=1.e-10):
    """Least-squares solution of ``a . x ~= b``.

    Returns ``(x, resids, rank, s)`` where ``x`` minimizes
    ``2-norm(b - a . x)``, ``resids`` is the sum of squared residuals
    (empty when rank(a) is less than the number of columns or greater
    than the number of rows), ``rank`` is the rank of *a*, and ``s``
    holds its singular values in descending order. Columns of ``b``
    produce matching columns of ``x``. Singular values smaller than
    ``s[0] * rcond`` are treated as zero. Wraps numpy.linalg.lstsq.
    """
    return linalg.lstsq(a, b, rcond)
def singular_value_decomposition(A, full_matrices=0):
    """SVD of *A* (wrapper for numpy.linalg.svd; reduced form by default)."""
    return linalg.svd(A, full_matrices)
|
bathepawan/workload-automation | refs/heads/master | wlauto/resource_getters/__init__.py | 48 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
ShashaQin/erpnext | refs/heads/develop | erpnext/controllers/stock_controller.py | 1 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, flt, cstr
from frappe import msgprint, _
import frappe.defaults
from erpnext.accounts.utils import get_fiscal_year
from erpnext.accounts.general_ledger import make_gl_entries, delete_gl_entries, process_gl_map
from erpnext.stock.utils import get_incoming_rate
from erpnext.controllers.accounts_controller import AccountsController
class StockController(AccountsController):
	"""Shared stock-ledger and GL-posting behaviour for stock vouchers
	(Delivery Note, Purchase Receipt, Stock Entry, Stock Reconciliation,
	Sales Invoice with update_stock)."""

	def make_gl_entries(self, repost_future_gle=True):
		"""Post (or on cancel, delete) GL entries for this voucher under
		perpetual inventory; optionally repost all future vouchers."""
		if self.docstatus == 2:
			delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)

		if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
			warehouse_account = get_warehouse_account()

			if self.docstatus==1:
				gl_entries = self.get_gl_entries(warehouse_account)
				make_gl_entries(gl_entries)

			if repost_future_gle:
				items, warehouses = self.get_items_and_warehouses()
				update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items,
					warehouse_account)

	def get_gl_entries(self, warehouse_account=None, default_expense_account=None,
			default_cost_center=None):
		"""Build the warehouse-debit / expense-credit GL pairs from this
		voucher's Stock Ledger Entries."""
		if not warehouse_account:
			warehouse_account = get_warehouse_account()

		sle_map = self.get_stock_ledger_details()
		voucher_details = self.get_voucher_details(default_expense_account, default_cost_center, sle_map)

		gl_list = []
		warehouse_with_no_account = []

		for detail in voucher_details:
			sle_list = sle_map.get(detail.name)
			if sle_list:
				for sle in sle_list:
					if warehouse_account.get(sle.warehouse):
						# from warehouse account
						self.check_expense_account(detail)

						# project/support ticket dimensions only apply to P&L accounts
						account_report_type = frappe.db.get_value("Account", warehouse_account[sle.warehouse]["name"], "report_type")
						if account_report_type == "Profit and Loss":
							project_name=self.get("project")
							support_ticket=self.get("support_ticket")
						else:
							project_name=''
							support_ticket=''
						gl_list.append(self.get_gl_dict({
							"account": warehouse_account[sle.warehouse]["name"],
							"against": detail.expense_account,
							"cost_center": detail.cost_center,
							"project_name": project_name,
							"support_ticket": support_ticket,
							"remarks": self.get("remarks") or "Accounting Entry for Stock",
							"debit": flt(sle.stock_value_difference, 2),
						}, warehouse_account[sle.warehouse]["account_currency"]))

						# to target warehouse / expense account
						account_report_type = frappe.db.get_value("Account", detail.expense_account, "report_type")
						if account_report_type == "Profit and Loss":
							project_name=self.get("project")
							support_ticket=self.get("support_ticket")
						else:
							project_name=''
							support_ticket=''
						gl_list.append(self.get_gl_dict({
							"account": detail.expense_account,
							"against": warehouse_account[sle.warehouse]["name"],
							"cost_center": detail.cost_center,
							"project_name": project_name,
							"support_ticket": support_ticket,
							"remarks": self.get("remarks") or "Accounting Entry for Stock",
							"credit": flt(sle.stock_value_difference, 2),
						}))
					elif sle.warehouse not in warehouse_with_no_account:
						warehouse_with_no_account.append(sle.warehouse)

		if warehouse_with_no_account:
			msgprint(_("No accounting entries for the following warehouses") + ": \n" +
				"\n".join(warehouse_with_no_account))

		return process_gl_map(gl_list)

	def get_voucher_details(self, default_expense_account, default_cost_center, sle_map):
		"""Return per-row account/cost-center details; Stock Reconciliation
		has no item rows, so rows are synthesised from the SLE map."""
		if self.doctype == "Stock Reconciliation":
			return [frappe._dict({ "name": voucher_detail_no, "expense_account": default_expense_account,
				"cost_center": default_cost_center }) for voucher_detail_no, sle in sle_map.items()]
		else:
			details = self.get("items")

			if default_expense_account or default_cost_center:
				for d in details:
					if default_expense_account and not d.get("expense_account"):
						d.expense_account = default_expense_account
					if default_cost_center and not d.get("cost_center"):
						d.cost_center = default_cost_center

			return details

	def get_items_and_warehouses(self):
		"""Return the distinct item codes and warehouses this voucher touches."""
		items, warehouses = [], []

		if hasattr(self, "items"):
			item_doclist = self.get("items")
		elif self.doctype == "Stock Reconciliation":
			import json
			item_doclist = []
			data = json.loads(self.reconciliation_json)
			for row in data[data.index(self.head_row)+1:]:
				d = frappe._dict(zip(["item_code", "warehouse", "qty", "valuation_rate"], row))
				item_doclist.append(d)

		# NOTE(review): item_doclist is unbound if neither branch above ran;
		# presumably every caller has an "items" table or is a Stock
		# Reconciliation — confirm.
		if item_doclist:
			for d in item_doclist:
				if d.item_code and d.item_code not in items:
					items.append(d.item_code)

				if d.get("warehouse") and d.warehouse not in warehouses:
					warehouses.append(d.warehouse)

				if self.doctype == "Stock Entry":
					if d.get("s_warehouse") and d.s_warehouse not in warehouses:
						warehouses.append(d.s_warehouse)
					if d.get("t_warehouse") and d.t_warehouse not in warehouses:
						warehouses.append(d.t_warehouse)

		return items, warehouses

	def get_stock_ledger_details(self):
		"""Map voucher_detail_no -> list of this voucher's SLE rows."""
		stock_ledger = {}
		for sle in frappe.db.sql("""select warehouse, stock_value_difference,
			voucher_detail_no, item_code, posting_date, actual_qty
			from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
			(self.doctype, self.name), as_dict=True):
				stock_ledger.setdefault(sle.voucher_detail_no, []).append(sle)
		return stock_ledger

	def make_adjustment_entry(self, expected_gle, voucher_obj):
		"""Post balancing entries against the Stock Adjustment account when
		GL balances and stock values disagree."""
		from erpnext.accounts.utils import get_stock_and_account_difference
		account_list = [d.account for d in expected_gle]
		acc_diff = get_stock_and_account_difference(account_list, expected_gle[0].posting_date)

		cost_center = self.get_company_default("cost_center")
		stock_adjustment_account = self.get_company_default("stock_adjustment_account")

		gl_entries = []
		for account, diff in acc_diff.items():
			if diff:
				gl_entries.append([
					# stock in hand account
					voucher_obj.get_gl_dict({
						"account": account,
						"against": stock_adjustment_account,
						"debit": diff,
						"remarks": "Adjustment Accounting Entry for Stock",
					}),

					# account against stock in hand
					voucher_obj.get_gl_dict({
						"account": stock_adjustment_account,
						"against": account,
						"credit": diff,
						"cost_center": cost_center or None,
						"remarks": "Adjustment Accounting Entry for Stock",
					}),
				])

		if gl_entries:
			from erpnext.accounts.general_ledger import make_gl_entries
			make_gl_entries(gl_entries)

	def check_expense_account(self, item):
		"""Validate the row's expense/difference account and cost center."""
		if not item.get("expense_account"):
			frappe.throw(_("Expense or Difference account is mandatory for Item {0} as it impacts overall stock value").format(item.item_code))
		else:
			is_expense_account = frappe.db.get_value("Account",
				item.get("expense_account"), "report_type")=="Profit and Loss"
			if self.doctype not in ("Purchase Receipt", "Stock Reconciliation", "Stock Entry") and not is_expense_account:
				frappe.throw(_("Expense / Difference account ({0}) must be a 'Profit or Loss' account")
					.format(item.get("expense_account")))
			if is_expense_account and not item.get("cost_center"):
				frappe.throw(_("{0} {1}: Cost Center is mandatory for Item {2}").format(
					_(self.doctype), self.name, item.get("item_code")))

	def get_sl_entries(self, d, args):
		"""Build a Stock Ledger Entry dict for row *d*; *args* overrides
		any default field (sign of actual_qty flips on cancel)."""
		sl_dict = frappe._dict({
			"item_code": d.get("item_code", None),
			"warehouse": d.get("warehouse", None),
			"posting_date": self.posting_date,
			"posting_time": self.posting_time,
			'fiscal_year': get_fiscal_year(self.posting_date, company=self.company)[0],
			"voucher_type": self.doctype,
			"voucher_no": self.name,
			"voucher_detail_no": d.name,
			"actual_qty": (self.docstatus==1 and 1 or -1)*flt(d.get("stock_qty")),
			"stock_uom": frappe.db.get_value("Item", args.get("item_code") or d.get("item_code"), "stock_uom"),
			"incoming_rate": 0,
			"company": self.company,
			"batch_no": cstr(d.get("batch_no")).strip(),
			"serial_no": d.get("serial_no"),
			"project": d.get("project"),
			"is_cancelled": self.docstatus==2 and "Yes" or "No"
		})

		sl_dict.update(args)
		return sl_dict

	def make_sl_entries(self, sl_entries, is_amended=None, allow_negative_stock=False,
			via_landed_cost_voucher=False):
		"""Persist the given SLE dicts via the stock ledger module."""
		from erpnext.stock.stock_ledger import make_sl_entries
		make_sl_entries(sl_entries, is_amended, allow_negative_stock, via_landed_cost_voucher)

	def make_gl_entries_on_cancel(self):
		"""Re-run GL posting on cancel (docstatus 2 triggers deletion)."""
		if frappe.db.sql("""select name from `tabGL Entry` where voucher_type=%s
			and voucher_no=%s""", (self.doctype, self.name)):
				self.make_gl_entries()

	def get_serialized_items(self):
		"""Return item codes in this voucher that carry serial numbers."""
		serialized_items = []
		item_codes = list(set([d.item_code for d in self.get("items")]))
		if item_codes:
			serialized_items = frappe.db.sql_list("""select name from `tabItem`
				where has_serial_no=1 and name in ({})""".format(", ".join(["%s"]*len(item_codes))),
				tuple(item_codes))

		return serialized_items

	def get_incoming_rate_for_sales_return(self, item_code, against_document):
		"""Valuation rate for a returned item, taken from the original
		voucher's stock ledger entry (0.0 when unavailable)."""
		incoming_rate = 0.0
		if against_document and item_code:
			incoming_rate = frappe.db.sql("""select abs(stock_value_difference / actual_qty)
				from `tabStock Ledger Entry`
				where voucher_type = %s and voucher_no = %s
				and item_code = %s limit 1""",
				(self.doctype, against_document, item_code))
			incoming_rate = incoming_rate[0][0] if incoming_rate else 0.0

		return incoming_rate

	def update_reserved_qty(self):
		"""Release/update reserved qty on the Sales Orders this delivery
		or invoice fulfils; blocks closed/cancelled orders."""
		so_map = {}
		for d in self.get("items"):
			if d.so_detail:
				if self.doctype == "Delivery Note" and d.against_sales_order:
					so_map.setdefault(d.against_sales_order, []).append(d.so_detail)
				elif self.doctype == "Sales Invoice" and d.sales_order and self.update_stock:
					so_map.setdefault(d.sales_order, []).append(d.so_detail)

		for so, so_item_rows in so_map.items():
			if so and so_item_rows:
				sales_order = frappe.get_doc("Sales Order", so)
				if sales_order.status in ["Closed", "Cancelled"]:
					frappe.throw(_("{0} {1} is cancelled or closed").format(_("Sales Order"), so),
						frappe.InvalidStatusError)

				sales_order.update_reserved_qty(so_item_rows)

	def update_stock_ledger(self):
		"""Create the SLE rows for an outgoing voucher, handling returns
		and inter-warehouse (target_warehouse) transfers."""
		self.update_reserved_qty()

		sl_entries = []
		for d in self.get_item_list():
			if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1 and flt(d.qty):
				return_rate = 0
				if cint(self.is_return) and self.return_against and self.docstatus==1:
					return_rate = self.get_incoming_rate_for_sales_return(d.item_code, self.return_against)

				# On cancellation or if return entry submission, make stock ledger entry for
				# target warehouse first, to update serial no values properly

				if d.warehouse and ((not cint(self.is_return) and self.docstatus==1)
					or (cint(self.is_return) and self.docstatus==2)):
						sl_entries.append(self.get_sl_entries(d, {
							"actual_qty": -1*flt(d.qty),
							"incoming_rate": return_rate
						}))

				if d.target_warehouse:
					target_warehouse_sle = self.get_sl_entries(d, {
						"actual_qty": flt(d.qty),
						"warehouse": d.target_warehouse
					})

					if self.docstatus == 1:
						if not cint(self.is_return):
							# Value the incoming side at the source warehouse's rate.
							args = frappe._dict({
								"item_code": d.item_code,
								"warehouse": d.warehouse,
								"posting_date": self.posting_date,
								"posting_time": self.posting_time,
								"qty": -1*flt(d.qty),
								"serial_no": d.serial_no
							})
							target_warehouse_sle.update({
								"incoming_rate": get_incoming_rate(args)
							})
						else:
							target_warehouse_sle.update({
								"outgoing_rate": return_rate
							})

					sl_entries.append(target_warehouse_sle)

				if d.warehouse and ((not cint(self.is_return) and self.docstatus==2)
					or (cint(self.is_return) and self.docstatus==1)):
						sl_entries.append(self.get_sl_entries(d, {
							"actual_qty": -1*flt(d.qty),
							"incoming_rate": return_rate
						}))

		self.make_sl_entries(sl_entries)

	def validate_warehouse(self):
		"""Ensure every row's warehouse belongs to this voucher's company."""
		from erpnext.stock.utils import validate_warehouse_company

		warehouses = list(set([d.warehouse for d in
			self.get("items") if getattr(d, "warehouse", None)]))

		for w in warehouses:
			validate_warehouse_company(w, self.company)

	def update_billing_percentage(self, update_modified=True):
		"""Recompute per_billed on this document from billed item amounts."""
		self._update_percent_field({
			"target_dt": self.doctype + " Item",
			"target_parent_dt": self.doctype,
			"target_parent_field": "per_billed",
			"target_ref_field": "amount",
			"target_field": "billed_amt",
			"name": self.name,
		}, update_modified)
def update_gl_entries_after(posting_date, posting_time, for_warehouses=None, for_items=None,
		warehouse_account=None):
	"""Repost GL entries of all stock vouchers dated after the given
	posting timestamp whose expected entries no longer match what is in
	the ledger (e.g. after a back-dated valuation change)."""
	def _delete_gl_entries(voucher_type, voucher_no):
		frappe.db.sql("""delete from `tabGL Entry`
			where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))

	if not warehouse_account:
		warehouse_account = get_warehouse_account()

	future_stock_vouchers = get_future_stock_vouchers(posting_date, posting_time, for_warehouses, for_items)
	gle = get_voucherwise_gl_entries(future_stock_vouchers, posting_date)

	for voucher_type, voucher_no in future_stock_vouchers:
		existing_gle = gle.get((voucher_type, voucher_no), [])
		voucher_obj = frappe.get_doc(voucher_type, voucher_no)
		expected_gle = voucher_obj.get_gl_entries(warehouse_account)
		if expected_gle:
			# Repost only when the stored entries differ from the expected ones.
			if not existing_gle or not compare_existing_and_expected_gle(existing_gle,
				expected_gle):
					_delete_gl_entries(voucher_type, voucher_no)
					voucher_obj.make_gl_entries(repost_future_gle=False)
		else:
			_delete_gl_entries(voucher_type, voucher_no)
def compare_existing_and_expected_gle(existing_gle, expected_gle):
	"""Return True when the stored GL entries already match the expected ones.

	For each expected entry the account must exist in the ledger, and any
	entry matching on account + against_account (+ compatible cost center)
	must carry the same debit and credit. Note the control flow is subtle:
	an amount mismatch only breaks the inner loop (remaining expected
	entries are still scanned), while a missing account aborts immediately.
	"""
	matched = True
	for entry in expected_gle:
		account_existed = False
		for e in existing_gle:
			if entry.account==e.account:
				account_existed = True
			if entry.account==e.account and entry.against_account==e.against_account \
				and (not entry.cost_center or not e.cost_center or entry.cost_center==e.cost_center) \
				and (entry.debit != e.debit or entry.credit != e.credit):
					matched = False
					break
		if not account_existed:
			matched = False
			break
	return matched
def get_future_stock_vouchers(posting_date, posting_time, for_warehouses=None, for_items=None):
	"""List [voucher_type, voucher_no] pairs with stock ledger entries on
	or after the given timestamp, optionally filtered by items/warehouses,
	in posting order (conditions are parameterized, not interpolated)."""
	future_stock_vouchers = []

	values = []
	condition = ""
	if for_items:
		condition += " and item_code in ({})".format(", ".join(["%s"] * len(for_items)))
		values += for_items

	if for_warehouses:
		condition += " and warehouse in ({})".format(", ".join(["%s"] * len(for_warehouses)))
		values += for_warehouses

	for d in frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
		from `tabStock Ledger Entry` sle
		where timestamp(sle.posting_date, sle.posting_time) >= timestamp(%s, %s) {condition}
		order by timestamp(sle.posting_date, sle.posting_time) asc, name asc""".format(condition=condition),
		tuple([posting_date, posting_time] + values), as_dict=True):
			future_stock_vouchers.append([d.voucher_type, d.voucher_no])

	return future_stock_vouchers
def get_voucherwise_gl_entries(future_stock_vouchers, posting_date):
	"""Map (voucher_type, voucher_no) -> list of its GL entries on or
	after *posting_date*, for the given vouchers."""
	gl_entries = {}
	if future_stock_vouchers:
		for d in frappe.db.sql("""select * from `tabGL Entry`
			where posting_date >= %s and voucher_no in (%s)""" %
			('%s', ', '.join(['%s']*len(future_stock_vouchers))),
			tuple([posting_date] + [d[1] for d in future_stock_vouchers]), as_dict=1):
				gl_entries.setdefault((d.voucher_type, d.voucher_no), []).append(d)

	return gl_entries
def get_warehouse_account():
	"""Map warehouse name -> its Account row (name, account_currency) for
	all accounts of type 'Warehouse' that are linked to a warehouse."""
	warehouse_account = frappe._dict()

	for d in frappe.db.sql("""select warehouse, name, account_currency from tabAccount
		where account_type = 'Warehouse' and (warehouse is not null and warehouse != '')""", as_dict=1):
			warehouse_account.setdefault(d.warehouse, d)
	return warehouse_account
|
ZHAW-INES/rioxo-uClinux-dist | refs/heads/rtsp | user/python/python-2.4.4/Tools/modulator/ScrolledListbox.py | 37 | # A ScrolledList widget feels like a list widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the List widget.
# A Frame widget is inserted between the master and the list, to hold
# the Scrollbar widget.
# Most methods calls are inherited from the List widget; Pack methods
# are redirected to the Frame widget however.
from Tkinter import *
from Tkinter import _cnfmerge
class ScrolledListbox(Listbox):
    """A Listbox wrapped in a Frame with a vertical Scrollbar on the right.

    Python 2 / old Tkinter code: class-keyed configuration options (and
    'name') go to the Frame; everything else to the Listbox. Pack methods
    are redirected to the Frame so the composite packs as one widget.
    """

    def __init__(self, master=None, cnf={}):
        cnf = _cnfmerge(cnf)
        fcnf = {}
        vcnf = {'name': 'vbar',
                Pack: {'side': 'right', 'fill': 'y'},}
        # Split class-keyed options off for the Frame. Deleting while
        # iterating .keys() is safe only in Python 2 (keys() is a list).
        for k in cnf.keys():
            if type(k) == ClassType or k == 'name':
                fcnf[k] = cnf[k]
                del cnf[k]
        self.frame = Frame(master, fcnf)
        self.vbar = Scrollbar(self.frame, vcnf)
        cnf[Pack] = {'side': 'left', 'fill': 'both', 'expand': 'yes'}
        cnf['name'] = 'list'
        Listbox.__init__(self, self.frame, cnf)
        # Wire the scrollbar and the listbox to each other.
        self['yscrollcommand'] = (self.vbar, 'set')
        self.vbar['command'] = (self, 'yview')

        # Copy Pack methods of self.frame -- hack!
        for m in Pack.__dict__.keys():
            if m[0] != '_' and m != 'config':
                setattr(self, m, getattr(self.frame, m))
saurabh6790/medsynaptic1-lib | refs/heads/master | website/templates/pages/message.py | 68 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
no_cache = True |
yasker/cattle | refs/heads/master | tests/integration/cattletest/core/test_ha.py | 1 | from common_fixtures import * # NOQA
import logging
def _process_names(processes):
    """Collect the distinct ``processName`` values of *processes* as a set."""
    return {proc.processName for proc in processes}
def test_container_ha_default(client, super_client, user_sim_context):
    """Default HA behaviour: a forgotten container must end up stopped.

    The simulator forgets the container immediately (name-triggered); we
    ping the agent until the server schedules instance.stop, then verify
    the final state and the exact set of processes that ran.
    """
    c = client.create_container(imageUuid=user_sim_context['imageUuid'],
                                requestedHostId=user_sim_context['host'].id,
                                name='simForgetImmediately')
    c = client.wait_success(c)

    def do_ping():
        # Nudge the server: an agent ping makes it notice the lost instance.
        ping = one(super_client.list_task, name='agent.ping')
        ping.execute()

    def callback():
        processes = process_instances(super_client, c, type='instance')
        if 'instance.stop' not in _process_names(processes):
            do_ping()
            return None
        return processes

    processes = wait_for(callback)

    c = client.wait_success(c)
    if c.state != 'stopped':
        # Dump process state to help diagnose flaky failures.
        # (logging.warn is a deprecated alias -- use logging.warning.)
        logging.warning('test_container_ha_default debugging')
        for p in processes:
            logging.warning('ProcessInstance: %s' % p)
            for pe in process_executions(super_client, p.id):
                logging.warning('ProcessExecution: %s' % pe)

    assert c.state == 'stopped'
    # Default policy stops without a restart attempt.
    assert _process_names(processes) == set(['instance.create',
                                             'instance.stop'])
def test_container_ha_stop(super_client, sim_context):
    """instanceTriggeredStop='stop': a forgotten container is stopped."""
    container = super_client.create_container(
        imageUuid=sim_context['imageUuid'],
        requestedHostId=sim_context['host'].id,
        instanceTriggeredStop='stop',
        systemContainer='NetworkAgent',
        data={'simForgetImmediately': True})
    container = super_client.wait_success(container)

    def trigger_ping():
        # Agent ping prompts the server to act on the forgotten instance.
        one(super_client.list_task, name='agent.ping').execute()

    def poll():
        current = process_instances(super_client, container, type='instance')
        if 'instance.stop' in _process_names(current):
            return current
        trigger_ping()
        return None

    processes = wait_for(poll)

    container = super_client.wait_success(container)
    assert container.state == 'stopped'
    assert _process_names(processes) == {'instance.create',
                                         'instance.restart',
                                         'instance.stop'}
def test_container_ha_restart(super_client, sim_context):
    """instanceTriggeredStop='restart': a forgotten container is restarted."""
    container = super_client.create_container(
        imageUuid=sim_context['imageUuid'],
        requestedHostId=sim_context['host'].id,
        instanceTriggeredStop='restart',
        systemContainer='NetworkAgent',
        data={'simForgetImmediately': True})
    container = super_client.wait_success(container)

    def trigger_ping():
        # Agent ping prompts the server to act on the forgotten instance.
        one(super_client.list_task, name='agent.ping').execute()

    def poll():
        current = process_instances(super_client, container, type='instance')
        if 'instance.start' in _process_names(current):
            return current
        trigger_ping()
        return None

    processes = wait_for(poll)

    container = super_client.wait_success(container)
    assert container.state == 'running'
    assert _process_names(processes) == {'instance.create',
                                         'instance.restart',
                                         'instance.stop',
                                         'instance.start'}
def test_container_ha_remove(super_client, sim_context):
    """instanceTriggeredStop='remove': a forgotten container is removed."""
    container = super_client.create_container(
        imageUuid=sim_context['imageUuid'],
        requestedHostId=sim_context['host'].id,
        instanceTriggeredStop='remove',
        systemContainer='NetworkAgent',
        data={'simForgetImmediately': True})
    container = super_client.wait_success(container)

    def trigger_ping():
        # Agent ping prompts the server to act on the forgotten instance.
        one(super_client.list_task, name='agent.ping').execute()

    def poll():
        current = process_instances(super_client, container, type='instance')
        if 'instance.remove' in _process_names(current):
            return current
        trigger_ping()
        return None

    processes = wait_for(poll)

    container = super_client.wait_success(container)
    assert container.state == 'removed'
    assert _process_names(processes) == {'instance.create',
                                         'instance.restart',
                                         'instance.stop',
                                         'instance.remove'}
def process_executions(cli, id=None):
    """Return the process executions recorded for process instance *id*."""
    executions = cli.list_process_execution(processInstanceId=id)
    return executions
|
unbreakab1e/jenkins-job-builder-addons | refs/heads/master | tests/base.py | 2 | #!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import logging
import os
import re
import doctest
import json
import operator
import testtools
from testtools.content import text_content
import xml.etree.ElementTree as XML
from six.moves import configparser
from six.moves import StringIO
from yaml import safe_dump
# This dance deals with the fact that we want unittest.mock if
# we're on Python 3.4 and later, and non-stdlib mock otherwise.
try:
from unittest import mock
except ImportError:
import mock # noqa
import jenkins_jobs.local_yaml as yaml
from jenkins_jobs.parser import YamlParser
from jenkins_jobs.xml_config import XmlJob
from jenkins_jobs.modules import (project_flow,
project_matrix,
project_maven,
project_multijob)
from jenkins_jobs_addons import folders
def get_scenarios(fixtures_path, in_ext='yaml', out_ext='xml',
                  plugins_info_ext='plugins_info.yaml',
                  filter_func=None):
    """Collect fixture test scenarios beneath *fixtures_path*.

    Walks the directory tree and, for every input file matching *in_ext*
    (skipping plugins-info files and anything rejected by *filter_func*),
    looks up the matching output, plugins-info and conf files.

    Returns a list of (input_filename, attribute-dict) tuples suitable for
    testscenarios; missing companion files are recorded as None (an absent
    output file is treated as an empty expected output).
    """
    all_files = []
    for dirpath, _dirs, filenames in os.walk(fixtures_path):
        all_files.extend(os.path.join(dirpath, name) for name in filenames)

    in_pattern = re.compile(r'.*\.{0}$'.format(in_ext))

    scenarios = []
    for in_file in all_files:
        if not in_pattern.match(in_file):
            continue
        if in_file.endswith(plugins_info_ext):
            continue
        if callable(filter_func) and filter_func(in_file):
            continue

        out_file = re.sub(r'\.{0}$'.format(in_ext),
                          '.{0}'.format(out_ext), in_file)
        if out_file not in all_files:
            # assume empty file if no output candidate found
            out_file = None

        plugins_info_file = re.sub(r'\.{0}$'.format(in_ext),
                                   '.{0}'.format(plugins_info_ext), in_file)
        if plugins_info_file not in all_files:
            plugins_info_file = None

        conf_file = re.sub(r'\.yaml$', '.conf', in_file)
        if conf_file not in all_files:
            conf_file = None

        scenarios.append((in_file, {
            'in_filename': in_file,
            'out_filename': out_file,
            'conf_filename': conf_file,
            'plugins_info_filename': plugins_info_file,
        }))

    return scenarios
class BaseTestCase(object):
    """Mixin driving data-driven fixture tests (yaml fixture -> XML).

    Concrete test classes provide, per scenario, the attributes read below
    (in_filename, out_filename, conf_filename, plugins_info_filename, klass)
    -- presumably injected by testscenarios; confirm against test modules.
    """
    scenarios = []
    fixtures_path = None

    # TestCase settings:
    maxDiff = None      # always dump text difference
    longMessage = True  # keep normal error message when providing ours

    logging.basicConfig()

    def _read_utf8_content(self):
        """Return the expected output file decoded as UTF-8 ('' if absent)."""
        # if None assume empty file
        if self.out_filename is None:
            return u""

        # Read XML content, assuming it is unicode encoded
        xml_content = u"%s" % codecs.open(self.out_filename,
                                          'r', 'utf-8').read()
        return xml_content

    def _read_yaml_content(self, filename):
        """Parse *filename* with the jenkins-job-builder yaml loader."""
        with open(filename, 'r') as yaml_file:
            yaml_content = yaml.load(yaml_file)
        return yaml_content

    def test_yaml_snippet(self):
        """Render the yaml fixture through the module and diff against XML."""
        if not self.in_filename:
            return

        if self.conf_filename is not None:
            config = configparser.ConfigParser()
            # Close the conf file deterministically instead of leaking the
            # handle until garbage collection.
            with open(self.conf_filename) as conf_file:
                config.readfp(conf_file)
        else:
            config = {}

        expected_xml = self._read_utf8_content()
        yaml_content = self._read_yaml_content(self.in_filename)
        if isinstance(yaml_content, list):
            yaml_content = yaml_content[0]
        # Pick the root XML element for the declared project type; a plain
        # <project> is used when no (or an unknown) type is given.
        project = None
        if ('project-type' in yaml_content):
            if (yaml_content['project-type'] == "maven"):
                project = project_maven.Maven(None)
            elif (yaml_content['project-type'] == "matrix"):
                project = project_matrix.Matrix(None)
            elif (yaml_content['project-type'] == "flow"):
                project = project_flow.Flow(None)
            elif (yaml_content['project-type'] == "multijob"):
                project = project_multijob.MultiJob(None)
            elif (yaml_content['project-type'] == "folder"):
                project = folders.Folder(None)

        if project:
            xml_project = project.root_xml(yaml_content)
        else:
            xml_project = XML.Element('project')
        # (Removed a leftover Python-2 `print yaml_content` debug statement,
        # which was a syntax error under Python 3.)
        plugins_info = None
        if self.plugins_info_filename is not None:
            plugins_info = self._read_yaml_content(self.plugins_info_filename)
            self.addDetail("plugins-info-filename",
                           text_content(self.plugins_info_filename))
            self.addDetail("plugins-info",
                           text_content(str(plugins_info)))
        parser = YamlParser(config, plugins_info)

        pub = self.klass(parser.registry)

        # Generate the XML tree directly with modules/general
        pub.gen_xml(parser, xml_project, yaml_content)

        # Prettify generated XML
        pretty_xml = XmlJob(xml_project, 'fixturejob').output().decode('utf-8')

        self.assertThat(
            pretty_xml,
            testtools.matchers.DocTestMatches(expected_xml,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
class SingleJobTestCase(BaseTestCase):
    """Fixture test that runs the full YamlParser pipeline on one file."""

    def test_yaml_snippet(self):
        """Expand and render the whole job yaml, then diff against XML."""
        expected_xml = self._read_utf8_content()

        if self.conf_filename:
            config = configparser.ConfigParser()
            # Close the conf file deterministically instead of leaking the
            # handle until garbage collection.
            with open(self.conf_filename) as conf_file:
                config.readfp(conf_file)
        else:
            config = None

        parser = YamlParser(config)
        parser.parse(self.in_filename)

        # Generate the XML tree
        parser.expandYaml()
        parser.generateXML()

        # Deterministic ordering so multi-job fixtures diff stably.
        parser.xml_jobs.sort(key=operator.attrgetter('name'))

        # Prettify generated XML
        pretty_xml = u"\n".join(job.output().decode('utf-8')
                                for job in parser.xml_jobs)

        self.assertThat(
            pretty_xml,
            testtools.matchers.DocTestMatches(expected_xml,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
class JsonTestCase(BaseTestCase):
    """Fixture test comparing the yaml input serialized to pretty JSON."""

    def test_yaml_snippet(self):
        expected = self._read_utf8_content()
        parsed = self._read_yaml_content(self.in_filename)
        actual = json.dumps(parsed, indent=4, separators=(',', ': '))

        self.assertThat(
            actual,
            testtools.matchers.DocTestMatches(expected,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
class YamlTestCase(BaseTestCase):
    """Fixture test comparing re-serialized yaml against an expected yaml."""

    def test_yaml_snippet(self):
        expected_yaml = self._read_utf8_content()
        yaml_content = self._read_yaml_content(self.in_filename)

        # using json forces expansion of yaml anchors and aliases in the
        # outputted yaml, otherwise it would simply appear exactly as
        # entered which doesn't show that the net effect of the yaml
        data = StringIO(json.dumps(yaml_content))
        pretty_yaml = safe_dump(json.load(data), default_flow_style=False)

        self.assertThat(
            pretty_yaml,
            testtools.matchers.DocTestMatches(expected_yaml,
                                              doctest.ELLIPSIS |
                                              doctest.NORMALIZE_WHITESPACE |
                                              doctest.REPORT_NDIFF)
        )
|
KellyChan/Python | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/contrib/flatpages/middleware.py | 641 | from django.contrib.flatpages.views import flatpage
from django.http import Http404
from django.conf import settings
class FlatpageFallbackMiddleware(object):
    """Serve a flatpage as a fallback when the view chain produced a 404."""

    def process_response(self, request, response):
        if response.status_code != 404:
            return response # No need to check for a flatpage for non-404 responses.
        try:
            return flatpage(request, request.path_info)
        # Return the original response if any errors happened. Because this
        # is a middleware, we can't assume the errors will be caught elsewhere.
        except Http404:
            return response
        except:
            # NOTE(review): bare except is deliberate (see comment above) but
            # also swallows SystemExit/KeyboardInterrupt; errors are only
            # re-raised when DEBUG is on.
            if settings.DEBUG:
                raise
            return response
|
otherness-space/myProject003 | refs/heads/master | my_project_003/lib/python2.7/site-packages/wheel/signatures/djbec.py | 566 | # Ed25519 digital signatures
# Based on http://ed25519.cr.yp.to/python/ed25519.py
# See also http://ed25519.cr.yp.to/software.html
# Adapted by Ron Garret
# Sped up considerably using coordinate transforms found on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
# Specifically add-2008-hwcd-4 and dbl-2008-hwcd
# Python 2/3 compatibility shim: probing the `unicode` builtin tells us
# which interpreter we are on (NameError => Python 3).
try: # pragma nocover
    unicode
    PY3 = False
    def asbytes(b):
        """Convert array of integers to byte string"""
        return ''.join(chr(x) for x in b)
    def joinbytes(b):
        """Convert array of bytes to byte string"""
        return ''.join(b)
    def bit(h, i):
        """Return i'th bit of bytestring h"""
        return (ord(h[i//8]) >> (i%8)) & 1

except NameError: # pragma nocover
    PY3 = True
    asbytes = bytes
    joinbytes = bytes
    def bit(h, i):
        """Return i'th bit of bytestring h (bytes indexing yields ints)."""
        return (h[i//8] >> (i%8)) & 1
import hashlib
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
    """Return the SHA-512 digest of message *m* as bytes."""
    digest = hashlib.sha512(m)
    return digest.digest()
def expmod(b, e, m):
    """Return (b ** e) % m for non-negative exponent e.

    Uses the builtin three-argument pow(), which performs iterative
    square-and-multiply in C -- much faster than the previous recursive
    implementation and immune to hitting the recursion limit for the
    ~255-bit exponents used throughout this module.

    The explicit e == 0 guard preserves the original behaviour of
    returning 1 even when m == 1 (pow(b, 0, 1) would return 0).
    """
    if e == 0:
        return 1
    return pow(b, e, m)
# Can probably get some extra speedup here by replacing this with
# an extended-euclidean, but performance seems OK without that
def inv(x):
    """Multiplicative inverse of x mod q via Fermat's little theorem."""
    return expmod(x, q-2, q)

# Twisted Edwards curve constant d = -121665/121666, and I = sqrt(-1) mod q
# (2^((q-1)/4)), used below to fix up non-square candidates in xrecover.
d = -121665 * inv(121666)
I = expmod(2,(q-1)//4,q)

def xrecover(y):
    """Recover the even x coordinate matching y on the curve."""
    xx = (y*y-1) * inv(d*y*y+1)
    x = expmod(xx,(q+3)//8,q)
    if (x*x - xx) % q != 0: x = (x*I) % q
    if x % 2 != 0: x = q-x
    return x

# Base point B of the Ed25519 group (y = 4/5).
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
#def edwards(P,Q):
# x1 = P[0]
# y1 = P[1]
# x2 = Q[0]
# y2 = Q[1]
# x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
# y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
# return (x3 % q,y3 % q)
#def scalarmult(P,e):
# if e == 0: return [0,1]
# Q = scalarmult(P,e/2)
# Q = edwards(Q,Q)
# if e & 1: Q = edwards(Q,P)
# return Q
# Faster (!) version based on:
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
def xpt_add(pt1, pt2):
    """Add two points in extended twisted Edwards coordinates (X,Y,Z,T).

    Formula add-2008-hwcd-4 (see module header); all arithmetic mod q.
    """
    (X1, Y1, Z1, T1) = pt1
    (X2, Y2, Z2, T2) = pt2
    A = ((Y1-X1)*(Y2+X2)) % q
    B = ((Y1+X1)*(Y2-X2)) % q
    C = (Z1*2*T2) % q
    D = (T1*2*Z2) % q
    E = (D+C) % q
    F = (B-A) % q
    G = (B+A) % q
    H = (D-C) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)

def xpt_double (pt):
    """Double a point in extended coordinates (formula dbl-2008-hwcd)."""
    (X1, Y1, Z1, _) = pt
    A = (X1*X1)
    B = (Y1*Y1)
    C = (2*Z1*Z1)
    D = (-A) % q
    J = (X1+Y1) % q
    E = (J*J-A-B) % q
    G = (D+B) % q
    F = (G-C) % q
    H = (D-B) % q
    X3 = (E*F) % q
    Y3 = (G*H) % q
    Z3 = (F*G) % q
    T3 = (E*H) % q
    return (X3, Y3, Z3, T3)

def pt_xform (pt):
    """Lift an affine point (x, y) to extended coordinates (x, y, 1, x*y)."""
    (x, y) = pt
    return (x, y, 1, (x*y)%q)

def pt_unxform (pt):
    """Project extended coordinates back down to an affine point (x, y)."""
    (x, y, z, _) = pt
    return ((x*inv(z))%q, (y*inv(z))%q)

def xpt_mult (pt, n):
    """Scalar-multiply an extended-coordinate point by n (double-and-add)."""
    if n==0: return pt_xform((0,1))
    _ = xpt_double(xpt_mult(pt, n>>1))
    return xpt_add(_, pt) if n&1 else _

def scalarmult(pt, e):
    """Multiply affine point pt by scalar e, returning an affine point."""
    return pt_unxform(xpt_mult(pt_xform(pt), e))
def encodeint(y):
    """Encode integer y as b//8 (= 32) little-endian bytes."""
    bits = [(y >> i) & 1 for i in range(b)]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)

def encodepoint(P):
    """Encode point P as 32 bytes: 255 bits of y, parity of x in the top bit."""
    x = P[0]
    y = P[1]
    bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
    e = [(sum([bits[i * 8 + j] << j for j in range(8)]))
         for i in range(b//8)]
    return asbytes(e)

def publickey(sk):
    """Derive the 32-byte Ed25519 public key from secret key bytes sk."""
    h = H(sk)
    # Clamp the hashed secret into a valid scalar before multiplying B.
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    A = scalarmult(B,a)
    return encodepoint(A)
def Hint(m):
    """Hash m and interpret the 64-byte digest as a little-endian integer."""
    h = H(m)
    return sum(2**i * bit(h,i) for i in range(2*b))

def signature(m,sk,pk):
    """Produce the 64-byte Ed25519 signature (R || S) of m under (sk, pk)."""
    h = H(sk)
    a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
    # Deterministic nonce r from the upper half of H(sk) and the message.
    inter = joinbytes([h[i] for i in range(b//8,b//4)])
    r = Hint(inter + m)
    R = scalarmult(B,r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return encodepoint(R) + encodeint(S)

def isoncurve(P):
    """True iff affine point P satisfies the twisted Edwards curve equation."""
    x = P[0]
    y = P[1]
    return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0

def decodeint(s):
    """Decode little-endian bytes s into an integer."""
    return sum(2**i * bit(s,i) for i in range(0,b))

def decodepoint(s):
    """Decode a 32-byte point, recovering x from y and the stored sign bit."""
    y = sum(2**i * bit(s,i) for i in range(0,b-1))
    x = xrecover(y)
    if x & 1 != bit(s,b-1): x = q-x
    P = [x,y]
    if not isoncurve(P): raise Exception("decoding point that is not on curve")
    return P

def checkvalid(s, m, pk):
    """Verify signature s on message m under public key pk; returns bool."""
    if len(s) != b//4: raise Exception("signature length is wrong")
    if len(pk) != b//8: raise Exception("public-key length is wrong")
    R = decodepoint(s[0:b//8])
    A = decodepoint(pk)
    S = decodeint(s[b//8:b//4])
    h = Hint(encodepoint(R) + pk + m)
    # Check S*B == R + h*A.
    v1 = scalarmult(B,S)
    # v2 = edwards(R,scalarmult(A,h))
    v2 = pt_unxform(xpt_add(pt_xform(R), pt_xform(scalarmult(A, h))))
    return v1==v2
##########################################################
#
# Curve25519 reference implementation by Matthew Dempsky, from:
# http://cr.yp.to/highspeed/naclcrypto-20090310.pdf
# P = 2 ** 255 - 19
# P = 2 ** 255 - 19
P = q
# Montgomery curve coefficient for curve25519: y^2 = x^3 + A*x^2 + x.
A = 486662

#def expmod(b, e, m):
#  if e == 0: return 1
#  t = expmod(b, e / 2, m) ** 2 % m
#  if e & 1: t = (t * b) % m
#  return t

# def inv(x): return expmod(x, P - 2, P)

def add(n, m, d):
    """Differential addition in projective (x, z) coordinates.

    Given n = P1, m = P2 and d = P1 - P2, returns P1 + P2 (mod P).
    """
    (xn, zn) = n
    (xm, zm) = m
    (xd, zd) = d
    x = 4 * (xm * xn - zm * zn) ** 2 * zd
    z = 4 * (xm * zn - zm * xn) ** 2 * xd
    return (x % P, z % P)

def double(n):
    """Point doubling in projective (x, z) coordinates (mod P)."""
    (xn, zn) = n
    x = (xn ** 2 - zn ** 2) ** 2
    z = 4 * xn * zn * (xn ** 2 + A * xn * zn + zn ** 2)
    return (x % P, z % P)

def curve25519(n, base=9):
    """Return the x coordinate of n * base via a recursive Montgomery ladder."""
    one = (base,1)
    two = double(one)
    # f(m) evaluates to a tuple
    # containing the mth multiple and the
    # (m+1)th multiple of base.
    def f(m):
        if m == 1: return (one, two)
        (pm, pm1) = f(m // 2)
        if (m & 1):
            return (add(pm, pm1, one), double(pm1))
        return (double(pm), add(pm, pm1, one))
    ((x,z), _) = f(n)
    return (x * inv(z)) % P
import random
def genkey(n=0):
    """Generate (or clamp) a curve25519 secret scalar.

    If *n* is 0 (the default) a fresh random value is drawn; either way
    the standard curve25519 clamping is applied before returning.
    """
    # SECURITY: key material must come from a CSPRNG.  random.randint uses
    # the predictable Mersenne Twister; SystemRandom draws from os.urandom.
    n = n or random.SystemRandom().randint(0, P)
    n &= ~7                  # clear the 3 low bits (multiple of cofactor 8)
    n &= ~(128 << 8 * 31)    # clear bit 255
    n |= 64 << 8 * 31        # set bit 254
    return n
#def str2int(s):
# return int(hexlify(s), 16)
# # return sum(ord(s[i]) << (8 * i) for i in range(32))
#
#def int2str(n):
# return unhexlify("%x" % n)
# # return ''.join([chr((n >> (8 * i)) & 255) for i in range(32)])
#################################################
def dsa_test():
    """Smoke test: sign a random message and verify it; returns bool."""
    import os
    msg = str(random.randint(q,q+q)).encode('utf-8')
    sk = os.urandom(32)
    pk = publickey(sk)
    sig = signature(msg, sk, pk)
    return checkvalid(sig, msg, pk)

def dh_test():
    """Smoke test: curve25519 Diffie-Hellman agreement is symmetric."""
    sk1 = genkey()
    sk2 = genkey()
    return curve25519(sk1, curve25519(sk2)) == curve25519(sk2, curve25519(sk1))
|
BigBrother1984/android_external_chromium_org | refs/heads/kitkat | third_party/protobuf/python/google/protobuf/descriptor.py | 228 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
if api_implementation.Type() == 'cpp':
if api_implementation.Version() == 2:
from google.protobuf.internal.cpp import _message
else:
from google.protobuf.internal import cpp_message
class Error(Exception):
  """Base error for this module."""


class TypeTransformationError(Error):
  """Error transforming between python proto type and corresponding C++ type.

  Raised by FieldDescriptor.ProtoTypeToCppProtoType for unknown proto types.
  """
class DescriptorBase(object):

  """Descriptors base class.

  This class is the base of all descriptor classes. It provides common options
  related functionaility.

  Attributes:
    has_options: True if the descriptor has non-default options.  Usually it
        is not necessary to read this -- just call GetOptions() which will
        happily return the default instance.  However, it's sometimes useful
        for efficiency, and also useful inside the protobuf implementation to
        avoid some bootstrapping issues.
  """

  def __init__(self, options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.

    Args:
      options: Options protocol message, or None for defaults.
      options_class_name: (str) Name of the options class in descriptor_pb2.
    """
    self._options = options
    self._options_class_name = options_class_name

    # Does this descriptor have non-default options?
    self.has_options = options is not None

  def _SetOptions(self, options, options_class_name):
    """Sets the descriptor's options

    This function is used in generated proto2 files to update descriptor
    options. It must not be used outside proto2.
    """
    self._options = options
    self._options_class_name = options_class_name

    # Does this descriptor have non-default options?
    self.has_options = options is not None

  def GetOptions(self):
    """Retrieves descriptor options.

    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
      return self._options
    # Deferred import -- presumably to avoid an import cycle while
    # descriptor_pb2 itself is being bootstrapped; confirm before moving.
    from google.protobuf import descriptor_pb2
    try:
      options_class = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    self._options = options_class()
    return self._options
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""

  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None):
    """Constructor.

    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name: (str) The class name of the above options.

      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.
      file: (FileDescriptor) Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, options_class_name)

    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if its the root."""
    desc = self
    while desc.containing_type is not None:
      desc = desc.containing_type
    return desc

  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.

    Args:
      proto: An empty proto instance from descriptor_pb2.

    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    if (self.file is not None and
        self._serialized_start is not None and
        self._serialized_end is not None):
      proto.ParseFromString(self.file.serialized_pb[
          self._serialized_start:self._serialized_end])
    else:
      raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):

  """Descriptor for a protocol message type.

  A Descriptor instance has the following attributes:

    name: (str) Name of this protocol message type.
    full_name: (str) Fully-qualified name of this protocol message type,
      which will include protocol "package" name and the name of any
      enclosing types.

    containing_type: (Descriptor) Reference to the descriptor of the
      type containing us, or None if this is top-level.

    fields: (list of FieldDescriptors) Field descriptors for all
      fields in this type.
    fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "number" attribute in each
      FieldDescriptor.
    fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "name" attribute in each
      FieldDescriptor.

    nested_types: (list of Descriptors) Descriptor references
      for all protocol message types nested within this one.
    nested_types_by_name: (dict str -> Descriptor) Same Descriptor
      objects as in |nested_types|, but indexed by "name" attribute
      in each Descriptor.

    enum_types: (list of EnumDescriptors) EnumDescriptor references
      for all enums contained within this type.
    enum_types_by_name: (dict str -> EnumDescriptor) Same EnumDescriptor
      objects as in |enum_types|, but indexed by "name" attribute
      in each EnumDescriptor.
    enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
      from enum value name to EnumValueDescriptor for that value.

    extensions: (list of FieldDescriptor) All extensions defined directly
      within this message type (NOT within a nested type).
    extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
      objects as |extensions|, but indexed by "name" attribute of each
      FieldDescriptor.

    is_extendable: Does this type define any extension ranges?

    options: (descriptor_pb2.MessageOptions) Protocol message options or None
      to use default message options.

    file: (FileDescriptor) Reference to file descriptor.
  """

  def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
               is_extendable=True, extension_ranges=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments to __init__() are as described in the description
    of Descriptor fields above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # BUGFIX: serialized_end was previously passed as serialized_start
    # (serialized_end=serialized_start), a typo that was only masked by
    # redundant reassignments at the bottom of this method (now removed:
    # the base class already stores both values).
    super(Descriptor, self).__init__(
        options, 'MessageOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    # We have fields in addition to fields_by_name and fields_by_number,
    # so that:
    #   1. Clients can index fields by "order in which they're listed."
    #   2. Clients can easily iterate over all fields with the terse
    #      syntax: for f in descriptor.fields: ...
    self.fields = fields
    for field in self.fields:
      field.containing_type = self
    self.fields_by_number = dict((f.number, f) for f in fields)
    self.fields_by_name = dict((f.name, f) for f in fields)

    self.nested_types = nested_types
    self.nested_types_by_name = dict((t.name, t) for t in nested_types)

    self.enum_types = enum_types
    for enum_type in self.enum_types:
      enum_type.containing_type = self
    self.enum_types_by_name = dict((t.name, t) for t in enum_types)
    self.enum_values_by_name = dict(
        (v.name, v) for t in enum_types for v in t.values)

    self.extensions = extensions
    for extension in self.extensions:
      extension.extension_scope = self
    self.extensions_by_name = dict((f.name, f) for f in extensions)
    self.is_extendable = is_extendable
    self.extension_ranges = extension_ranges

  def EnumValueName(self, enum, value):
    """Returns the string name of an enum value.

    This is just a small helper method to simplify a common operation.

    Args:
      enum: string name of the Enum.
      value: int, value of the enum.

    Returns:
      string name of the enum value.

    Raises:
      KeyError if either the Enum doesn't exist or the value is not a valid
        value for the enum.
    """
    return self.enum_types_by_name[enum].values_by_number[value].name

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.DescriptorProto.

    Args:
      proto: An empty descriptor_pb2.DescriptorProto.
    """
    # This function is overriden to give a better doc comment.
    super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
A FieldDescriptor instance has the following attributes:
name: (str) Name of this field, exactly as it appears in .proto.
full_name: (str) Name of this field, including containing scope. This is
particularly relevant for extensions.
index: (int) Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number: (int) Tag number declared for this field in the .proto file.
type: (One of the TYPE_* constants below) Declared type.
cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label: (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
has_default_value: (bool) True if this field has a default value defined,
otherwise false.
default_value: (Varies) Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type: (Descriptor) If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type: (EnumDescriptor) If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope: (Descriptor) Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options: (descriptor_pb2.FieldOptions) Protocol message field options or
None to use default field options.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
_PYTHON_TO_CPP_PROTO_TYPE_MAP = {
TYPE_DOUBLE: CPPTYPE_DOUBLE,
TYPE_FLOAT: CPPTYPE_FLOAT,
TYPE_ENUM: CPPTYPE_ENUM,
TYPE_INT64: CPPTYPE_INT64,
TYPE_SINT64: CPPTYPE_INT64,
TYPE_SFIXED64: CPPTYPE_INT64,
TYPE_UINT64: CPPTYPE_UINT64,
TYPE_FIXED64: CPPTYPE_UINT64,
TYPE_INT32: CPPTYPE_INT32,
TYPE_SFIXED32: CPPTYPE_INT32,
TYPE_SINT32: CPPTYPE_INT32,
TYPE_UINT32: CPPTYPE_UINT32,
TYPE_FIXED32: CPPTYPE_UINT32,
TYPE_BYTES: CPPTYPE_STRING,
TYPE_STRING: CPPTYPE_STRING,
TYPE_BOOL: CPPTYPE_BOOL,
TYPE_MESSAGE: CPPTYPE_MESSAGE,
TYPE_GROUP: CPPTYPE_MESSAGE
}
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
def __init__(self, name, full_name, index, number, type, cpp_type, label,
             default_value, message_type, enum_type, containing_type,
             is_extension, extension_scope, options=None,
             has_default_value=True):
  """The arguments are as described in the description of FieldDescriptor
  attributes above.

  Note that containing_type may be None, and may be set later if necessary
  (to deal with circular references between message types, for example).
  Likewise for extension_scope.
  """
  super(FieldDescriptor, self).__init__(options, 'FieldOptions')
  self.name = name
  self.full_name = full_name
  self.index = index
  self.number = number
  self.type = type
  self.cpp_type = cpp_type
  self.label = label
  self.has_default_value = has_default_value
  self.default_value = default_value
  self.containing_type = containing_type
  self.message_type = message_type
  self.enum_type = enum_type
  self.is_extension = is_extension
  self.extension_scope = extension_scope
  # When the C++ implementation backs this runtime, keep a handle to the
  # matching native descriptor (api_implementation / _message /
  # cpp_message are imported elsewhere in this module).
  if api_implementation.Type() == 'cpp':
    if is_extension:
      if api_implementation.Version() == 2:
        self._cdescriptor = _message.GetExtensionDescriptor(full_name)
      else:
        self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name)
    else:
      if api_implementation.Version() == 2:
        self._cdescriptor = _message.GetFieldDescriptor(full_name)
      else:
        self._cdescriptor = cpp_message.GetFieldDescriptor(full_name)
  else:
    # Pure-Python implementation: no native descriptor to delegate to.
    self._cdescriptor = None
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
  """Map a Python proto field type constant onto its C++ counterpart.

  The Python ProtocolBuffer classes carry two parallel sets of type
  constants (TYPE_* and CPPTYPE_*); this helper translates from the
  former to the latter.

  Args:
    proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)

  Returns:
    descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.

  Raises:
    TypeTransformationError: when the Python proto type isn't known.
  """
  cpp_type = FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP.get(proto_type)
  if cpp_type is None:
    raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
  return cpp_type
class EnumDescriptor(_NestedDescriptorBase):
  """Descriptor for an enum defined in a .proto file.

  An EnumDescriptor instance has the following attributes:
  name: (str) Name of the enum type.
  full_name: (str) Full name of the type, including package name
    and any enclosing type(s).
  values: (list of EnumValueDescriptors) List of the values
    in this enum.
  values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
    but indexed by the "name" field of each EnumValueDescriptor.
  values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
    but indexed by the "number" field of each EnumValueDescriptor.
  containing_type: (Descriptor) Descriptor of the immediate containing
    type of this enum, or None if this is an enum defined at the
    top level in a .proto file. Set by Descriptor's constructor
    if we're passed into one.
  file: (FileDescriptor) Reference to file descriptor.
  options: (descriptor_pb2.EnumOptions) Enum options message or
    None to use default enum options.
  """

  def __init__(self, name, full_name, filename, values,
               containing_type=None, options=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments are as described in the attribute description above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # BUG FIX: the base-class call previously passed serialized_start as
    # the serialized_end keyword, so the recorded end offset was wrong.
    super(EnumDescriptor, self).__init__(
        options, 'EnumOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    self.values = values
    for value in self.values:
      # Back-link each value descriptor to this enum.
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    self.values_by_number = dict((v.number, v) for v in values)

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.EnumDescriptorProto.
    """
    # This function is overriden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
  """Descriptor for one value inside an enum.

  name: (str) Name of this value.
  index: (int) Dense, 0-indexed index giving the order that this
    value appears textually within its enum in the .proto file.
  number: (int) Actual number assigned to this enum value.
  type: (EnumDescriptor) EnumDescriptor to which this value
    belongs. Set by EnumDescriptor's constructor if we're
    passed into one.
  options: (descriptor_pb2.EnumValueOptions) Enum value options message,
    or None for default enum value options.
  """

  def __init__(self, name, index, number, type=None, options=None):
    """Arguments mirror the attribute description above."""
    super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
    self.type = type
    self.number = number
    self.index = index
    self.name = name
class ServiceDescriptor(_NestedDescriptorBase):
  """Descriptor for a service declared in a .proto file.

  name: (str) Name of the service.
  full_name: (str) Full name of the service, including package name.
  index: (int) 0-indexed index giving the order that this services
    definition appears withing the .proto file.
  methods: (list of MethodDescriptor) List of methods provided by this
    service.
  options: (descriptor_pb2.ServiceOptions) Service options message or
    None to use default service options.
  file: (FileDescriptor) Reference to file info.
  """

  def __init__(self, name, full_name, index, methods, options=None, file=None,
               serialized_start=None, serialized_end=None):
    super(ServiceDescriptor, self).__init__(
        options, 'ServiceOptions', name, full_name, file,
        None, serialized_start=serialized_start,
        serialized_end=serialized_end)
    self.index = index
    self.methods = methods
    # Back-link every method descriptor to this service.
    for method_desc in self.methods:
      method_desc.containing_service = self

  def FindMethodByName(self, name):
    """Return the descriptor of the method called `name`, or None."""
    return next((m for m in self.methods if m.name == name), None)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.ServiceDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.ServiceDescriptorProto.
    """
    # This function is overriden to give a better doc comment.
    super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
  """Descriptor for a single method of a service.

  name: (str) Name of the method within the service.
  full_name: (str) Full name of method.
  index: (int) 0-indexed index of the method inside the service.
  containing_service: (ServiceDescriptor) The service that contains this
    method.
  input_type: The descriptor of the message that this method accepts.
  output_type: The descriptor of the message that this method returns.
  options: (descriptor_pb2.MethodOptions) Method options message, or
    None for default method options.
  """

  def __init__(self, name, full_name, index, containing_service,
               input_type, output_type, options=None):
    """See the class attribute description above for argument meanings.

    containing_service may be None here and assigned later if necessary.
    """
    super(MethodDescriptor, self).__init__(options, 'MethodOptions')
    self.containing_service = containing_service
    self.input_type = input_type
    self.output_type = output_type
    self.index = index
    self.full_name = full_name
    self.name = name
class FileDescriptor(DescriptorBase):
  """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.

  name: name of file, relative to root of source tree.
  package: name of the package
  serialized_pb: (str) Byte string of serialized
    descriptor_pb2.FileDescriptorProto.
  """

  def __init__(self, name, package, options=None, serialized_pb=None):
    """Constructor."""
    super(FileDescriptor, self).__init__(options, 'FileOptions')

    # Starts empty; presumably populated by callers as message
    # descriptors are created — not visible in this module. TODO confirm.
    self.message_types_by_name = {}
    self.name = name
    self.package = package
    self.serialized_pb = serialized_pb
    # With the C++ implementation active, register the serialized file
    # with the native layer so it knows about this file too.
    if (api_implementation.Type() == 'cpp' and
        self.serialized_pb is not None):
      if api_implementation.Version() == 2:
        _message.BuildFile(self.serialized_pb)
      else:
        cpp_message.BuildFile(self.serialized_pb)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.FileDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.FileDescriptorProto.
    """
    # The serialized form is authoritative; just re-parse it into proto.
    proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def MakeDescriptor(desc_proto, package=''):
  """Make a protobuf Descriptor given a DescriptorProto protobuf.

  Args:
    desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
    package: Optional package name for the new message Descriptor (string).

  Returns:
    A Descriptor for protobuf messages.
  """
  full_message_name = [desc_proto.name]
  if package: full_message_name.insert(0, package)
  fields = []
  for field_proto in desc_proto.field:
    full_name = '.'.join(full_message_name + [field_proto.name])
    # Index is derived as number - 1 — assumes densely numbered fields
    # starting at 1; TODO confirm for sparse numbering.
    field = FieldDescriptor(
        field_proto.name, full_name, field_proto.number - 1,
        field_proto.number, field_proto.type,
        FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
        field_proto.label, None, None, None, None, False, None,
        has_default_value=False)
    fields.append(field)
  desc_name = '.'.join(full_message_name)
  # No containing type, file, nested types, enums or extensions are set.
  return Descriptor(desc_proto.name, desc_name, None, None, fields,
                    [], [], [])
|
zanderle/django | refs/heads/master | tests/forms_tests/widget_tests/test_nullbooleanselect.py | 179 | from django.forms import NullBooleanSelect
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class NullBooleanSelectTest(WidgetTest):
    # Shared widget instance under test.
    widget = NullBooleanSelect()

    def test_render_true(self):
        # True pre-selects the "Yes" option (value "2").
        self.check_html(self.widget, 'is_cool', True, html=(
            """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>"""
        ))

    def test_render_false(self):
        # False pre-selects the "No" option (value "3").
        self.check_html(self.widget, 'is_cool', False, html=(
            """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>"""
        ))

    def test_render_none(self):
        # None pre-selects the "Unknown" option (value "1").
        self.check_html(self.widget, 'is_cool', None, html=(
            """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>"""
        ))

    def test_render_value(self):
        # A raw option value string ('2') is honored as the selection.
        self.check_html(self.widget, 'is_cool', '2', html=(
            """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>"""
        ))

    @override_settings(USE_L10N=True)
    def test_l10n(self):
        """
        Ensure that the NullBooleanSelect widget's options are lazily
        localized (#17190).
        """
        widget = NullBooleanSelect()

        with translation.override('de-at'):
            self.check_html(widget, 'id_bool', True, html=(
                """
<select name="id_bool">
<option value="1">Unbekannt</option>
<option value="2" selected="selected">Ja</option>
<option value="3">Nein</option>
</select>
"""
            ))
|
sambitgaan/nupic | refs/heads/master | src/nupic/data/joiner.py | 11 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import csv
import datetime
import time
class WeatherJoiner(object):
  '''
  Given a data file in standard numenta format and the types of weather you are
  interested in. WeatherJoiner allows you to create a new temporary file that
  includes relevant weather records for your data. It is subject to the following
  limitations.

  Each record in the original file must have an address field of type string. This
  can be either a street address or an IP address.

  Each record in the original file must have a timestamp field.

  There is no guarantee we will have any weather data to add to your data set.

  The records for your time range may be incomplete. Placeholder data will
  be provided for missing records.

  We will return statistics on how many records were missing from those requested.
  It is up to the user to set appropriate tolerances and reject data deemed
  insufficiently complete.
  '''

  def __init__(self, datasets, weatherTypes, WeatherProvider, GeoProvider):
    # datasets: dict mapping a label -> path of an input CSV file.
    # Assumes that datasets exist and paths are valid
    self.datasets = datasets

    # Store the provider objects
    self.w = WeatherProvider  # weather record lookups
    self.g = GeoProvider      # address -> lat/long geocoding

    # Check each of the input files and make sure they have address and timestamp
    for key in self.datasets:
      with open(self.datasets[key]) as f:
        header = f.readline()
        if 'address' not in header or 'timestamp' not in header:
          raise Exception('Data files must contain headers "address" and "timestamp"')

    # See WeatherProvider.py for full list of valid weatherTypes
    # An exception will be raised there if an invalid type is passed
    self.weatherTypes = weatherTypes

  def join(self, generateFlag = True):
    '''
    Writes out a new combined file containing weather data
    Returns statistics on the number of records missing
    '''
    # Create a dict to return paths to generated files
    joinedDatasets = {}

    # Set up other variables we'll return
    percentMissing = 0.00

    # For each data file we are given, create a new one with weather data
    for key in self.datasets:
      # Pull out the path to the csv, then isolate the filename
      inputPath = self.datasets[key]
      outputDir, inputFilename = os.path.split(inputPath)

      # Make up a name for the temp file
      outputFile = 'join_' + inputFilename

      # We'll put the joined datasets in the same folder as the originals
      outputPath = os.path.join(outputDir, outputFile)
      print 'Now writing to: ' + outputPath

      # Keep the dict keys the same as the input dict
      joinedDatasets[key] = outputPath

      # If we're not being asked to generate the file just spit out the paths
      if not generateFlag:
        continue

      # Open the input and output files for processing
      # (NOTE(review): file handles are never closed explicitly)
      outputFile = csv.writer(open(outputPath, 'w'),dialect='excel')
      inputFile = csv.reader(open(inputPath, 'r'),dialect='excel')

      # Update the headers and find where the address and timestamp are
      addressIndex, timestampIndex = self._updateHeaders(self.weatherTypes,
                                                         inputFile,
                                                         outputFile)

      # Set up caches to eliminate duplicate API calls
      addressCache = {}

      # Set up counters for missing line stats
      goodRecords = 0
      missingRecords = 0

      # Iterate over all the lines in the input file
      for line in inputFile:
        # Get the address and the timestamp
        address = line[addressIndex]
        timestamp = line[timestampIndex]
        date = self._parseTimestamp(timestamp).date()

        # check to see if that address is in our cache
        if address not in addressCache:
          # Look up the lat/long for this address and store it
          # print "Getting lat/long for address:"
          # print address
          lat, long = self.g.getLatLong(address)
          addressCache[address] = (lat, long)
          # Also reset the range on the WeatherProvider for the new address
          self.w.setRange(0)
        else:
          lat, long = addressCache[address]

        # This is ugly, but we have to deal with the fact that the closest
        # station may not have complete records for the time period we're
        # interested in.
        range = 1
        while True:
          try:
            recordsDict = self.w.getRecords(lat, long, date)
            # We found a good station so kill the search loop
            break
          except LookupError:
            # We failed to find records for this location and time
            # try next closest station
            # print lat, long, date
            # print "DATA FILE MISSING, trying next closest station ..."
            # Will look farther away next time
            self.w.setRange(range)
            range += 1
            # Don't swamp Google API with requests
            time.sleep(1)

        '''
        Check to see if this was a blank row in the data files, put in
        placeholder values if it was
        '''
        if recordsDict == None:
          missingRecords += 1
          # Specifies the missing data placeholder
          line.extend([9999] * len(self.weatherTypes))
          outputFile.writerow(line)
        else:
          goodRecords += 1
          for type in self.weatherTypes:
            line.append(recordsDict[type])
          outputFile.writerow(line)

      # Per-dataset ratio; overwritten each iteration so the returned
      # value reflects the last dataset processed.
      try:
        percentMissing = float(missingRecords) / goodRecords # Oh python *shakeshead*
      except ZeroDivisionError:
        percentMissing = 100.00

    return joinedDatasets, percentMissing

  def _updateHeaders(self, newHeaders, inputFileHandle, outputFileHandle):
    # We need to deal with the first three header rows and update them with
    # the requested weather data
    for i in xrange(1,4): # Screw zero indexing!
      headerLine = inputFileHandle.next()
      for type in newHeaders:
        # Header row one
        if i == 1:
          headerLine.append(type)
          # Find out where our address and timestamp columns are
          addressIndex = headerLine.index('address')
          timestampIndex = headerLine.index('timestamp')
        # Header row two
        elif i == 2: headerLine.append('float')
        # Header row three
        elif i == 3: headerLine.append('')
      outputFileHandle.writerow(headerLine)
    return addressIndex, timestampIndex

  def _parseTimestamp(self,t):
    # Expects timestamps like "YYYY-MM-DD[ ...]"; only the date part is kept.
    tokens = t.split()
    year, month, day = [int(x) for x in tokens[0].split('-')]
    # TODO Handle times smaller than a day
    result = datetime.datetime(year, month, day)
    return result
|
Team-M8/android_kernel_htc_msm8974-staging | refs/heads/next | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Validate the command line: exactly one argument (the file to check).
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary through the READELF env variable.
readelf = os.getenv("READELF", "readelf")

# Matches the header line of a function's unwind info:
#   <name>: [0xSTART-0xEND]
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches region lines carrying an "rlen=<slots>" annotation.
rlen_pattern = re.compile(".*rlen=([0-9]+)")

def check_func (func, slots, rlen_sum):
    # Report a mismatch between the function's slot count and the sum of
    # its unwind-region lengths; reads module globals start/end for the
    # anonymous-function case.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return

num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: flush the previous function, then reset.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 instruction bundles carry 3 slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
|
jalavik/invenio | refs/heads/master | invenio/legacy/bibindex/engine_utils.py | 7 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""bibindex.engine_utils: here are some useful regular experssions for tokenizers
and several helper functions.
"""
import re
import sys
from invenio.base.helpers import utf8ifier
from invenio.legacy.dbquery import run_sql, \
DatabaseError
from invenio.legacy.bibsched.bibtask import write_message
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.config import \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR
from invenio.utils.memoise import memoize
# Inline/display LaTeX formulas ($...$ or \[...\]).
latex_formula_re = re.compile(r'\$.*?\$|\\\[.*?\\\]')
# Characters treated as end-of-phrase markers.
phrase_delimiter_re = re.compile(r'[\.:;\?\!]')
# Runs of whitespace, for collapsing into single spaces.
space_cleaner_re = re.compile(r'\s+')
# Punctuation blocks at the beginning / end of a token, and anywhere.
re_block_punctuation_begin = re.compile(r"^" + CFG_BIBINDEX_CHARS_PUNCTUATION + "+")
re_block_punctuation_end = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + "+$")
re_punctuation = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION)
re_separators = re.compile(CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS)
# arXiv identifiers of the form "arxiv:NNNN.NNNN".
re_arxiv = re.compile(r'^arxiv:\d\d\d\d\.\d\d\d\d')

# Space, comma or dot in an author pattern triggers phrase search.
re_pattern_fuzzy_author_trigger = re.compile(r'[\s\,\.]')
# FIXME: re_pattern_fuzzy_author_trigger could be removed and an
# BibAuthorID API function could be called instead after we
# double-check that there are no circular imports.
def load_tokenizers():
    """
    Load all the bibindex tokenizers and returns it.
    """
    import warnings
    warnings.warn("The function is deprecated. Please use the "
                  "`load_tokenizers()` from `invenio.modules.indexer.utils`",
                  DeprecationWarning)
    from invenio.modules.indexer.registry import tokenizers
    # Map tokenizer module basename -> the class of the same name inside
    # that module ('' when the module defines no matching attribute).
    return dict((module.__name__.split('.')[-1],
                 getattr(module, module.__name__.split('.')[-1], ''))
                for module in tokenizers)
def get_all_index_names_and_column_values(column_name):
    """Returns a list of tuples of name and another column of all defined words indexes.
    Returns empty list in case there are no tags indexed in this index or in case
    the column name does not exist.
    Example: output=[('global', something), ('title', something)]."""
    out = []
    # NOTE(review): column_name is interpolated directly into the SQL
    # string (not a bound parameter) — callers must pass trusted values.
    query = """SELECT name, %s FROM idxINDEX""" % column_name
    try:
        res = run_sql(query)
        for row in res:
            out.append((row[0], row[1]))
    except DatabaseError:
        # An unknown column raises DatabaseError; log and return [].
        write_message("Exception caught for SQL statement: %s; column %s might not exist" % (query, column_name), sys.stderr)
    return out
def get_all_synonym_knowledge_bases():
    """Returns a dictionary of name key and knowledge base name and match type tuple value
    information of all defined words indexes that have knowledge base information.
    Returns empty dictionary in case there are no tags indexed.
    Example: output['global'] = ('INDEX-SYNONYM-TITLE', 'exact'), output['title'] = ('INDEX-SYNONYM-TITLE', 'exact')."""
    res = get_all_index_names_and_column_values("synonym_kbrs")
    out = {}
    for row in res:
        kb_data = row[1]
        # ignore empty strings
        if len(kb_data):
            # Stored as "<kb name><SEP><match type>"; split on the
            # configured separator.
            out[row[0]] = tuple(kb_data.split(CFG_BIBINDEX_COLUMN_VALUE_SEPARATOR))
    return out
def get_index_remove_stopwords(index_id):
    """Returns value of a remove_stopword field from idxINDEX database table
    if it's not 'No'. If it's 'No' returns False.
    Just for consistency with WordTable.
    @param index_id: id of the index
    """
    try:
        result = run_sql("SELECT remove_stopwords FROM idxINDEX WHERE ID=%s", (index_id, ))[0][0]
    except:
        # Bare except: any lookup failure (e.g. unknown id) maps to False.
        return False
    if result == 'No' or result == '':
        return False
    # Any other value (e.g. a stopword file name) is returned verbatim.
    return result
def get_index_remove_html_markup(index_id):
    """ Gets remove_html_markup parameter from database ('Yes' or 'No') and
        changes it to True, False.
        Just for consistency with WordTable."""
    try:
        result = run_sql("SELECT remove_html_markup FROM idxINDEX WHERE ID=%s", (index_id, ))[0][0]
    except:
        # Bare except: any lookup failure (e.g. unknown id) maps to False.
        return False
    if result == 'Yes':
        return True
    return False
def get_index_remove_latex_markup(index_id):
    """ Gets remove_latex_markup parameter from database ('Yes' or 'No') and
        changes it to True, False.
        Just for consistency with WordTable."""
    try:
        result = run_sql("SELECT remove_latex_markup FROM idxINDEX WHERE ID=%s", (index_id, ))[0][0]
    except:
        # Bare except: any lookup failure (e.g. unknown id) maps to False.
        return False
    if result == 'Yes':
        return True
    return False
def author_name_requires_phrase_search(p):
    """
    Detect whether author query pattern p requires phrase search.

    A pattern needs phrase search as soon as it contains whitespace,
    a comma or a dot (see re_pattern_fuzzy_author_trigger above).
    """
    return re_pattern_fuzzy_author_trigger.search(p) is not None
def get_field_count(recID, tags):
    """
    Return number of field instances having TAGS in record RECID.

    @param recID: record ID
    @type recID: int
    @param tags: list of tags to count, e.g. ['100__a', '700__a']
    @type tags: list
    @return: number of tags present in record
    @rtype: int
    @note: Works internally via getting field values, which may not be
       very efficient. Could use counts only, or else retrieve stored
       recstruct format of the record and walk through it.
    """
    return sum(len(get_fieldvalues(recID, tag)) for tag in tags)
def run_sql_drop_silently(query):
    """
    SQL DROP statement with IF EXISTS part generates
    warning if table does not exist. To mute the warning
    we can remove IF EXISTS and catch SQL exception telling
    us that table does not exist.
    """
    try:
        # Strip the IF EXISTS clause so MySQL raises instead of warning.
        query = query.replace(" IF EXISTS", "")
        run_sql(query)
    except Exception as e:
        # Swallow only the "Unknown table" error; re-raise anything else.
        if str(e).find("Unknown table") > -1:
            pass
        else:
            raise e
from invenio.modules.indexer.utils import get_idx_indexer
def get_all_indexes(virtual=True, with_ids=False):
    """Returns the list of the names of all defined words indexes.
    Returns empty list in case there are no tags indexed in this index.
    @param virtual: if True function will return also virtual indexes
    @param with_ids: if True function will return also IDs of found indexes
    Example: output=['global', 'author']."""
    out = []
    if virtual:
        query = """SELECT %s name FROM idxINDEX"""
        # Prepend the id column only when the caller asked for ids.
        query = query % (with_ids and "id," or "")
    else:
        # Exclude indexes that appear as virtual in idxINDEX_idxINDEX.
        query = """SELECT %s w.name FROM idxINDEX AS w
WHERE w.id NOT IN (SELECT DISTINCT id_virtual FROM idxINDEX_idxINDEX)"""
        query = query % (with_ids and "w.id," or "")
    res = run_sql(query)
    if with_ids:
        # (id, name) tuples.
        out = [row for row in res]
    else:
        # Names only.
        out = [row[0] for row in res]
    return out
def get_all_virtual_indexes():
    """ Returns all defined 'virtual' indexes as (id, name) tuples. """
    query = """SELECT DISTINCT v.id_virtual, w.name FROM idxINDEX_idxINDEX AS v,
idxINDEX AS w
WHERE v.id_virtual=w.id"""
    res = run_sql(query)
    return res
def get_index_virtual_indexes(index_id):
    """Returns 'virtual' indexes that should be indexed together with
    given index, as (id, name) tuples."""
    query = """SELECT v.id_virtual, w.name FROM idxINDEX_idxINDEX AS v,
idxINDEX AS w
WHERE v.id_virtual=w.id AND
v.id_normal=%s"""
    res = run_sql(query, (index_id,))
    return res
def is_index_virtual(index_id):
    """Checks if index is virtual.
    @param index_id: id of the index
    @return: True when index_id appears as a virtual index, else False
    """
    query = """SELECT id_virtual FROM idxINDEX_idxINDEX
WHERE id_virtual=%s"""
    res = run_sql(query, (index_id,))
    if res:
        return True
    return False
def filter_for_virtual_indexes(index_list):
    """
    Function removes all non-virtual indexes
    from given list of indexes.
    @param index_list: list of index names
    @return: list of the names from index_list that are virtual indexes
    """
    try:
        # zip(*rows)[1] raises IndexError when no virtual indexes exist.
        virtual = zip(*get_all_virtual_indexes())[1]
    except IndexError:
        return []
    # Keep only names that are both requested and virtual.
    # (Previous version had an unreachable trailing `return []` after the
    # try/except; it has been removed.)
    return list(set(virtual) & set(index_list))
def get_virtual_index_building_blocks(index_id):
    """Returns indexes that made up virtual index of given index_id.
    If index_id is an id of normal index (not virtual) returns
    empty tuple.
    @return: tuple of (id, name) rows for the component indexes
    """
    query = """SELECT v.id_normal, w.name FROM idxINDEX_idxINDEX AS v,
idxINDEX AS w
WHERE v.id_normal=w.id AND
v.id_virtual=%s"""
    res = run_sql(query, (index_id,))
    return res
def get_index_id_from_index_name(index_name):
    """Returns the words/phrase index id for INDEXNAME.
    Returns 0 in case there is no words table for this index.
    Example: field='author', output=4."""
    out = 0
    query = """SELECT w.id FROM idxINDEX AS w
WHERE w.name=%s LIMIT 1"""
    res = run_sql(query, (index_name,), 1)
    if res:
        out = res[0][0]
    return out
def get_index_name_from_index_id(index_id):
    """Returns the words/phrase index name for INDEXID.
    Returns '' in case there is no words table for this indexid.
    Example: field=9, output='fulltext'."""
    res = run_sql("SELECT name FROM idxINDEX WHERE id=%s", (index_id,))
    if res:
        return res[0][0]
    return ''
@memoize
def get_field_tags(field, tagtype="marc"):
    """Returns a list of tags for the field code 'field'. Works
    for both MARC and nonMARC tags.
    Returns empty list in case of error.
    Example: field='author', output=['100__%','700__%'].
    @param tagtype: can be: "marc" or "nonmarc", default value
    is "marc" for backward compatibility
    """
    # Imported locally to avoid a module-level circular import.
    from invenio.modules.search.models import Field
    return list(Field.get_field_tags(field, tagtype=tagtype))
def get_marc_tag_indexes(tag, virtual=True):
    """Returns indexes names and ids corresponding to the given tag
    @param tag: MARC tag in one of the forms:
        'xx%', 'xxx', 'xxx__a', 'xxx__%'
    @param virtual: if True function will also return virtual indexes
    @return: tuple of (index_id, index_name) rows; () when none match
    """
    tag2 = tag[0:2] + "%" #for tags in the form: 10%
    tag3 = tag[:-1] + "%" #for tags in the form: 100__%
    # The third WHERE alternative (%s placeholder in the template below)
    # is chosen depending on the shape of `tag`; %%s survives the string
    # formatting as a literal %s bind parameter.
    query = """SELECT DISTINCT w.id,w.name FROM idxINDEX AS w,
idxINDEX_field AS wf,
field_tag AS ft,
tag as t
WHERE (t.value=%%s OR
t.value=%%s OR
%s) AND
t.id=ft.id_tag AND
ft.id_field=wf.id_field AND
wf.id_idxINDEX=w.id"""
    if tag[-1] == "%":
        missing_piece = "t.value LIKE %s"
    elif tag[-1] != "%" and len(tag) == 3:
        missing_piece = "t.value LIKE %s"
        tag3 = tag + "%" #for all tags which start from 'tag'
    else:
        missing_piece = "t.value=%s"
    query = query % missing_piece
    res = run_sql(query, (tag, tag2, tag3))
    if res:
        if virtual:
            # Add the virtual indexes built on top of the matched ones.
            response = list(res)
            index_ids = map(str, zip(*res)[0])
            query = """SELECT DISTINCT v.id_virtual,w.name FROM idxINDEX_idxINDEX AS v,
idxINDEX as w
WHERE v.id_virtual=w.id AND
v.id_normal IN ("""
            # index_ids come from the database, not user input.
            query = query + ", ".join(index_ids) + ")"
            response.extend(run_sql(query))
            return tuple(response)
        return res
    return ()
def get_nonmarc_tag_indexes(nonmarc_tag, virtual=True):
    """Returns index names and ids corresponding to the given nonmarc tag
    (nonmarc tag can be also called 'bibfield field').
    If param 'virtual' is set to True function will also return
    virtual indexes
    @return: tuple of (index_id, index_name) rows; () when none match
    """
    # recjson_value is a comma-separated list; match the tag at the
    # beginning, middle, end, or as the sole value.
    query = """SELECT DISTINCT w.id, w.name FROM idxINDEX AS w,
idxINDEX_field AS wf,
field_tag AS ft,
tag as t
WHERE (t.recjson_value LIKE %s OR
t.recjson_value LIKE %s OR
t.recjson_value LIKE %s OR
t.recjson_value=%s) AND
t.id=ft.id_tag AND
ft.id_field=wf.id_field AND
wf.id_idxINDEX=w.id"""
    # NOTE(review): '%%' inside these bound values reaches MySQL as two
    # literal wildcard characters; LIKE treats them like one — verify.
    at_the_begining = nonmarc_tag + ',%%'
    in_the_middle = '%%,' + nonmarc_tag + ',%%'
    at_the_end = '%%,' + nonmarc_tag
    res = run_sql(query, (at_the_begining, in_the_middle, at_the_end, nonmarc_tag))
    if res:
        if virtual:
            # Add the virtual indexes built on top of the matched ones.
            response = list(res)
            index_ids = map(str, zip(*res)[0])
            query = """SELECT DISTINCT v.id_virtual,w.name FROM idxINDEX_idxINDEX AS v,
idxINDEX as w
WHERE v.id_virtual=w.id AND
v.id_normal IN ("""
            # index_ids come from the database, not user input.
            query = query + ", ".join(index_ids) + ")"
            response.extend(run_sql(query))
            return tuple(response)
        return res
    return ()
@memoize
def get_index_tags(indexname, virtual=True, tagtype="marc"):
    """Returns the list of tags that are indexed inside INDEXNAME.
    Returns empty list in case there are no tags indexed in this index.
    Note: uses get_field_tags() defined before.
    Example: field='author', output=['100__%', '700__%'].
    @param tagtype: can be: "marc" or "nonmarc", default value
    is "marc" for backward compatibility
    """
    out = []
    query = """SELECT f.code FROM idxINDEX AS w,
idxINDEX_field AS wf,
field AS f
WHERE w.name=%s AND
w.id=wf.id_idxINDEX AND
f.id=wf.id_field"""
    res = run_sql(query, (indexname,))
    for row in res:
        out.extend(get_field_tags(row[0], tagtype))
    if not out and virtual:
        # No direct fields: if this is a virtual index, collect the tags
        # of its component ("building block") indexes instead.
        index_id = get_index_id_from_index_name(indexname)
        try:
            dependent_indexes = map(str, zip(*get_virtual_index_building_blocks(index_id))[0])
        except IndexError:
            # Not a virtual index either; nothing to collect.
            return out
        tags = set()
        query = """SELECT DISTINCT f.code FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE w.id=wf.id_idxINDEX AND
f.id=wf.id_field AND
w.id IN ("""
        # dependent_indexes come from the database, not user input.
        query = query + ", ".join(dependent_indexes) + ")"
        res = run_sql(query)
        for row in res:
            tags |= set(get_field_tags(row[0], tagtype))
        out = list(tags)
    # Drop empty tag values.
    out = [tag for tag in out if tag]
    return out
def get_min_last_updated(indexes):
    """Returns min modification date for 'indexes':
       min(last_updated)
       @param indexes: list of index names
    """
    query = """SELECT min(last_updated) FROM idxINDEX WHERE name IN ("""
    # One %s placeholder per index name; drop the trailing comma.
    query += "%s," * len(indexes)
    query = query[:-1] + ")"
    return run_sql(query, tuple(indexes))
def remove_inexistent_indexes(indexes, leave_virtual=False):
    """Removes indexes that don't exist from the given list of indexes.
       @param indexes: list of indexes
       @param leave_virtual: should we leave virtual indexes in the list?
    """
    known_indexes = get_all_indexes(leave_virtual)
    # Keep only names present in the database, preserving input order.
    return [index for index in indexes if index in known_indexes]
def get_records_range_for_index(index_id):
    """
    Get the (min, max) id_bibrec range covered by the given index,
    or None when the table is missing/empty or the query fails.
    """
    try:
        rows = run_sql("""SELECT min(id_bibrec), max(id_bibrec) FROM idxWORD%02dR""" % index_id)
        # run_sql returns a tuple of rows; the single row holds (min, max).
        return rows[0] if rows else None
    except Exception:
        # Best-effort helper: any failure (e.g. missing table) yields None.
        return None
def make_prefix(index_name):
    """
    Creates a prefix for a specific index which is added
    to every word from this index stored in the reversed table
    of the corresponding virtual index.
    @param index_name: name of the dependent index we want to create prefix for
    """
    return "__%s__" % index_name
class UnknownTokenizer(Exception):
    """Signals a tokenizer that could not be recognized/resolved.

    NOTE(review): no raise sites are visible in this chunk -- semantics
    inferred from the name; confirm against callers.
    """
    pass
def list_union(list1, list2):
    "Returns union of the two lists."
    # A dict keyed by the elements de-duplicates while merging.
    union_map = dict.fromkeys(list1)
    union_map.update(dict.fromkeys(list2))
    return union_map.keys()
def get_index_fields(index_id):
    """Returns fields that are connected to index specified by
       index_id.
       @param index_id: id of the index in idxINDEX
       @return: tuple of (field id, field name) rows
    """
    query = """SELECT f.id, f.name FROM field as f,
               idxINDEX as w,
               idxINDEX_field as wf
               WHERE f.id=wf.id_field AND
               wf.id_idxINDEX=w.id AND
               w.id=%s
            """
    index_fields = run_sql(query, (index_id, ) )
    return index_fields
def recognize_marc_tag(tag):
    """Checks if tag is a MARC tag or not.
    A MARC tag is either 3-6 chars starting with three digits,
    or exactly two digits followed by a '%' wildcard."""
    length = len(tag)
    is_full_tag = 3 <= length <= 6 and tag[:3].isdigit()
    is_wildcard_tag = length == 3 and tag[:2].isdigit() and tag[2] == '%'
    return is_full_tag or is_wildcard_tag
def _is_collection(subfield):
    """Checks if a type is a collection;
    get_values_recursively internal function."""
    # Anything exposing __iter__ (list, tuple, dict, set, ...) is treated
    # as a collection.  NOTE(review): under Python 3 plain strings also
    # expose __iter__, unlike Python 2 -- confirm callers never pass str.
    return hasattr(subfield, '__iter__')
def _get_values(subfield):
"""Returns values of a subfield suitable for later tokenizing;
get_values_recursively internal function."""
if type(subfield) == dict:
return subfield.values()
else:
return subfield
def get_values_recursively(subfield, phrases):
    """Collects, depth first, every tokenizable value found in a
       field/subfield of a bibfield record into *phrases*.
       @param subfield: name of the field/subfield
       @param phrases: container for phrases (for example empty list)
       FIXME: move this function to bibfield!
              As soon as possible. Note that journal tokenizer
              also needs to be changed.
    """
    if _is_collection(subfield):
        # Recurse into every member of the collection.
        for member in _get_values(subfield):
            get_values_recursively(member, phrases)
        return
    if subfield is not None:
        phrases.append(utf8ifier(subfield))
|
OCA/vertical-isp | refs/heads/12.0 | connector_equipment_service/models/__init__.py | 1 | # Copyright (C) 2019 Open Source Integrators
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import maintenance_equipment
from . import agreement_serviceprofile
|
j1fig/green-catalyst | refs/heads/master | green-catalyst/probe.py | 2 | from time import sleep
from pigpio import pi
from dht11driver import DHT11
class Probe(object):
    """
    A small wrapper class around DHT11 and pigpio drivers
    that reports info to a web server.

    Use as a context manager:

        with Probe() as probe:
            probe.report()
    """

    def __enter__(self):
        """Acquire the pigpio handle and the DHT11 sensor; return the resource."""

        class ProbeResource(object):
            """Owns the pigpio connection and the DHT11 sensor instance."""

            def __init__(self):
                self.pi = pi()
                # GPIO 4 carries the sensor data; power=1 per dht11driver
                # (assumed to select a GPIO-powered sensor -- TODO confirm).
                self.dht = DHT11(self.pi, 4, power=1)
                self.reading_number = 0

            def report(self):
                """
                Takes a reading and posts it to a web server in JSON
                (currently: prints the reading to stdout).
                """
                self.dht.trigger()
                sleep(0.2)  # give the sensor time to answer before reading
                self.reading_number += 1
                print("{} {} {} {:3.2f} {} {} {} {}".format(
                    self.reading_number, self.dht.humidity(),
                    self.dht.temperature(), self.dht.staleness(),
                    self.dht.bad_checksum(), self.dht.short_message(),
                    self.dht.missing_message(), self.dht.sensor_resets()))

            def release(self):
                """Cancel the DHT11 callback and close the pigpio handle."""
                self.dht.cancel()
                self.pi.stop()

        self.resource = ProbeResource()
        return self.resource

    def __exit__(self, type, value, traceback):
        # BUG FIX: the original accessed self.dht / self.pi, which exist
        # only on the ProbeResource created in __enter__, so every context
        # exit raised AttributeError. Delegate cleanup to the resource.
        self.resource.release()
|
lyan6/genenetwork2 | refs/heads/master | wqflask/wqflask/correlation/correlation_functions.py | 1 | # Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# This program is available from Source Forge: at GeneNetwork Project
# (sourceforge.net/projects/genenetwork/).
#
# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
#
#
#
# This module is used by GeneNetwork project (www.genenetwork.org)
#
# Created by GeneNetwork Core Team 2010/08/10
#
# Last updated by NL 2011/03/23
from __future__ import absolute_import, print_function, division
import math
import rpy2.robjects
import pp
import string
from utility import webqtlUtil
from base.mrna_assay_tissue_data import MrnaAssayTissueData
from base.trait import GeneralTrait
from db import webqtlDatabaseFunction
from flask import Flask, g
#XZ: The input 'controls' is String. It contains the full name of control traits.
#XZ: The input variable 'strainlst' is List. It contains the strain names of primary trait.
#XZ: The returned tcstrains is the list of list [[],[]...]. So are tcvals and tcvars. The last returned parameter is list of numbers.
#XZ, 03/29/2010: For each returned control trait, there is no None value in it.
def controlStrains(controls, strainlst):
    """Load each control trait's data and align it to the primary strains.

    @param controls: comma-separated string of full control trait names
    @param strainlst: strain names of the primary trait
    @return: (tcstrains, tcvals, tcvars, counts) -- one inner list per
        control trait; None values are filtered out, so inner lists may be
        shorter than strainlst.  tcvars entries are always None here.

    NOTE(review): ``webqtlTrait`` is not imported in this module (only
    GeneralTrait is) -- as written this raises NameError; confirm the
    intended trait class before relying on this function.
    """
    controls = controls.split(',')
    cvals = {}
    # Fetch each control trait's data, keyed by its full name.
    for oneTraitName in controls:
        oneTrait = webqtlTrait(fullname=oneTraitName, cursor=webqtlDatabaseFunction.getCursor() )
        oneTrait.retrieveData()
        cvals[oneTraitName] = oneTrait.data
    tcstrains = []
    tcvals = []
    tcvars = []
    for oneTraitName in controls:
        strains = []
        vals = []
        vars = []
        # Keep only primary strains that have a non-None value in this
        # control trait (Python 2 dict.has_key).
        for _strain in strainlst:
            if cvals[oneTraitName].has_key(_strain):
                _val = cvals[oneTraitName][_strain].val
                if _val != None:
                    strains.append(_strain)
                    vals.append(_val)
                    vars.append(None)
        tcstrains.append(strains)
        tcvals.append(vals)
        tcvars.append(vars)
    return tcstrains, tcvals, tcvars, [len(x) for x in tcstrains]
#XZ, 03/29/2010: After execution of functions "controlStrains" and "fixStrains", primary trait and control traits have the same strains in the same order. There is no 'None' value in them.
def fixStrains(_strains,_controlstrains,_vals,_controlvals,_vars,_controlvars):
    """Corrects strains, vals, and vars so that all contain only those strains common
    to the reference trait and all control traits."""
    def dictify(strains,vals,vars):
        # Map strain name -> (value, variance) for O(1) membership tests.
        subdict = {}
        for i in xrange(len(strains)):
            subdict[strains[i]] = (vals[i],vars[i])
        return subdict
    #XZ: The 'dicts' is a list of dictionary. The first element is the dictionary of reference trait. The rest elements are for control traits.
    dicts = []
    dicts.append(dictify(_strains,_vals,_vars))
    nCstrains = len(_controlstrains)
    for i in xrange(nCstrains):
        dicts.append(dictify(_controlstrains[i],_controlvals[i],_controlvars[i]))
    # The parameters below are deliberately rebound: from here on they act
    # as the output containers (inputs were captured in 'dicts' above).
    _newstrains = []
    _vals = []
    _vars = []
    _controlvals = [[] for x in xrange(nCstrains)]
    _controlvars = [[] for x in xrange(nCstrains)]
    for strain in _strains:
        # Keep a strain only if every trait (reference + all controls) has it.
        inall = True
        for d in dicts:
            if strain not in d:
                inall = False
                break
        if inall:
            _newstrains.append(strain)
            _vals.append(dicts[0][strain][0])
            _vars.append(dicts[0][strain][1])
            for i in xrange(nCstrains):
                _controlvals[i].append(dicts[i+1][strain][0])
                _controlvars[i].append(dicts[i+1][strain][1])
    return _newstrains, _vals, _controlvals, _vars, _controlvars
#XZ, 6/15/2010: If there is no identical control traits, the returned list is empty.
#else, the returned list has two elements of control trait name.
def findIdenticalControlTraits ( controlVals, controlNames ):
    """Return the names of control traits whose value lists are identical
    at 3-decimal precision.

    @param controlVals: list of value lists, one per control trait
    @param controlNames: trait names parallel to controlVals
    @return: [] when no two control traits match; otherwise the names of
        every matching pair, appended pairwise (name_i, name_j, ...)

    Fix: the original "reset the precision" loop only rebound its loop
    variable, so values were compared at full float precision and the
    documented 3-decimal comparison never took effect.
    """
    nameOfIdenticalTraits = []
    controlTraitNumber = len(controlVals)
    if controlTraitNumber > 1:
        # Round every value to 3 decimals (string form) before comparing.
        rounded = [['%.3f' % val for val in traitVals] for traitVals in controlVals]
        for i, oneTraitVal in enumerate(rounded):
            for j in range(i+1, controlTraitNumber):
                if oneTraitVal == rounded[j]:
                    nameOfIdenticalTraits.append(controlNames[i])
                    nameOfIdenticalTraits.append(controlNames[j])
    return nameOfIdenticalTraits
#XZ, 6/15/2010: If there is no identical control traits, the returned list is empty.
#else, the returned list has two elements of control trait name.
#primaryVal is of list type. It contains value of primary trait.
#primaryName is of string type.
#controlVals is of list type. Each element is list too. Each element contain value of one control trait.
#controlNames is of list type.
def findIdenticalTraits (primaryVal, primaryName, controlVals, controlNames ):
    """Return the names of the first identical pair of traits, comparing
    values at 3-decimal precision.

    Control traits are checked pairwise first; only when no control pair
    matches is the primary trait compared against each control.

    @param primaryVal: value list of the primary trait
    @param primaryName: name of the primary trait
    @param controlVals: list of value lists, one per control trait
    @param controlNames: names parallel to controlVals
    @return: [] when nothing matches, otherwise a list of matching names

    Fix: the original precision-reset loops only rebound the loop
    variable, so the documented 3-decimal rounding never took effect.
    """
    nameOfIdenticalTraits = []
    # Round to 3 decimals (string form) before any comparison.
    primaryRounded = ['%.3f' % val for val in primaryVal]
    controlsRounded = [['%.3f' % val for val in traitVals] for traitVals in controlVals]
    controlTraitNumber = len(controlsRounded)
    if controlTraitNumber > 1:
        for i, oneTraitVal in enumerate(controlsRounded):
            for j in range(i+1, controlTraitNumber):
                if oneTraitVal == controlsRounded[j]:
                    nameOfIdenticalTraits.append(controlNames[i])
                    nameOfIdenticalTraits.append(controlNames[j])
                    break
    if len(nameOfIdenticalTraits) == 0:
        for i, oneTraitVal in enumerate(controlsRounded):
            if primaryRounded == oneTraitVal:
                nameOfIdenticalTraits.append(primaryName)
                nameOfIdenticalTraits.append(controlNames[i])
                break
    return nameOfIdenticalTraits
#XZ, 03/29/2010: The strains in primaryVal, controlVals, targetVals must be of the same number and in same order.
#XZ: No value in primaryVal and controlVals could be None.
def determinePartialsByR (primaryVal, controlVals, targetVals, targetNames, method='p'):
def compute_partial ( primaryVal, controlVals, targetVals, targetNames, method ):
rpy2.robjects.r("""
pcor.test <- function(x,y,z,use="mat",method="p",na.rm=T){
# The partial correlation coefficient between x and y given z
#
# pcor.test is free and comes with ABSOLUTELY NO WARRANTY.
#
# x and y should be vectors
#
# z can be either a vector or a matrix
#
# use: There are two methods to calculate the partial correlation coefficient.
# One is by using variance-covariance matrix ("mat") and the other is by using recursive formula ("rec").
# Default is "mat".
#
# method: There are three ways to calculate the correlation coefficient,
# which are Pearson's ("p"), Spearman's ("s"), and Kendall's ("k") methods.
# The last two methods which are Spearman's and Kendall's coefficient are based on the non-parametric analysis.
# Default is "p".
#
# na.rm: If na.rm is T, then all the missing samples are deleted from the whole dataset, which is (x,y,z).
# If not, the missing samples will be removed just when the correlation coefficient is calculated.
# However, the number of samples for the p-value is the number of samples after removing
# all the missing samples from the whole dataset.
# Default is "T".
x <- c(x)
y <- c(y)
z <- as.data.frame(z)
if(use == "mat"){
p.use <- "Var-Cov matrix"
pcor = pcor.mat(x,y,z,method=method,na.rm=na.rm)
}else if(use == "rec"){
p.use <- "Recursive formula"
pcor = pcor.rec(x,y,z,method=method,na.rm=na.rm)
}else{
stop("use should be either rec or mat!\n")
}
# print the method
if(gregexpr("p",method)[[1]][1] == 1){
p.method <- "Pearson"
}else if(gregexpr("s",method)[[1]][1] == 1){
p.method <- "Spearman"
}else if(gregexpr("k",method)[[1]][1] == 1){
p.method <- "Kendall"
}else{
stop("method should be pearson or spearman or kendall!\n")
}
# sample number
n <- dim(na.omit(data.frame(x,y,z)))[1]
# given variables' number
gn <- dim(z)[2]
# p-value
if(p.method == "Kendall"){
statistic <- pcor/sqrt(2*(2*(n-gn)+5)/(9*(n-gn)*(n-1-gn)))
p.value <- 2*pnorm(-abs(statistic))
}else{
statistic <- pcor*sqrt((n-2-gn)/(1-pcor^2))
p.value <- 2*pnorm(-abs(statistic))
}
data.frame(estimate=pcor,p.value=p.value,statistic=statistic,n=n,gn=gn,Method=p.method,Use=p.use)
}
# By using var-cov matrix
pcor.mat <- function(x,y,z,method="p",na.rm=T){
x <- c(x)
y <- c(y)
z <- as.data.frame(z)
if(dim(z)[2] == 0){
stop("There should be given data\n")
}
data <- data.frame(x,y,z)
if(na.rm == T){
data = na.omit(data)
}
xdata <- na.omit(data.frame(data[,c(1,2)]))
Sxx <- cov(xdata,xdata,m=method)
xzdata <- na.omit(data)
xdata <- data.frame(xzdata[,c(1,2)])
zdata <- data.frame(xzdata[,-c(1,2)])
Sxz <- cov(xdata,zdata,m=method)
zdata <- na.omit(data.frame(data[,-c(1,2)]))
Szz <- cov(zdata,zdata,m=method)
# is Szz positive definite?
zz.ev <- eigen(Szz)$values
if(min(zz.ev)[1]<0){
stop("\'Szz\' is not positive definite!\n")
}
# partial correlation
Sxx.z <- Sxx - Sxz %*% solve(Szz) %*% t(Sxz)
rxx.z <- cov2cor(Sxx.z)[1,2]
rxx.z
}
# By using recursive formula
pcor.rec <- function(x,y,z,method="p",na.rm=T){
#
x <- c(x)
y <- c(y)
z <- as.data.frame(z)
if(dim(z)[2] == 0){
stop("There should be given data\n")
}
data <- data.frame(x,y,z)
if(na.rm == T){
data = na.omit(data)
}
# recursive formula
if(dim(z)[2] == 1){
tdata <- na.omit(data.frame(data[,1],data[,2]))
rxy <- cor(tdata[,1],tdata[,2],m=method)
tdata <- na.omit(data.frame(data[,1],data[,-c(1,2)]))
rxz <- cor(tdata[,1],tdata[,2],m=method)
tdata <- na.omit(data.frame(data[,2],data[,-c(1,2)]))
ryz <- cor(tdata[,1],tdata[,2],m=method)
rxy.z <- (rxy - rxz*ryz)/( sqrt(1-rxz^2)*sqrt(1-ryz^2) )
return(rxy.z)
}else{
x <- c(data[,1])
y <- c(data[,2])
z0 <- c(data[,3])
zc <- as.data.frame(data[,-c(1,2,3)])
rxy.zc <- pcor.rec(x,y,zc,method=method,na.rm=na.rm)
rxz0.zc <- pcor.rec(x,z0,zc,method=method,na.rm=na.rm)
ryz0.zc <- pcor.rec(y,z0,zc,method=method,na.rm=na.rm)
rxy.z <- (rxy.zc - rxz0.zc*ryz0.zc)/( sqrt(1-rxz0.zc^2)*sqrt(1-ryz0.zc^2) )
return(rxy.z)
}
}
""")
R_pcorr_function = rpy2.robjects.r['pcor.test']
R_corr_test = rpy2.robjects.r['cor.test']
primary = rpy2.robjects.FloatVector(range(len(primaryVal)))
for i in range(len(primaryVal)):
primary[i] = primaryVal[i]
control = rpy2.robjects.r.matrix(rpy2.robjects.FloatVector( range(len(controlVals)*len(controlVals[0])) ), ncol=len(controlVals))
for i in range(len(controlVals)):
for j in range(len(controlVals[0])):
control[i*len(controlVals[0]) + j] = controlVals[i][j]
allcorrelations = []
for targetIndex, oneTargetVals in enumerate(targetVals):
this_primary = None
this_control = None
this_target = None
if None in oneTargetVals:
goodIndex = []
for i in range(len(oneTargetVals)):
if oneTargetVals[i] != None:
goodIndex.append(i)
this_primary = rpy2.robjects.FloatVector(range(len(goodIndex)))
for i in range(len(goodIndex)):
this_primary[i] = primaryVal[goodIndex[i]]
this_control = rpy2.robjects.r.matrix(rpy2.robjects.FloatVector( range(len(controlVals)*len(goodIndex)) ), ncol=len(controlVals))
for i in range(len(controlVals)):
for j in range(len(goodIndex)):
this_control[i*len(goodIndex) + j] = controlVals[i][goodIndex[j]]
this_target = rpy2.robjects.FloatVector(range(len(goodIndex)))
for i in range(len(goodIndex)):
this_target[i] = oneTargetVals[goodIndex[i]]
else:
this_primary = primary
this_control = control
this_target = rpy2.robjects.FloatVector(range(len(oneTargetVals)))
for i in range(len(oneTargetVals)):
this_target[i] = oneTargetVals[i]
one_name = targetNames[targetIndex]
one_N = len(this_primary)
#calculate partial correlation
one_pc_coefficient = 'NA'
one_pc_p = 1
try:
if method == 's':
result = R_pcorr_function(this_primary, this_target, this_control, method='s')
else:
result = R_pcorr_function(this_primary, this_target, this_control)
#XZ: In very few cases, the returned coefficient is nan.
#XZ: One way to detect nan is to compare the number to itself. NaN is always != NaN
if result[0][0] == result[0][0]:
one_pc_coefficient = result[0][0]
#XZ: when the coefficient value is 1 (primary trait and target trait are the same),
#XZ: occationally, the returned p value is nan instead of 0.
if result[1][0] == result[1][0]:
one_pc_p = result[1][0]
elif abs(one_pc_coefficient - 1) < 0.0000001:
one_pc_p = 0
except:
pass
#calculate zero order correlation
one_corr_coefficient = 0
one_corr_p = 1
try:
if method == 's':
R_result = R_corr_test(this_primary, this_target, method='spearman')
else:
R_result = R_corr_test(this_primary, this_target)
one_corr_coefficient = R_result[3][0]
one_corr_p = R_result[2][0]
except:
pass
traitinfo = [ one_name, one_N, one_pc_coefficient, one_pc_p, one_corr_coefficient, one_corr_p ]
allcorrelations.append(traitinfo)
return allcorrelations
#End of function compute_partial
allcorrelations = []
target_trait_number = len(targetVals)
if target_trait_number < 1000:
allcorrelations = compute_partial ( primaryVal, controlVals, targetVals, targetNames, method )
else:
step = 1000
job_number = math.ceil( float(target_trait_number)/step )
job_targetVals_lists = []
job_targetNames_lists = []
for job_index in range( int(job_number) ):
starti = job_index*step
endi = min((job_index+1)*step, target_trait_number)
one_job_targetVals_list = []
one_job_targetNames_list = []
for i in range( starti, endi ):
one_job_targetVals_list.append( targetVals[i] )
one_job_targetNames_list.append( targetNames[i] )
job_targetVals_lists.append( one_job_targetVals_list )
job_targetNames_lists.append( one_job_targetNames_list )
ppservers = ()
# Creates jobserver with automatically detected number of workers
job_server = pp.Server(ppservers=ppservers)
jobs = []
results = []
for i, one_job_targetVals_list in enumerate( job_targetVals_lists ):
one_job_targetNames_list = job_targetNames_lists[i]
#pay attention to modules from outside
jobs.append( job_server.submit(func=compute_partial, args=( primaryVal, controlVals, one_job_targetVals_list, one_job_targetNames_list, method), depfuncs=(), modules=("rpy2.robjects",)) )
for one_job in jobs:
one_result = one_job()
results.append( one_result )
for one_result in results:
for one_traitinfo in one_result:
allcorrelations.append( one_traitinfo )
return allcorrelations
#XZ, April 30, 2010: The input primaryTrait and targetTrait are instance of webqtlTrait
#XZ: The primaryTrait and targetTrait should have executed retrieveData function
def calZeroOrderCorr(primaryTrait, targetTrait, method='pearson'):
    """Zero-order (unadjusted) correlation between two traits via R's
    cor.test.

    Both traits must have had retrieveData() called already.
    @param method: 'pearson' (default) or 'spearman'
    @return: [coefficient, N, p-value] where N is the number of strains
        actually used (after dropping None target values)
    """
    #primaryTrait.retrieveData()
    #there is no None value in primary_val
    primary_strain, primary_val, primary_var = primaryTrait.exportInformative()
    #targetTrait.retrieveData()
    #there might be None value in target_val
    target_val = targetTrait.exportData(primary_strain, type="val")
    # Copy the primary values into an R float vector, element by element.
    R_primary = rpy2.robjects.FloatVector(range(len(primary_val)))
    for i in range(len(primary_val)):
        R_primary[i] = primary_val[i]
    N = len(target_val)
    if None in target_val:
        # Rebuild both vectors with only the positions where the target
        # trait has a value, keeping the pairing index-aligned.
        goodIndex = []
        for i in range(len(target_val)):
            if target_val[i] != None:
                goodIndex.append(i)
        N = len(goodIndex)
        R_primary = rpy2.robjects.FloatVector(range(len(goodIndex)))
        for i in range(len(goodIndex)):
            R_primary[i] = primary_val[goodIndex[i]]
        R_target = rpy2.robjects.FloatVector(range(len(goodIndex)))
        for i in range(len(goodIndex)):
            R_target[i] = target_val[goodIndex[i]]
    else:
        R_target = rpy2.robjects.FloatVector(range(len(target_val)))
        for i in range(len(target_val)):
            R_target[i] = target_val[i]
    R_corr_test = rpy2.robjects.r['cor.test']
    if method == 'spearman':
        R_result = R_corr_test(R_primary, R_target, method='spearman')
    else:
        R_result = R_corr_test(R_primary, R_target)
    # cor.test result: index 3 holds the estimate, index 2 the p-value.
    corr_result = []
    corr_result.append( R_result[3][0] )
    corr_result.append( N )
    corr_result.append( R_result[2][0] )
    return corr_result
#####################################################################################
#Input: primaryValue(list): one list of expression values of one probeSet,
# targetValue(list): one list of expression values of one probeSet,
# method(string): indicate correlation method ('pearson' or 'spearman')
#Output: corr_result(list): first item is Correlation Value, second item is tissue number,
# third item is PValue
#Function: get correlation value,Tissue quantity ,p value result by using R;
#Note : This function is a special case since both primaryValue and targetValue are from
#the same dataset. So the length of these two parameters is the same. They are pairs.
#Also, in the datatable TissueProbeSetData, all Tissue values are loaded based on
#the same tissue order
#####################################################################################
def cal_zero_order_corr_for_tiss (primaryValue=[], targetValue=[], method='pearson'):
    """Correlate two equal-length, index-aligned tissue value lists via R.

    See the banner comment above: both inputs come from the same dataset,
    so they are already paired by position.
    @return: [coefficient, N, p-value]
    """
    # Copy the Python lists into R float vectors element by element.
    R_primary = rpy2.robjects.FloatVector(range(len(primaryValue)))
    N = len(primaryValue)
    for i in range(len(primaryValue)):
        R_primary[i] = primaryValue[i]
    R_target = rpy2.robjects.FloatVector(range(len(targetValue)))
    for i in range(len(targetValue)):
        R_target[i]=targetValue[i]
    R_corr_test = rpy2.robjects.r['cor.test']
    if method =='spearman':
        R_result = R_corr_test(R_primary, R_target, method='spearman')
    else:
        R_result = R_corr_test(R_primary, R_target)
    # cor.test result: index 3 holds the estimate, index 2 the p-value.
    corr_result =[]
    corr_result.append( R_result[3][0])
    corr_result.append( N )
    corr_result.append( R_result[2][0])
    return corr_result
def batchCalTissueCorr(primaryTraitValue=[], SymbolValueDict={}, method='pearson'):
    """Correlate the primary trait's tissue values against every symbol in
    SymbolValueDict.

    Small inputs (<= 1000 symbols) are computed inline; larger inputs are
    split into 1000-symbol chunks fanned out through Parallel Python (pp).
    @param method: 'pearson' (default) or 'spearman'
    @return: (symbolCorrDict, symbolPvalueDict) keyed by symbol
    """
    def cal_tissue_corr(primaryTraitValue, oneSymbolValueDict, method ):
        # Worker: correlate the primary values against one chunk of symbols.
        oneSymbolCorrDict = {}
        oneSymbolPvalueDict = {}
        R_corr_test = rpy2.robjects.r['cor.test']
        R_primary = rpy2.robjects.FloatVector(range(len(primaryTraitValue)))
        for i in range(len(primaryTraitValue)):
            R_primary[i] = primaryTraitValue[i]
        for (oneTraitSymbol, oneTraitValue) in oneSymbolValueDict.iteritems():
            R_target = rpy2.robjects.FloatVector(range(len(oneTraitValue)))
            for i in range(len(oneTraitValue)):
                R_target[i] = oneTraitValue[i]
            if method =='spearman':
                R_result = R_corr_test(R_primary, R_target, method='spearman')
            else:
                R_result = R_corr_test(R_primary, R_target)
            # cor.test: index 3 = estimate, index 2 = p-value.
            oneSymbolCorrDict[oneTraitSymbol] = R_result[3][0]
            oneSymbolPvalueDict[oneTraitSymbol] = R_result[2][0]
        return(oneSymbolCorrDict, oneSymbolPvalueDict)
    symbolCorrDict = {}
    symbolPvalueDict = {}
    items_number = len(SymbolValueDict)
    if items_number <= 1000:
        symbolCorrDict, symbolPvalueDict = cal_tissue_corr(primaryTraitValue, SymbolValueDict, method)
    else:
        # Split the symbol dict into chunks of at most 'step' entries.
        items_list = SymbolValueDict.items()
        step = 1000
        job_number = math.ceil( float(items_number)/step )
        job_oneSymbolValueDict_list = []
        for job_index in range( int(job_number) ):
            starti = job_index*step
            endi = min((job_index+1)*step, items_number)
            oneSymbolValueDict = {}
            for i in range( starti, endi ):
                one_item = items_list[i]
                one_symbol = one_item[0]
                one_value = one_item[1]
                oneSymbolValueDict[one_symbol] = one_value
            job_oneSymbolValueDict_list.append( oneSymbolValueDict )
        ppservers = ()
        # Creates jobserver with automatically detected number of workers
        job_server = pp.Server(ppservers=ppservers)
        jobs = []
        results = []
        for i, oneSymbolValueDict in enumerate( job_oneSymbolValueDict_list ):
            #pay attention to modules from outside
            jobs.append( job_server.submit(func=cal_tissue_corr, args=(primaryTraitValue, oneSymbolValueDict, method), depfuncs=(), modules=("rpy2.robjects",)) )
        # Collect each job's partial dicts and merge them into the outputs.
        for one_job in jobs:
            one_result = one_job()
            results.append( one_result )
        for one_result in results:
            oneSymbolCorrDict, oneSymbolPvalueDict = one_result
            symbolCorrDict.update( oneSymbolCorrDict )
            symbolPvalueDict.update( oneSymbolPvalueDict )
    return (symbolCorrDict, symbolPvalueDict)
###########################################################################
#Input: cursor, GeneNameLst (list), TissueProbeSetFreezeId
#output: geneIdDict,dataIdDict,ChrDict,MbDict,descDict,pTargetDescDict (Dict)
#function: get multi dicts for short and long label functions, and for getSymbolValuePairDict and
# getGeneSymbolTissueValueDict to build dict to get CorrPvArray
#Note: If there are multiple probesets for one gene, select the one with highest mean.
###########################################################################
def getTissueProbeSetXRefInfo(GeneNameLst=[],TissueProbeSetFreezeId=0):
    """Fetch probeset cross-reference info for the given gene symbols
    (all symbols when GeneNameLst is empty), picking, per symbol, the
    probeset with the highest mean expression.

    @return: (symbolList, geneIdDict, dataIdDict, ChrDict, MbDict,
        descDict, pTargetDescDict); all dict keys are lower-cased symbols.
        On any failure every return value is None.

    NOTE(review): ``cursor`` is not defined or imported in this module --
    as written the query execution raises NameError (then the bare except
    nulls all outputs).  Also, GeneNameLst is interpolated into the SQL
    as quoted strings; confirm inputs are trusted (injection risk).
    """
    Symbols =""
    symbolList =[]
    geneIdDict ={}
    dataIdDict = {}
    ChrDict = {}
    MbDict = {}
    descDict = {}
    pTargetDescDict = {}
    count = len(GeneNameLst)
    # Added by NL 01/06/2011
    # Note that:inner join is necessary in this query to get distinct record in one symbol group with highest mean value
    # Due to the limited size of the TissueProbeSetFreezeId table in DB, performance of inner join is acceptable.
    if count==0:
        query='''
        select t.Symbol,t.GeneId, t.DataId,t.Chr, t.Mb,t.description,t.Probe_Target_Description
        from (
        select Symbol, max(Mean) as maxmean
        from TissueProbeSetXRef
        where TissueProbeSetFreezeId=%s and Symbol!='' and Symbol Is Not Null group by Symbol)
        as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol and t.Mean = x.maxmean;
        '''%TissueProbeSetFreezeId
    else:
        # Build a quoted, comma-separated "(sym1,sym2,...)" IN-clause.
        for i, item in enumerate(GeneNameLst):
            if i == count-1:
                Symbols += "'%s'" %item
            else:
                Symbols += "'%s'," %item
        Symbols = "("+ Symbols+")"
        query='''
        select t.Symbol,t.GeneId, t.DataId,t.Chr, t.Mb,t.description,t.Probe_Target_Description
        from (
        select Symbol, max(Mean) as maxmean
        from TissueProbeSetXRef
        where TissueProbeSetFreezeId=%s and Symbol in %s group by Symbol)
        as x inner join TissueProbeSetXRef as t on t.Symbol = x.Symbol and t.Mean = x.maxmean;
        '''% (TissueProbeSetFreezeId,Symbols)
    try:
        cursor.execute(query)
        results =cursor.fetchall()
        resultCount = len(results)
        # Key in all dicts is the lower-cased symbol
        for i, item in enumerate(results):
            symbol = item[0]
            symbolList.append(symbol)
            key =symbol.lower()
            geneIdDict[key]=item[1]
            dataIdDict[key]=item[2]
            ChrDict[key]=item[3]
            MbDict[key]=item[4]
            descDict[key]=item[5]
            pTargetDescDict[key]=item[6]
    except:
        # NOTE(review): bare except silently hides all failures.
        symbolList = None
        geneIdDict=None
        dataIdDict=None
        ChrDict=None
        MbDict=None
        descDict=None
        pTargetDescDict=None
    return symbolList,geneIdDict,dataIdDict,ChrDict,MbDict,descDict,pTargetDescDict
###########################################################################
#Input: cursor, symbolList (list), dataIdDict(Dict)
#output: symbolValuepairDict (dictionary):one dictionary of Symbol and Value Pair,
# key is symbol, value is one list of expression values of one probeSet;
#function: get one dictionary whose key is gene symbol and value is tissue expression data (list type).
#Attention! All keys are lower case!
###########################################################################
def get_symbol_value_pairs(tissue_data):
    """Return {gene_symbol: [expression values]} for every symbol in
    *tissue_data*.

    @param tissue_data: mapping of (lower-cased) gene symbol -> object
        exposing a ``data_id`` attribute (see MrnaAssayTissueData)
    @return: dict keyed by the same symbols; a symbol whose query fails
        maps to None

    Fixes vs. the original: the old body referenced an undefined name
    ``symbol`` (its comprehension iterated ``item``), called the
    undefined helper ``create_in_clause``, and reused one shared
    ``value_list`` for every symbol -- it could never have run.  This
    version restores the per-symbol behaviour sketched in the old
    commented-out code, with a parameterized query instead of string
    interpolation.
    """
    symbol_value_pairs = {}
    for symbol in tissue_data:
        data_id = tissue_data[symbol].data_id
        # One query per symbol: all expression values stored under its
        # data id in TissueProbeSetData.
        query = """SELECT value, id
                    FROM TissueProbeSetData
                    WHERE Id=%s"""
        try:
            results = g.db.execute(query, (data_id,)).fetchall()
            symbol_value_pairs[symbol] = [result.value for result in results]
        except Exception:
            # Best-effort, matching the original contract: failed lookups
            # yield None rather than aborting the whole batch.
            symbol_value_pairs[symbol] = None
    return symbol_value_pairs
########################################################################################################
#input: cursor, symbolList (list), dataIdDict(Dict): key is symbol
#output: SymbolValuePairDict(dictionary):one dictionary of Symbol and Value Pair.
# key is symbol, value is one list of expression values of one probeSet.
#function: wrapper function for getSymbolValuePairDict function
# build gene symbol list if necessary, cut it into small lists if necessary,
# then call getSymbolValuePairDict function and merge the results.
########################################################################################################
def get_trait_symbol_and_tissue_values(symbol_list=None):
    """Return {symbol: [tissue expression values]} for the given gene
    symbols (all known symbols when *symbol_list* is None).

    @param symbol_list: list of gene symbols, or None for every symbol
        known to the tissue dataset
    @return: dict of symbol -> value list, or None when the dataset
        resolves no symbols (made explicit here; the original fell off
        the end of the function implicitly)
    """
    tissue_data = MrnaAssayTissueData(gene_symbols=symbol_list)
    if len(tissue_data.gene_symbols):
        return tissue_data.get_symbol_values_pairs()
    return None
########################################################################################################
#input: cursor, GeneNameLst (list), TissueProbeSetFreezeId(int)
#output: SymbolValuePairDict(dictionary):one dictionary of Symbol and Value Pair.
# key is symbol, value is one list of expression values of one probeSet.
#function: wrapper function of getGeneSymbolTissueValueDict function
# for CorrelationPage.py
########################################################################################################
#def get_trait_symbol_and_tissue_values(cursor=None,GeneNameLst=[],TissueProbeSetFreezeId=0):
# SymbolValuePairDict={}
#
# symbolList,geneIdDict,dataIdDict,ChrDict,MbDict,descDict,pTargetDescDict = getTissueProbeSetXRefInfo(
# cursor=cursor,GeneNameLst=GeneNameLst,TissueProbeSetFreezeId=TissueProbeSetFreezeId)
#
# if symbolList:
# SymbolValuePairDict = get_gene_symbol_and_tissue_values(symbolList=symbolList,
# dataIdDict=dataIdDict)
#
# return SymbolValuePairDict
########################################################################################################
#Input: cursor(cursor): MySQL connnection cursor;
# priGeneSymbolList(list): one list of gene symbol;
# symbolValuepairDict(dictionary): one dictionary of Symbol and Value Pair,
# key is symbol, value is one list of expression values of one probeSet;
#Output: corrArray(array): array of Correlation Value,
# pvArray(array): array of PValue;
#Function: build corrArray, pvArray for display by calling calculation function:calZeroOrderCorrForTiss
########################################################################################################
def getCorrPvArray(cursor=None,priGeneSymbolList=[],symbolValuepairDict={}):
    """Build pairwise correlation and p-value matrices for a symbol list.

    Lower triangle (i > j): Pearson; upper triangle (i < j): Spearman;
    diagonal: correlation 1, p-value 0.  Cells for which either symbol's
    value list is missing are None.

    @param cursor: unused, kept for interface compatibility
    @param priGeneSymbolList: gene symbols (any case)
    @param symbolValuepairDict: lower-cased symbol -> list of values
    @return: (corrArray, pvArray), both Num x Num nested lists

    Fixes vs. the original:
    - calls cal_zero_order_corr_for_tiss (the original called the
      non-existent name calZeroOrderCorrForTiss -> NameError);
    - a missing primary symbol now blanks its entire row (the original
      only set column 0 to None and left the rest at 0).
    """
    num = len(priGeneSymbolList)
    corrArray = [[0] * num for _ in range(num)]
    pvArray = [[0] * num for _ in range(num)]
    # Keys in symbolValuepairDict are lower case.
    keys = [symbol.strip().lower() for symbol in priGeneSymbolList]
    for i, pkey in enumerate(keys):
        priValue = symbolValuepairDict.get(pkey)
        for j, tkey in enumerate(keys):
            tarValue = symbolValuepairDict.get(tkey)
            if not (priValue and tarValue):
                corrArray[i][j] = None
                pvArray[i][j] = None
            elif i > j:
                # Lower triangle stores Pearson correlation / p-value.
                result = cal_zero_order_corr_for_tiss(primaryValue=priValue,
                                                      targetValue=tarValue)
                corrArray[i][j] = result[0]
                pvArray[i][j] = result[2]
            elif i < j:
                # Upper triangle stores Spearman correlation / p-value.
                result = cal_zero_order_corr_for_tiss(primaryValue=priValue,
                                                      targetValue=tarValue,
                                                      method='spearman')
                corrArray[i][j] = result[0]
                pvArray[i][j] = result[2]
            else:
                # Diagonal: a trait correlates perfectly with itself.
                corrArray[i][j] = 1
                pvArray[i][j] = 0
    return corrArray, pvArray
########################################################################################################
#Input: cursor (cursor): MySQL connection cursor;
#       primaryTraitSymbol (string): one gene symbol;
#       TissueProbeSetFreezeId (int): Id of the related TissueProbeSetFreeze;
#       method: '0' (default) for Pearson correlation; '1' for Spearman correlation
#Output: symbolCorrDict (dict): dictionary of correlation values, keyed by symbol;
#        symbolPvalueDict (dict): dictionary of P-values, keyed by symbol
#Function: build symbolCorrDict and symbolPvalueDict for display by calling the calculation function batchCalTissueCorr
########################################################################################################
def calculateCorrOfAllTissueTrait(cursor=None, primaryTraitSymbol=None, TissueProbeSetFreezeId=None, method='0'):
    """Correlate one primary trait against every available tissue trait.

    Returns (symbolCorrDict, symbolPvalueDict): correlation values and
    P-values keyed by gene symbol.  method '0' selects Pearson (the
    default) and '1' selects Spearman; both are delegated to
    batchCalTissueCorr.
    """
    # Fetch the expression values of the primary trait (single-entry dict).
    primaryDict = getGeneSymbolTissueValueDictForTrait(
        cursor=cursor,
        GeneNameLst=[primaryTraitSymbol],
        TissueProbeSetFreezeId=TissueProbeSetFreezeId)
    primaryTraitValue = list(primaryDict.values())[0]
    # An empty GeneNameLst retrieves the values for every available symbol.
    allSymbolValueDict = getGeneSymbolTissueValueDictForTrait(
        cursor=cursor,
        GeneNameLst=[],
        TissueProbeSetFreezeId=TissueProbeSetFreezeId)
    if method == '1':
        symbolCorrDict, symbolPvalueDict = batchCalTissueCorr(
            primaryTraitValue, allSymbolValueDict, method='spearman')
    else:
        symbolCorrDict, symbolPvalueDict = batchCalTissueCorr(
            primaryTraitValue, allSymbolValueDict)
    return (symbolCorrDict, symbolPvalueDict)
|
40023256/2015cdag1man | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/site-packages/editor.py | 84 | # -*- coding: utf-8 -*-
import sys
import time
import traceback
import dis
from browser import document as doc, window, alert, ajax
# set height of container to 66% of screen
_height = doc.documentElement.clientHeight
_s = doc['container']
_s.style.height = '%spx' % int(_height * 0.66)

# Try to construct an Ace editor widget; fall back to a plain <textarea>
# dressed with an Ace-compatible getValue/setValue facade when Ace is
# unavailable.
has_ace = True
try:
    editor = window.ace.edit("editor")
    session = editor.getSession()
    session.setMode("ace/mode/python")
    editor.setOptions({
        'width': '390px;',
        'enableLiveAutocompletion': True,
        'enableSnippets': True,
        'highlightActiveLine': False,
        'highlightSelectedWord': True
    })
except:
    # NOTE(review): bare except -- any failure (not only a missing Ace
    # library) silently selects the textarea fallback.
    from browser import html
    editor = html.TEXTAREA(rows=20, cols=70)
    doc["editor"] <= editor
    def get_value(): return editor.value
    def set_value(x): editor.value = x
    editor.getValue = get_value
    editor.setValue = set_value
    has_ace = False

# Persist the program source across page reloads when the browser offers
# local storage; otherwise 'storage' stays None and saving is skipped.
if sys.has_local_storage:
    from local_storage import storage
else:
    storage = None

# Propagate the page's debug checkbox into the Brython runtime.
if 'set_debug' in doc:
    __BRYTHON__.debug = int(doc['set_debug'].checked)
def reset_src():
    """Load the saved program from local storage into the Ace editor, or
    the default demo program (a Chinese guess-the-number game) when
    nothing has been saved."""
    if storage is not None and "py_src" in storage:
        editor.setValue(storage["py_src"])
    else:
        editor.setValue('''#coding: utf-8
# 猜數字遊戲
import random
標準答案 = random.randint(1, 100)
print(標準答案)
你猜的數字 = int(input("請輸入您所猜的整數:"))
猜測次數 = 1
while 標準答案 != 你猜的數字:
    if 標準答案 < 你猜的數字:
        print("太大了,再猜一次 :)加油")
    else:
        print("太小了,再猜一次 :)加油")
    你猜的數字 = int(input("請輸入您所猜的整數:"))
    猜測次數 += 1
print("猜對了!總共猜了", 猜測次數, "次")
''')
def reset_src_area():
    """Textarea-fallback twin of reset_src(): load the saved program (or
    the default guess-the-number demo) directly into editor.value."""
    if storage and "py_src" in storage:
        editor.value = storage["py_src"]
    else:
        editor.value = '''#coding: utf-8
# 猜數字遊戲
import random
標準答案 = random.randint(1, 100)
print(標準答案)
你猜的數字 = int(input("請輸入您所猜的整數:"))
猜測次數 = 1
while 標準答案 != 你猜的數字:
    if 標準答案 < 你猜的數字:
        print("太大了,再猜一次 :)加油")
    else:
        print("太小了,再猜一次 :)加油")
    你猜的數字 = int(input("請輸入您所猜的整數:"))
    猜測次數 += 1
print("猜對了!總共猜了", 猜測次數, "次")
'''
class cOutput:
    """Minimal file-like object that mirrors writes into the page's
    console <textarea>, so it can stand in for sys.stdout/sys.stderr."""

    def write(self, data):
        # Coerce to text and append to the visible console widget.
        text = str(data)
        doc["console"].value += text

    def flush(self):
        # Nothing is buffered locally, so flushing is a no-op.
        pass
# Route all interpreter output (stdout and stderr) into the page console.
sys.stdout = cOutput()
sys.stderr = cOutput()

def to_str(xx):
    """Coerce any object to its string representation."""
    return str(xx)

# Last captured console output; refreshed by run(), replayed by show_console().
output = ''
def show_console(ev):
    """Restore the output captured by the last run() into the console."""
    console = doc["console"]
    console.value = output
    console.cols = 60
# load a Python script
def load_script(evt):
    """Fetch the selected example script into the editor.

    The '?foo=<timestamp>' query string busts the browser cache so the
    freshest copy is always loaded.
    NOTE(review): an identical load_script is defined again further down
    this file; that later definition is the one actually in effect.
    """
    _name = evt.target.value + '?foo=%s' % time.time()
    editor.setValue(open(_name).read())
def err_msg():
    """Ajax timeout callback: report that the CPython timing request got
    no answer.

    BUG FIX: the original formatted the message with an undefined global
    ``timeout`` and therefore raised NameError whenever the callback
    fired.  The request is armed with req.set_timeout(4, err_msg) in
    run(), so the four-second limit is stated directly.
    """
    doc["result"].html = "server didn't reply after 4 seconds"
def on_complete(req):
    """Ajax success callback: print the server's timing reply."""
    body = req.text
    print(body)
# run a script, in global namespace if in_globals is True
def run(in_globals=False):
    """Execute the editor's source in the browser, then time it server-side.

    Runs the code with exec() (in the module globals when in_globals is
    True, otherwise in a fresh namespace), records its console output and
    Brython run time, and POSTs the same source to /cgi-bin/speed.py for
    a CPython timing comparison.  Returns 1 on success, 0 when the
    executed source raised.
    """
    global output
    doc["console"].value = ''
    src = editor.getValue()
    # Remember the program so it survives page reloads.
    if storage is not None:
        storage["py_src"] = src
    t0 = time.perf_counter()
    try:
        # NOTE(review): exec() of user-editable source is intentional here
        # (this IS the online interpreter); it runs client-side only.
        if(in_globals):
            exec(src)
        else:
            ns = {}
            exec(src, ns)
        state = 1
    except Exception as exc:
        traceback.print_exc(file=sys.stderr)
        state = 0
    # Capture whatever the program printed, for later replay.
    output = doc["console"].value
    print('Brython: %6.2f ms' % ((time.perf_counter() - t0) * 1000.0))
    # run with CPython
    req = ajax.ajax()
    req.bind('complete', on_complete)
    req.set_timeout(4, err_msg)
    req.open('POST', '/cgi-bin/speed.py', True)
    req.set_header('content-type', 'application/x-www-form-urlencoded')
    req.send({'src': src})
    return state
# load a Python script
def load_script(evt):
    """Fetch the selected example script into the editor (cache-busted
    with a timestamp query string).

    NOTE(review): duplicate of the load_script defined earlier in this
    file; being the later definition, this one wins.
    """
    _name = evt.target.value + '?foo=%s' % time.time()
    editor.setValue(open(_name).read())
def show_js(ev):
    """Show the disassembly of the current source in the console.

    NOTE(review): in CPython dis.dis() prints to stdout and returns None;
    this assignment relies on Brython's dis.dis returning the dump as a
    string -- confirm before reusing outside Brython.
    """
    src = editor.getValue()
    doc["console"].value = dis.dis(src)
# Yen defined
def clear_text(ev):
    """Blank the console, the editor contents, and the saved source."""
    doc["console"].value = ''
    editor.setValue('')
    if sys.has_local_storage:
        storage["py_src"] = ''
def clear_src(ev):
    """Blank the editor and the source saved in local storage."""
    editor.setValue('')
    if sys.has_local_storage:
        storage["py_src"] = ''
def clear_canvas(ev):
    """Wipe the plot canvas without disturbing its current transform."""
    canvas = doc["plotarea"]
    context = canvas.getContext("2d")
    # Remember the active transformation matrix.
    context.save()
    # Clear under the identity transform so the whole surface is covered.
    context.setTransform(1, 0, 0, 1, 0, 0)
    context.clearRect(0, 0, canvas.width, canvas.height)
    # Put the original transform back.
    context.restore()
def clear_console(ev):
    """Empty the on-page console widget."""
    doc["console"].value = ''
def change_theme(evt):
    """Apply the Ace theme chosen in the <select> and persist the choice."""
    chosen = evt.target.value
    editor.setTheme(chosen)
    if storage:
        storage["ace_theme"] = chosen

# Re-theme the editor whenever the user picks a different entry.
doc["ace_theme"].bind("change", change_theme)
def reset_theme():
    """Re-apply the Ace theme previously saved in local storage, if any."""
    if storage and "ace_theme" in storage:
        editor.setTheme(storage["ace_theme"])
        doc["ace_theme"].value = storage["ace_theme"]
def reset_the_src(ev):
    """Reload the saved/default program into whichever editor is active."""
    if not has_ace:
        reset_src_area()
        return
    reset_src()
    reset_theme()
# On page load, populate whichever editor variant was constructed above.
if has_ace:
    reset_src()
else:
    reset_src_area()
|
leiferikb/bitpop | refs/heads/master | build/third_party/requests_1_2_3/requests/auth.py | 78 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .utils import parse_dict_header
log = logging.getLogger(__name__)

# Request-body content types referenced by other parts of requests.
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from."""

    def __call__(self, r):
        # Subclasses attach credentials to the request ``r`` and return it.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        # Credentials are kept verbatim; encoding happens on each call.
        self.username = username
        self.password = password

    def __call__(self, r):
        # Overwrite any existing Authorization header with Basic credentials.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = auth_header
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same Basic credentials, but under the proxy-specific header name.
        token = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = token
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # State carried between challenges so nonce counting works.
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}

    def build_digest_header(self, method, url):
        """Build the Digest ``Authorization`` header value from the last
        challenge stored in ``self.chal``.

        Returns None when the challenge uses an unsupported algorithm or
        an unsupported qop (anything other than 'auth' or absent).
        """
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm')
        opaque = self.chal.get('opaque')

        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # BUG FIX: initialize hash_utf8 so that an unrecognized algorithm
        # falls through to the `return None` below instead of raising
        # NameError at the `if hash_utf8 is None` check.
        hash_utf8 = None
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8
        # XXX MD5-sess
        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        # RFC 2617 section 3.2.2.2: A1/A2 strings fed to the digest.
        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        if qop == 'auth':
            # Reusing the same nonce requires an incremented nonce count.
            if nonce == self.last_nonce:
                self.nonce_count += 1
            else:
                self.nonce_count = 1

            ncvalue = '%08x' % self.nonce_count
            # Client nonce: hash of count, nonce, wall time, and random bytes.
            s = str(self.nonce_count).encode('utf-8')
            s += nonce.encode('utf-8')
            s += time.ctime().encode('utf-8')
            s += os.urandom(8)

            cnonce = (hashlib.sha1(s).hexdigest()[:16])
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
            respdig = KD(hash_utf8(A1), noncebit)
        elif qop is None:
            respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            # Only reachable when qop == 'auth', so ncvalue/cnonce exist.
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r, **kwargs):
        """Takes the given response and tries digest-auth, if needed."""
        num_401_calls = getattr(self, 'num_401_calls', 1)
        s_auth = r.headers.get('www-authenticate', '')

        if 'digest' in s_auth.lower() and num_401_calls < 2:
            setattr(self, 'num_401_calls', num_401_calls + 1)
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()

            r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
            _r = r.connection.send(r.request, **kwargs)
            _r.history.append(r)

            return _r

        setattr(self, 'num_401_calls', 1)
        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        r.register_hook('response', self.handle_401)
        return r
|
ryfeus/lambda-packs | refs/heads/master | Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/osgeo/_gdal.py | 1 | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_gdal.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
ianatpn/nupictest | refs/heads/master | nupic/regions/extra/GaborNode2.py | 8 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import ctypes
import numpy
try:
# Not normally needed. Not available in demo app.
import hotshot
except:
pass
# Attempt to import OpenCV's ctypes-based bindings
try:
from opencv.cvtypes import cv
except:
cv = None
from StringIO import StringIO
from PIL import (Image,
ImageChops)
from nupic.regions.PyRegion import PyRegion, RealNumpyDType
from nupic.regions.Spec import *
# Global counter used for some debugging operations
# NOTE(review): this module-level name shadows the builtin id() within
# this module -- confirm nothing below relies on the builtin.
id = 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# GaborNode
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class GaborNode2(PyRegion):
"""
Performs dense Gabor filtering upon a multi-resolution grid.
"""
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Class constants
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# The minimum filter size dimension (3x3)
minFilterDim = 3
# The minimum filter size dimension (3x3)
minNumOrients = 0
# List of filter dimensions supported by the optimized
# C library
_optimizedFilterDims = [5, 7, 9, 11, 13]
# Valid parameter values
_validValues = {
'phaseMode': ('single', 'dual'),
'targetType': ('edge', 'line'),
'boundaryMode': ('constrained', 'sweepOff'),
'normalizationMethod': ('fixed', 'max', 'mean'),
'postProcessingMethod': ('raw', 'sigmoid', 'threshold'),
'nta_morphologyMethod': ('best', 'opencv', 'nta'),
}
# Default parameter values
_defaults = {
# Documented parameters:
'filterDim': 9,
'numOrientations': 4,
'phaseMode': 'single',
'centerSurround': False,
'targetType': 'edge',
'gainConstant': 1.0,
'normalizationMethod': 'fixed',
'perPlaneNormalization': False,
'perPhaseNormalization': True,
'postProcessingMethod': 'raw',
'postProcessingSlope': 1.0,
'postProcessingCenter': 0.5,
'postProcessingMin': 0.0,
'postProcessingMax': 1.0,
'zeroThresholdOut': 0.0,
'boundaryMode': 'constrained',
'offImagePixelValue': 0,
'suppressOutsideBox': True,
'forceBoxContraction': False,
'suppressByAlpha': False,
'logPrefix': None,
# Undocumented parameters:
'nta_aspectRatio': 0.3,
'nta_effectiveWidth': 4.5,
'nta_wavelength': 5.6,
'nta_lobeSuppression': True,
'nta_debugLogBuffers': False,
'nta_morphologyMethod': 'best',
}
# Our C implementation performs the 2D convolution using
# integer math, but scales the operands to preserve
# precision. The scaling is done by left shifting the Gabor
# filter coefficients by a fixed number of bits:
_integerMathShifts = 12 # 2^12 = 4096
_integerMathScale = 1 << _integerMathShifts
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public API calls
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def __init__(self,
# Filter size:
filterDim=None,
# Filter responses:
numOrientations=None,
phaseMode=None,
centerSurround=None,
targetType=None,
# Normalization:
gainConstant=None,
normalizationMethod=None,
perPlaneNormalization=None,
perPhaseNormalization=None,
# Post-processing:
postProcessingMethod=None,
postProcessingSlope=None,
postProcessingCenter=None,
postProcessingMin=None,
postProcessingMax=None,
zeroThresholdOut=None,
# Bounding effects:
boundaryMode=None,
offImagePixelValue=None,
suppressOutsideBox=None,
forceBoxContraction=None,
suppressByAlpha=None,
# Logging
logPrefix=None,
# Additional keywords
**keywds
):
"""
@param filterDim -- The size (in pixels) of both the width and height of the
gabor filters. Defaults to 9x9.
@param numOrientations -- The number of gabor filter orientations to produce.
The half-circle (180 degrees) of rotational angle will be evenly partitioned.
Defaults to 4, which produces a gabor bank containing filters oriented
at 0, 45, 90, and 135 degrees.
@param phaseMode -- The number of separate phases to compute per orientation.
Valid values are: 'single' or 'dual'. In 'single', responses to each such
orientation are rectified by absolutizing them; i.e., a 90-degree edge
will produce the same responses as a 270-degree edge, and the two
responses will be indistinguishable. In "dual" mode, the responses to
each orientation are rectified by clipping at zero, and then creating
a second output response by inverting the raw response and again clipping
at zero; i.e., a 90-degree edge will produce a response only in the
90-degree-oriented plane, and a 270-degree edge will produce a response
only the dual phase plane associated with the 90-degree plane (an
implicit 270-degree plane.) Default is 'single'.
@param centerSurround -- Controls whether an additional filter corresponding to
a non-oriented "center surround" response is applied to the image.
If phaseMode is "dual", then a second "center surround" response plane
is added as well (the inverted version of the center-surround response.)
Defaults to False.
@param targetType -- The preferred "target" of the gabor filters. A value of
'line' specifies that line detectors (peaks in the center and troughs
on either side) are to be used. A value of 'edge' specifies that edge
detectors (with a peak on one side and a trough on the other) are to
be used. Default is 'edge'.
@param gainConstant -- A multiplicative amplifier that is applied to the gabor
responses after any normalization. Defaults to 1.0; larger values
increase the sensitivity to edges.
@param normalizationMethod -- Controls the method by which responses are
normalized on a per image (and per scale) basis. Accepts the following
three legal values:
"fixed": No response normalization;
"max": Applies a global gain value to the responses so that the
max response equals the value of 'gainConstant'
"mean": Applies a global gain value to the responses so that the
mean response equals the value of 'gainConstant'
Default is 'fixed'.
@param perPlaneNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across all response planes
(for a given scale), or individually to each response plane. Default
is False. Note: this parameter is ignored if normalizationMethod is "fixed".
@param perPhaseNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across both phases for a
particular response orientation and scale, or individually to each
phase of the response. Default is True. Note: this parameter is
ignored if normalizationMethod is "fixed".
@param postProcessingMethod -- Controls what type of post-processing (if any)
is to be performed on the normalized responses. Valid value are:
"raw": No post-processing is performed; final output values are
unmodified after normalization
"sigmoid": Passes normalized output values through a sigmoid function
parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
"threshold": Passes normalized output values through a piecewise linear
thresholding function parameterized by 'postProcessingMin'
and 'postProcessingMax'.
@param postProcessingSlope -- Controls the slope (steepness) of the sigmoid
function used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingCenter -- Controls the center point of the sigmoid function
used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingMin -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values less than 'postProcessingMin' are suppressed to zero.
@param postProcessingMax -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values greater than 'postProcessingMax' are clamped to one.
@param zeroThresholdOut -- if all outputs of a gabor node are below this threshold,
they will all be driven to absolute 0. This is useful in conjunction with
using the product mode/don't care spatial pooler which needs to know when
an input should be treated as 0 vs being normalized to sum to 1.
@param boundaryMode -- Controls how GaborNode deals with boundary effects. Accepts
two valid parameters:
'constrained' -- Gabor responses are normally only computed for image locations
that are far enough from the edge of the input image so that the entire
filter mask fits within the input image. Thus, the spatial dimensions of
the output gabor maps will be smaller than the input image layers.
'sweepOff' -- Gabor responses will be generated at every location within
the input image layer. Thus, the spatial dimensions of the output gabor
maps will be identical to the spatial dimensions of the input image.
For input image locations that are near the edge (i.e., a portion of
the gabor filter extends off the edge of the input image), the values
of pixels that are off the edge of the image are taken to be as specifed
by the parameter 'offImagePixelValue'.
Default is 'constrained'.
@param offImagePixelValue -- If 'boundaryMode' is set to 'sweepOff', then this
parameter specifies the value of the input pixel to use for "filling"
enough image locations outside the bounds of the original image.
Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
@param suppressOutsideBox -- If True, then gabor responses outside of the bounding
box (provided from the sensor) are suppressed. Internally, the bounding
box is actually expanded by half the filter dimension (respecting the edge
of the image, of course) so that responses can be computed for all image
locations within the original bounding box.
@param forceBoxContraction -- Fine-tunes the behavior of bounding box suppression.
If False (the default), then the bounding box will only be 'contracted'
(by the half-width of the filter) in the dimenion(s) in which it is not
the entire span of the image. If True, then the bounding box will be
contracted unconditionally.
@param suppressByAlpha -- A boolean that, if True, instructs GaborNode to use
the pixel-accurate alpha mask received on the input 'validAlphaIn' for
the purpose of suppression of responses.
@param logPrefix -- If non-None, causes the response planes at each scale, and
for each input image, to be written to disk using the specified prefix
for the name of the log images. Default is None (no such logging.)
"""
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#| The following parameters are for advanced configuration and unsupported at this time |
#| They may be specified via keyword arguments only. |
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#
# @param nta_aspectRatio -- Controls how "fat" (i.e., how oriented) the Gabor
# filters are. A value of 1 would produce completely non-oriented
# (circular) filters; smaller values will produce a more oriented
# filter. Default is 0.3.
#
# @param nta_effectiveWidth -- Controls the rate of exponential drop-off in
# the Gaussian component of the Gabor filter. Default is 4.5.
#
# @param nta_wavelength -- Controls the frequency of the sinusoidal component
# of the Gabor filter. Default is 5.6.
#
# @param nta_lobeSuppression -- Controls whether or not the secondary lobes of the
# Gabor filters are suppressed. The suppression is performed based
# on the radial distance from the oriented edge to which the Gabor
# filter is tuned. If True, then the secondary lobes produced
# by the pure mathematical Gabor equation will be suppressed
# and have no effect; if False, then the pure mathematical
# Gabor equation (digitized into discrete sampling points, of
# course) will be used. Default is True.
#
# @param nta_debugLogBuffers -- If enabled, causes internal memory buffers used
# C implementation to be dumped to disk after each compute()
# cycle as an aid in the debugging of the C code path.
#
# @param nta_morphologyMethod -- Controls the method to use for performing
# morphological operations (erode or dilate) upon the
# valid alpha masks. Legal values are: 'opencv' (use the
# faster OpenCV routines), 'nta' (use the slower routines,
# or 'best' (use OpenCV if it is available on the platform,
# otherwise use the slower routines.)
#
# ------------------------------------------------------
# Handle hidden/undocumented parameters
for paramName in [p for p in self._defaults if self._isHiddenParam(p)]:
exec("%s = keywds.pop('%s', None)" % (paramName, paramName))
# ------------------------------------------------------
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
if eval(paramName) is None:
exec("%s = paramValue" % paramName)
# ------------------------------------------------------
# Handle deprecated parameters
# Deprecated: numOrients
numOrients = keywds.pop('numOrients', None)
if numOrients:
print "WARNING: 'numOrients' has been deprecated and replaced with 'numOrientations'"
if numOrientations is None:
numOrientations = numOrients
elif numOrients != numOrientations:
print "WARNING: 'numOrients' (%s) is inconsistent with 'numOrientations' (%s) and will be ignored" % \
(str(numOrients), str(numOrientations))
# Deprecated: filterPhase
filterPhase = keywds.pop('filterPhase', None)
if filterPhase:
print "WARNING: 'filterPhase' has been deprecated and replaced with 'targetType'"
if targetType is None:
targetType = filterPhase
elif filterPhase != targetType:
print "WARNING: 'filterPhase' (%s) is inconsistent with 'targetType' (%s) and will be ignored" % \
(str(filterPhase), str(targetType))
# Deprecated: nta_edgeMode
nta_edgeMode = keywds.pop('nta_edgeMode', None)
if nta_edgeMode:
print "WARNING: 'nta_edgeMode' has been deprecated and replaced with 'edgeMode'"
if edgeMode is None:
edgeMode = nta_edgeMode
elif nta_edgeMode != edgeMode:
print "WARNING: 'nta_edgeMode' (%s) is inconsistent with 'edgeMode' (%s) and will be ignored" % \
(str(nta_edgeMode), str(edgeMode))
# Deprecated: lateralInhibition
lateralInhibition = keywds.pop('nta_lateralInhibition', None)
if lateralInhibition:
print "WARNING: 'lateralInhibition' has been deprecated and will not be supported in future releases"
# Deprecated: validityShrinkage
validityShrinkage = keywds.pop('validityShrinkage', None)
if validityShrinkage:
print "WARNING: 'validityShrinkage' has been deprecated and replaced with 'suppressOutsideBox'"
if suppressOutsideBox is None:
suppressOutsideBox = (validityShrinkage >= 0.0)
elif suppressOutsideBox != (validityShrinkage >= 0.0):
print "WARNING: 'validityShrinkage' (%s) is inconsistent with 'suppressOutsideBox' (%s) and will be ignored" % \
(str(validityShrinkage), str(suppressOutsideBox))
self._numScales = None
self.nta_phaseIndex = 0
self._inputPyramidTopology = None
self._outputPyramidTopology = None
self._topDownCombiner = None
self._tdNumParents = None
self._enabledNodes = []
self._nodesWithReceptiveField = None
# These are cached inputs/outputs used for detecting/skipping either the
# bottom up or top down compute to improve performance.
self._cachedRFInput = None
self._cachedBUInput = None
self._cachedBUOutput = None
self._cachedTDInput = None
self._cachedTDOutput = None
self._cachedResetIn = None
self._cachedValidRegionIn = None
self._cachedValidRegionOut = None
# Profiling information
self._profileObj = None
self._iterations = 0
# No longer neede for receptivefields_test, but still needed to satisfy
# an assertion in _checkEphemeralMembers
if not hasattr(self, "_inputSplitter"):
self._inputSplitter = None
self._rfMask = None
self._rfSize = None
self._rfInvLenY = None
self._rfCenterX = None
self._rfCenterY = None
self._rfMinX = None
self._rfMinY = None
self._rfInvLenX = None
self._rfMaxX = None
self._rfMaxY = None
self._initEphemerals()
# ------------------------------------------------------
# Validate each parameter
for paramName in self._defaults.keys():
self._validate(paramName, eval(paramName))
# ------------------------------------------------------
# Store each parameter value
for paramName in self._defaults.keys():
# Hidden parameters have the 'nta_' prefix stripped
#if self._isHiddenParam(paramName):
# internalName = paramName[4:]
#else:
# internalName = paramName
internalName = self._stripHidingPrefixIfPresent(paramName)
exec("self._%s = %s" % (internalName, paramName))
# ------------------------------------------------------
# Perform additional validations that operate on
# combinations/interactions of parameters
self._doHolisticValidation()
# ------------------------------------------------------
# Set up internal state
# This node always get its input as a padded image cube from the ImageSensor
# It may change in the future when ImageSensor supports packed image pyramids
self._gaborBank = None
# Generation of response images must be explicitly enabled
self.disableResponseImages()
# This node type is non-learning, and thus begins life in 'infer' mode.
# This is only needed because our base class requires it.
self._stage = 'infer'
# We are always connected to an image sensor with padded pixels
self._inputPyramidFormat = 'padded'
# Store the number of output planes we'll produce
self._numPlanes = self.getNumPlanes()
# Initially, we do not generate response images
self._makeResponseImages = False
# Where we keep the maxTopDownOut for every node
self._maxTopDownOut = []
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _stripHidingPrefixIfPresent(self, paramName):
"""
If the named parameter is hidden, strip off the
leading "nta_" prefix.
"""
if self._isHiddenParam(paramName):
return paramName[4:]
else:
return paramName
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _isHiddenParam(self, paramName):
"""
Utility method for returning True if 'paramName' is the name
of a hidden parameter.
"""
return paramName.find('nta_') == 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getOutputDims(self, inputDims):
"""
Instance method version of class method
"""
return self.calcOutputDims(inputDims,
self._filterDim,
self._boundaryMode)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getNumPlanes(self):
"""
Instance method version of class method
"""
return self.calcNumPlanes(self._numOrientations,
self._phaseMode,
self._centerSurround)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcOutputDims(cls, inputDims,
filterDim,
boundaryMode,
**keywds):
"""
Public utility method that computes the output dimensions
in form (height, width), given 'inputDims' (height, width),
for a particular 'filterDim'.
"""
# Assign default values to missing parameters
for paramName in ['filterDim', 'boundaryMode']:
if eval(paramName) is None:
defValue = cls._defaults[paramName]
exec("%s = defValue" % paramName)
# Validatation
cls._validate('filterDim', filterDim)
cls._validate('boundaryMode', boundaryMode)
# Compute output dimensions
if boundaryMode == 'sweepOff':
shrinkage = 0
elif boundaryMode == 'constrained':
shrinkage = filterDim - 1
return tuple([dim - shrinkage for dim in inputDims])
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcNumPlanes(cls, numOrientations=None,
phaseMode=None,
centerSurround=None,
**keywds):
"""
Public utility method that computes the number
of responses planes for a particular Gabor
configuration.
"""
# Assign default values to missing parameters
for paramName in ['numOrientations', 'phaseMode', 'centerSurround']:
if eval(paramName) is None:
defValue = cls._defaults[paramName]
exec("%s = defValue" % paramName)
# Validatation
cls._validate('phaseMode', phaseMode)
cls._validate('numOrientations', numOrientations)
cls._validate('centerSurround', centerSurround)
# Compute output planes
numPlanes = numOrientations
if centerSurround:
numPlanes += 1
if phaseMode == 'dual':
numPlanes *= 2
return numPlanes
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doHolisticValidation(self):
"""
Perform additional validations that operate on
combinations/interactions of parameters.
"""
# We must have at least one response plane
if self.getNumPlanes() < 1:
raise RuntimeError("Configuration error: no response planes; " \
"either 'numOrientations' must be > 0 or " \
"'centerSurround' must be True")
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def _validate(cls, name, value):
"""
Validate a parameter. Raises a RunTimeError if
the parameter is invalid.
"""
# ------------------------------------------------------
# Filter size:
# Validation: filterDim
if name == "filterDim":
if type(value) != type(0) or \
value < cls.minFilterDim or \
value % 2 != 1:
raise RuntimeError("Value error: '%s' must be an odd integer >= %d; your value: %s" % \
(name, cls.minFilterDim, str(value)))
# ------------------------------------------------------
# Filter responses:
# Validation: numOrientations
elif name == "numOrientations":
if type(value) != type(0) or \
value < cls.minNumOrients:
raise RuntimeError("Value error: '%s' must be an integers >= %d; your value: %s" % \
(name, cls.minNumOrients, str(value)))
# Validation: phaseMode
elif name == "phaseMode":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: centerSurround
elif name == "centerSurround":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: targetType
elif name == "targetType":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# ------------------------------------------------------
# Normalization:
elif name == "gainConstant":
if type(value) not in [type(0), type(0.0)] or float(value) < 0.0:
raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
(name, str(value)))
# Validation: targetType
elif name == "normalizationMethod":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: perPlaneNormalization
elif name == "perPlaneNormalization":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: perPhaseNormalization
elif name == "perPhaseNormalization":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Post-processing:
# Validation: targetType
elif name == "postProcessingMethod":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: postProcessingSlope
elif name == "postProcessingSlope":
if type(value) not in [type(0), type(0.0)] or float(value) <= 0.0:
raise RuntimeError("Value error: '%s' must be a float or integer > 0.0; your value: %s" % \
(name, str(value)))
# Validation: postProcessingCenter
elif name == "postProcessingCenter":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: postProcessingMin
elif name == "postProcessingMin":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: postProcessingMax
elif name == "postProcessingMax":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: zeroThresholdOut
elif name == "zeroThresholdOut":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Boundary effects:
# Validation: boundaryMode
elif name == "boundaryMode":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), str(value)))
# Validation: offImagePixelValue
elif name == "offImagePixelValue":
if value != 'colorKey' and (type(value) not in (int, float) or float(value) < 0.0 or float(value) > 255.0):
raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 255, or 'colorKey'; your value: %s" % \
(name, str(value)))
# Validation: suppressOutsideBox
elif name == "suppressOutsideBox":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: forceBoxContraction
elif name == "forceBoxContraction":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: suppressByAlpha
elif name == "suppressByAlpha":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Logging
# Validation: logPrefix
elif name == "logPrefix":
if value is not None and (type(value) != type("") or len(value) == 0):
raise RuntimeError("Value error: '%s' must be a string; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Undocumented parameters:
# Validation: aspectRatio
elif name == "nta_aspectRatio":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: effectiveWidth
elif name == "nta_effectiveWidth":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: wavelength
elif name == "nta_wavelength":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: lobeSuppression
elif name == "nta_lobeSuppression":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: debugLogBuffers
elif name == "nta_debugLogBuffers":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: morphologyMethod
elif name == "nta_morphologyMethod":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), str(value)))
elif value == "opencv" and cv is None:
raise RuntimeError(
"'%s' was explicitly specified as 'opencv' " \
"but OpenCV is not available on this platform" % name)
# ------------------------------------------------------
# Deprecated parameters:
# Validation: numOrients
elif name == "numOrients":
if type(value) != type(0) or \
value < cls.minNumOrients:
raise RuntimeError("Value error: '%s' must be an integers >= %d; your value: %s" % \
(name, cls.minNumOrients, str(value)))
# Validation: lateralInhibition
elif name == "lateralInhibition":
if type(value) not in [type(0), type(0.0)] or value < 0.0 or value > 1.0:
raise RuntimeError("Value error: '%s' must be a float >= 0 and <= 1; your value: %s" % \
(name, str(value)))
# Validation: validityShrinkage
elif name == "validityShrinkage":
if type(value) not in [type(0), type(0.0)] or float(value) < 0.0 or float(value) > 1.0:
raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 1; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Unknown parameter
else:
raise RuntimeError("Unknown parameter: %s [%s]" % (name, value))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def initialize(self, dims, splitterMaps):
"""Build the gaborfilter bank.
This method is called after construction.
"""
# Preparations (creation of buffer, etc.)
# Send the dims as a tuple that contains one pair. This needed to make
# the node treat its input as a single scale.
self._prepare((dims,))
# Determine the number of response planes
self._numPlanes = self.getNumPlanes()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getParameter(self, parameterName, nodeSet=""):
"""
Get the value of an PyMultiNode parameter.
@param parameterName -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
if parameterName in self._defaults:
# Hidden "nta_" parameters are internally stored as
# class attributes without the leading "nta"
if parameterName.startswith("nta_"):
parameterName = parameterName[4:]
return eval("self._%s" % parameterName)
# Handle standard MRG infrastructure
elif parameterName == 'nta_width':
return self._inputPyramidTopology[0]['numNodes'][0]
elif parameterName == 'nta_height':
return self._inputPyramidTopology[0]['numNodes'][1]
# Handle the maxTopDownOut read-only parameter
elif parameterName == 'maxTopDownOut':
return self._maxTopDownOut
# Handle deprecated parameters
elif parameterName == 'numOrients':
return self._numPlanes
elif parameterName == 'filterPhase':
return self._targetType
elif parameterName == 'nta_edgeMode':
return self._boundaryMode
elif parameterName == 'nta_lateralInhibition':
return 0.0
# Unknown parameter (at least by GaborNode)
else:
return PyRegion.getParameter(self, parameterName, nodeSet)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def setParameter(self, parameterName, parameterValue, nodeSet=""):
"""
Set the value of an PyRegion parameter.
@param parameterName -- the name of the parameter to update, as defined
by the Node Spec.
@param parameterValue -- the value to which the parameter is to be set.
"""
# @todo -- Need to add validation of parameter changes
settableParams = ["suppressOutsideBox", "forceBoxContraction",
"suppressByAlpha", "offImagePixelValue",
"perPlaneNormalization", "perPhaseNormalization",
"nta_debugLogBuffers", "logPrefix",
"zeroThresholdOut"]
regenParams = ["gainConstant", "normalizationMethod",
"postProcessingMethod", "postProcessingSlope",
"postProcessingCenter", "postProcessingMin",
"postProcessingMax"]
if parameterName in settableParams + regenParams:
exec("self._%s = parameterValue" % parameterName)
elif parameterName == 'nta_morphologyMethod':
self._morphologyMethod = parameterValue
# Not one of our parameters
else:
return PyRegion.setParameter(self, parameterName, parameterValue, nodeSet)
# Generate post-processing lookup-tables (LUTs) that will be
# used by the C implementation
if parameterName in regenParams:
self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def enableResponseImages(self):
"""
Enable the generation of PIL Images representing the Gabor reponses.
"""
self._makeResponseImages = True
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def disableResponseImages(self):
"""
Disable the generation of PIL Images representing the Gabor reponses.
"""
self._makeResponseImages = False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getResponseImages(self, whichResponse='all',
preSuppression=False,
whichScale='all',
whichPhase=0,
whichDirection='bottomUp'):
"""
Return a list of PIL Images representing the Gabor responses
computed upon the latest multi-resolution input image pyramid.
@param whichResponse -- Indicates which Gabor orientation response
should be returned. If 'all' (the default), then false
color composite images will be generated that contains the
gabor responses for all orientations. Otherwise, it should
be an integer index between 0 and numOrients-1, in which
case grayscale images will be generated.
@param preSuppression -- Indicates whether the images should be
generated before bounding box suppression is performed
(if True), or after suppression (if False, the default.)
@param whichScale -- Indicates which multi-resolution scale
should be used to generate the response Images. If 'all'
(the default), then images will be generated for each
scale in the input multi-resolution grid, and will be
returned in a list. Otherwise, it should be an integer
index between 0 and numResolutions-1 (the number of
layers in the multi-resolution grid), in which case a
single Image will be returned (not a list).
@param whichDirection -- Indicates which phase of resonse images should
be returned ('bottomUp', 'topDown', 'combined'). 'bottomUp'
gets the unaltered bottom-up responses, 'top-down' gets the
top-down feedback responses, and 'combined'
@returns -- Either a single PIL Image, or a list of PIL Images
that correspond to different resolutions.
"""
# Make sure response images were enabled
if not self._makeResponseImages:
# Need to generate images now
if whichDirection == 'bottomUp':
if self.response is None:
return
response = self.response
elif whichDirection == 'topDown':
if self.tdInput is None:
return
response = self.tdInput
elif whichDirection == 'combined':
if self.selectedBottomUpOut:
return
response = self.selectedBottomUpOut
if response is None:
# No response to use
return
self._genResponseImages(response, preSuppression=preSuppression, phase=whichDirection)
# Make sure we have images to provide
if self._responseImages is None:
return
# Pull subset of images based on 'preSuppression' setting
imageSet = self._responseImages.get(self._getResponseKey(preSuppression))
# Validate format of 'whichScale' arg
numScales = len(self._inputPyramidTopology)
if whichScale != 'all' and (type(whichScale) != type(0) or whichScale < 0 or whichScale >= numScales):
raise RuntimeError, \
"'whichScale' must be 'all' or an integer between 0 and %d." % self._numScales
# Validate format of 'whichResponse' arg
if whichResponse not in ['all', 'centerSurround']:
if type(whichResponse) != type(0) or whichResponse < 0 or whichResponse >= self._numPlanes:
raise RuntimeError, \
"'whichResponse' must be 'all' or an integer between 0 and %d." % self._numPlanes
# Make sure the requested phase of response exists
if not imageSet.has_key(whichDirection):
return
# Handle "exotic" responses
if whichResponse != 'all':
if whichResponse == 'centerSurround':
whichResponse = self._numOrientations
assert type(whichResponse) == type(0)
if whichPhase > 0:
whichResponse += self._numOrientations
if self._centerSurround:
whichResponse += 1
# Return composite gabor response(s)
return imageSet[whichDirection][whichResponse][whichScale]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public class methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def deserializeImage(cls, serialized):
"""
Helper function that training/testing scripts can invoke in order
to deserialize debugging images provided by the getResponseImages()
method.
"""
image = Image.open(StringIO(serialized))
image.load()
return image
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private methods - Overriding base class
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  # Descriptor handed to the C library to describe a numpy array
  # without copying it (see _wrapArray).  Field order and types must
  # match the corresponding C struct exactly.
  class ARRAY(ctypes.Structure):
    _fields_ = [
        ("nd", ctypes.c_int),            # number of dimensions
        ("dimensions", ctypes.c_void_p), # pointer to the shape array
        ("strides", ctypes.c_void_p),    # pointer to the strides array
        ("data", ctypes.c_void_p),       # pointer to the element data
      ]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _wrapArray(self, array):
"""
Helper function that takes a numpy array and returns
a 4-tuple consisting of ctypes references to the
following:
(nd, dimensions, strides, data)
"""
if array is None:
return None
else:
return ctypes.byref(self.ARRAY(len(array.ctypes.shape),
ctypes.cast(array.ctypes.shape, ctypes.c_void_p),
ctypes.cast(array.ctypes.strides, ctypes.c_void_p),
array.ctypes.data))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepare(self, inputDims):
"""
Perform one-time preparations need for gabor processing.
"""
#inputDims = [(inputDim['numNodes'][1], inputDim['numNodes'][0]) \
# for inputDim in self._inputPyramidTopology]
self.prepare(inputDims)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def prepare(self, inputDims):
    """
    Perform one-time preparations needed for gabor processing.
    Public interface allowing the GaborNode to be tested
    outside of the full RTE.
    @param inputDims: a list of input image sizes in the
          form of 2-tuples (width, height)
    """
    # Reverse the input dims into (height, width) format for internal storage
    self._numScales = len(inputDims)
    self._inputDims = inputDims
    # Compute output dims for each input dim
    self._outputDims = [self.getOutputDims(inputDim) for inputDim in inputDims]
    # Compute the minimum input/output dimension across all scales
    self._minInputDim = min([min(inputDim) for inputDim in self._inputDims])
    self._minOutputDim = min([min(outputDim) for outputDim in self._outputDims])
    # Break out the first scale's dims as floats for convenience
    self._inHeight, self._inWidth = [float(x) for x in self._inputDims[0]]
    self._outHeight, self._outWidth = [float(x) for x in self._outputDims[0]]
    # Load the _gaborNode C library
    libGabor = self._loadLibrary("_algorithms")
    # Prepare the C calls
    if libGabor:
      self._gaborComputeProc = libGabor.gaborCompute
    else:
      raise Exception('Unable to load gaborNode C library _algorithms')
    # If we could not load the library, then we'll default to
    # using numpy for our gabor processing.
    # NOTE(review): this assignment unconditionally clobbers the
    # C function pointer that was just installed above, forcing the
    # numpy fallback path even when the library loaded successfully --
    # and the 'else' branch above already raises on load failure, so
    # the stated rationale never applies.  Looks like leftover debug
    # code; confirm intent before removing.
    self._gaborComputeProc = None
    # Prepare some data structures in advance
    # Allocate working buffers to be used by the C implementation
    #self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in inputDims]
    self._allocBuffers()
    # Generate post-processing lookup-tables (LUTs) that will be
    # used by the C implementation
    self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _alignToFour(self, val):
"""
Utility macro that increases a value 'val' to ensure
that it is evenly divisible by four (e.g., for
purposes of memory alignment, etc.)
"""
return (((val - 1) / 4) + 1) * 4
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _makeLUTs(self):
    """
    Generate post-processing lookup-tables (LUTs) that will be
    used by the C implementation.

    Stores the resulting table (or None for 'raw' post-processing)
    in self._postProcLUT and its pre-scaling factor in
    self._postProcLutScalar.
    """
    # --------------------------------------------------
    # Define LUT parameters
    # For 'normalizationMethod' of 'mean', this internal parameter
    # controls the trade-off between how finely we can discretize our
    # LUT bins vs. how often a raw response value "overflows" the
    # maximum LUT bin and has to be clamped.  In essence, any raw
    # response value greater than 'meanLutCushionFactor' times the
    # mean response for the image will "overflow" and be clamped
    # to the response value of the largest bin in the LUT.
    meanLutCushionFactor = 4.0
    # We'll use a LUT large enough to give us decent precision
    # but not so large that it causes cache problems.
    # A total of 1024 bins seems reasonable:
    numLutShifts = 10
    numLutBins = (1 << numLutShifts)
    # --------------------------------------------------
    # Build LUT
    # Build our Gabor Bank if it doesn't already exist
    self._buildGaborBankIfNeeded()
    # Empirically compute the maximum possible response value
    # given our current parameter settings.  We do this by
    # generating a fake image of size (filterDim X filterDim)
    # that has a pure vertical edge and then convolving it with
    # the first gabor filter (which is always vertically oriented)
    # and measuring the response.
    testImage = numpy.ones((self._filterDim, self._filterDim), dtype=numpy.float32) * 255.0
    #testImage[:, :(self._filterDim/2)] = 0
    testImage[numpy.where(self._gaborBank[0] < 0.0)] *= -1.0
    maxRawResponse = (testImage * self._gaborBank[0]).sum()
    # At run time our Gabor responses will be scaled (via
    # bit shifting) so that we can do integer math instead of
    # floating point math, but still have high precision.
    # So we'll simulate that in order to get a comparable result.
    maxShiftedResponse = maxRawResponse / (255.0 * float(self._integerMathScale))
    # Depending on our normalization method, our LUT will have a
    # different scaling factor (for pre-scaling values prior
    # to discretizing them into LUT bins)
    if self._normalizationMethod == 'fixed':
      postProcScalar = float(numLutBins - 1) / maxShiftedResponse
    elif self._normalizationMethod == 'max':
      postProcScalar = float(numLutBins - 1)
    elif self._normalizationMethod == 'mean':
      postProcScalar = float(numLutBins - 1) / meanLutCushionFactor
    else:
      # Unknown normalization method: should have been rejected by
      # parameter validation upstream.
      assert False
    # Build LUT: the representative input value for each bin.
    lutInputs = numpy.array(range(numLutBins), dtype=numpy.float32) / postProcScalar
    # Sigmoid: output = 1 / (1 + exp(input))
    if self._postProcessingMethod == 'sigmoid':
      offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
      scaleFactor = 1.0 / (1.0 - offset)
      # Clip the exponent to [-40, 40] to avoid overflow in exp().
      postProcLUT = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
                  * (self._postProcessingCenter - lutInputs), \
                  -40.0, 40.0)) + 1.0)) - offset) * scaleFactor
      # For some parameter choices, it is possible that numerical precision
      # issues will result in the 'offset' being ever so slightly larger
      # than the value of postProcLUT[0].  This will result in a very
      # tiny negative value in the postProcLUT[0] slot, which is
      # undesireable because the output of a sigmoid should always
      # be bound between (0.0, 1.0).
      # So we clip the LUT values to this range just to keep
      # things clean.
      postProcLUT = numpy.clip(postProcLUT, 0.0, 1.0)
    # Threshold: Need piecewise linear LUT
    elif self._postProcessingMethod == "threshold":
      postProcLUT = lutInputs
      postProcLUT[lutInputs < self._postProcessingMin] = 0.0
      postProcLUT[lutInputs > self._postProcessingMax] = 1.0
    # Raw: no LUT needed at all
    else:
      assert self._postProcessingMethod == "raw"
      postProcLUT = None
    # If we are in 'dual' phase mode, then we'll reflect
    # the LUT on the negative side of zero to speed up
    # processing inside the C function.
    # NOTE(review): this branch is disabled ('if False:') -- dead code
    # kept for reference; confirm whether it can be deleted.
    if False:
      if postProcLUT is not None and self._phaseMode == 'dual':
        # Make a reflected LUT
        comboLut = numpy.concatenate((numpy.fliplr(postProcLUT[numpy.newaxis,:]),
                                      postProcLUT[numpy.newaxis,:]),
                                      axis=1)
        # Now clone the reflected LUT and clip it's responses
        # for positive and negative phases
        postProcLUT = numpy.concatenate((comboLut, comboLut), axis=1).reshape(4*numLutBins)
        # First half of it is for positive phase
        postProcLUT[:numLutBins] = 0.0
        # Second half of it is for negative phase
        postProcLUT[-numLutBins:] = 0.0
    # Store our LUT and it's pre-scaling factor
    self._postProcLUT = postProcLUT
    self._postProcLutScalar = postProcScalar
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _allocBuffers(self):
"""
Allocate some working buffers that are required
by the C implementation.
"""
# Allocate working buffers to be used by the C implementation
#self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in self._inputDims]
# Compute how much "padding" ou input buffers
# we will need due to boundary effects
if self._boundaryMode == 'sweepOff':
padding = self._filterDim - 1
else:
padding = 0
# For each scale, allocate a set of buffers
# Allocate a working "input buffer" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetIn = []
#for inHeight, inWidth in self._inputDims:
# self._bufferSetIn = numpy.zeros((inHeight + padding,
# _alignToFour(inWidth + padding)),
# dtype=numpy.int32)
self._bufferSetIn = [numpy.zeros((inHeight + padding,
self._alignToFour(inWidth + padding)),
dtype=numpy.int32) \
for inHeight, inWidth in self._inputDims]
# Allocate a working plane of "output buffers" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetOut = []
#for outHeight, outWidth in self._outputDims:
# self._bufferSetOut += numpy.zeros((self._numOrientations,
# outHeight,
# _alignToFour(outWith)),
# dtype=numpy.int32)
numBuffersNeeded = self._numOrientations
if self._centerSurround:
numBuffersNeeded += 1
self._bufferSetOut = [numpy.zeros((numBuffersNeeded,
outHeight,
self._alignToFour(outWidth)),
dtype=numpy.int32) \
for outHeight, outWidth in self._outputDims]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _initEphemerals(self):
self._gaborComputeProc = None
# For (optional) debug logging, we keep track of the number of
# images we have seen
self._imageCounter = 0
self._bufferSetIn = None
self._bufferSetOut = None
self._morphHeader = None
self._erosion = None
self._numScales = None
self._inputDims = None
self._outputDims = None
self._minInputDim = None
self._minOutputDim = None
self._inHeight = None
self._inWidth = None
self._outHeight = None
self._outWidth = None
self._postProcLUT = None
self._postProcLutScalar = None
self._filterPhase = None
self.response = None
self._responseImages = None
self._makeResponseImages = None
self.tdInput = None
self.selectedBottomUpOut = None
self._tdThreshold = None
self._morphHeader = None
if not hasattr(self, '_numPlanes'):
self._numPlanes = None
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
paramName = self._stripHidingPrefixIfPresent(paramName)
if not hasattr(self, "_%s" % paramName):
exec("self._%s = paramValue" % paramName)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getEphemeralMembers(self):
"""
Callback (to be overridden) allowing the class to publish a list of
all "ephemeral" members (i.e., data members that should not and/or
cannot be pickled.)
"""
# We can't pickle a pointer to a C function
return [
'_gaborComputeProc',
'_bufferSetIn',
'_bufferSetOut',
'_imageCounter',
'_morphHeader',
'_erosion',
]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _loadLibrary(self, libraryName, libSubDir=None):
"""
Utility method for portably loading a NuPIC shared library.
Note: we assume the library lives in the NuPIC "lib" directory.
@param: libraryName - the name of the library (sans extension)
@returns: reference to the loaded library; otherwise raises
a runtime exception.
"""
# By default, we will look for our shared library in our
# bindings directory.
if not libSubDir:
libSubDir = "bindings"
# Attempt to load the library
try:
# All of these shared libraries are python modules. Let python find them
# for us. Once it finds us the path, we'll load it with CDLL.
dottedPath = ('.'.join(['nupic', libSubDir, libraryName]))
exec("import %s" % dottedPath)
libPath = eval("%s.__file__" % dottedPath)
lib = ctypes.cdll.LoadLibrary(libPath)
# These calls initialize the logging system inside
# the loaded library. Disabled for now.
# See comments at INIT_FROM_PYTHON in gaborNode.cpp
# pythonSystemRefP = PythonSystem.getInstanceP()
# lib.initFromPython(ctypes.c_void_p(pythonSystemRefP))
return lib
except Exception, e:
print "Warning: Could not load shared library: %s" % libraryName
print "Exception: %s" % str(e)
return None
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def compute(self, inputs, outputs):
    """
    Run one iteration of fat node, profiling it if requested.
    Derived classes should NOT override this method.
    The guts of the compute are contained in the _gaborCompute() call
    so that we can profile it if requested.
    """
    # Modify this line to turn on profiling for a given node.  The results file
    # ('hotshot.stats') will be sensed and printed out by the vision framework's
    # RunInference.py script and the end of inference.
    # Also uncomment the hotshot import at the top of this file.
    # (Developer toggle: 'if False' deliberately disables the
    # profiling branch in normal operation.)
    if False:
      if self._profileObj is None:
        self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
        # filename, lineevents, linetimings
      self._profileObj.runcall(self._gaborCompute, *[inputs, outputs])
    else:
      self._gaborCompute(inputs, outputs)
    # Count processed images for debug-log file naming.
    self._imageCounter += 1
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _getUpperLeftPixelValue(self, inputs, validAlpha=None):
    """
    Extract the intensity value of the upper-left pixel of the valid
    image region; used as the "color key" for offImagePixelValue.

    @param inputs: node input dictionary; reads 'bottomUpIn' and,
           when no alpha mask is given, optionally 'validRegionIn'.
    @param validAlpha: optional alpha-validity mask; when supplied,
           the first pixel with alpha >= 0.5 is used instead of the
           bounding-box corner.
    @returns: the pixel intensity (float), or 255.0 if an alpha mask
           is supplied but contains no valid pixels.
    """
    # Obtain raw input pixel data
    #buInputVector = inputs['bottomUpIn'][0].array()
    buInputVector = inputs['bottomUpIn']
    # Respect valid region for selection of
    # color key value
    pixelIndex = 0
    # If we have an alpha channel, then we need to find
    # the first pixel for which the alpha is nonzero
    if validAlpha is not None:
      # Temporarily decode the polarity that is stored
      # in the first alpha element (a negative first element encodes
      # its true value as -1.0 - value); restored below.
      indicatorValue = validAlpha[0,0]
      if indicatorValue < 0.0:
        validAlpha[0,0] = -1.0 - indicatorValue
      alphaLocns = numpy.where(validAlpha >= 0.5)[0]
      # Put the indicator back
      validAlpha[0,0] = indicatorValue
      # If there are no positive alpha pixels anywhere, then
      # just use white (255) as the color key (which may not
      # be the "correct" thing to do, but we have no other
      # options really.
      if len(alphaLocns) == 0:
        return 255.0;
      pixelIndex = alphaLocns[0]
    # Otherwise, if we have a bounding box, then we
    # need to find the first (upper-left) pixel in
    # the valid bounding box
    elif 'validRegionIn' in inputs:
      #validRegionIn = inputs['validRegionIn'][0].array()
      validRegionIn = inputs['validRegionIn']
      left = int(validRegionIn[0])
      top = int(validRegionIn[1])
      if left > 0 or top > 0:
        # Convert (left, top) into a flat index into the input vector.
        pixelIndex = left + top * int(self._inWidth)
    return buInputVector[pixelIndex]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _gaborCompute(self, inputs, outputs):
    """
    Run one iteration of multi-node.
    We are taking the unconventional approach of overridding the
    base class compute() method in order to avoid applying the
    splitter map, since this is an expensive process for a densely
    overlapped node such as GaborNode.

    @param inputs: dict-like node inputs; reads 'bottomUpIn' and
           optionally 'validRegionIn' / 'validAlphaIn'.
    @param outputs: node outputs, written by the bottom-up /
           top-down compute helpers.
    """
    # Build our Gabor Bank (first time only)
    self._buildGaborBankIfNeeded()
    # If we are using "color-key" mode, then detect the value of
    # the upper-left pixel and use it as the value of
    # 'offImagePixelValue'
    if self._offImagePixelValue == "colorKey":
      offImagePixelValue = self._getUpperLeftPixelValue(inputs)
    else:
      offImagePixelValue = float(self._offImagePixelValue)
    # Fast C implementation
    # Get our inputs into numpy arrays
    buInputVector = inputs['bottomUpIn']
    validRegionIn = inputs.get('validRegionIn', None)
    # Obtain access to valid alpha region, if it exists
    # and if we are configured to use the pixel-accurate
    # alpha validity mask (as opposed to using the
    # valid bounding box.)
    if self._suppressByAlpha and 'validAlphaIn' in inputs:
      if self._numScales > 1:
        raise NotImplementedError("Multi-scale GaborNodes cannot currently handle alpha channels")
      # We assume alpha channels are expressed in a format in
      # which '0.0' corresponds to total suppression of
      # responses, and '255.0' corresponds to no suppression
      # whatsoever, and intermediate values apply a linearly
      # proportional degree of suppression (e.g., a value of
      # '127.5' would result in a 50% suppression of the
      # raw responses.)
      #validAlpha = inputs['validAlphaIn'][0].array()[:, numpy.newaxis] * (1.0/255.0)
      validAlpha = inputs['validAlphaIn'][:, numpy.newaxis] * (1.0/255.0)
      # If we are using an alpha channel, then it will take
      # a bit more work to find the correct "upper left"
      # pixel because we can't just look for the first
      # upper-left pixel in the valid bounding box; we have
      # to find the first upper-left pixel in the actual
      # valid alpha zone.
      if self._offImagePixelValue == "colorKey":
        offImagePixelValue = self._getUpperLeftPixelValue(inputs, validAlpha)
    else:
      validAlpha = None
    if self.nta_phaseIndex == 0:  # Do bottom-up inference.
      self._computeWithC(buInputVector, validRegionIn,
                         outputs, offImagePixelValue, validAlpha)
      # Cache input. The output is already stored in self.response
      if self._topDownCombiner is not None and self._stage == 'infer':
        self._cachedBUInput = buInputVector
        self._cachedValidRegionIn = validRegionIn
    else: # Try top-down inference.
      # Substitute empty arrays for missing caches so the equality
      # comparisons below are well-defined.
      cachedBUInput = self._cachedBUInput \
          if self._cachedBUInput is not None else numpy.zeros(0)
      validCachedBUInput = numpy.array_equal(buInputVector, cachedBUInput)
      cachedValidRegionIn = self._cachedValidRegionIn \
          if self._cachedValidRegionIn is not None else numpy.zeros(0)
      validCachedValidRegionIn = ((validRegionIn is None) or
                                  numpy.array_equal(validRegionIn, cachedValidRegionIn))
      # See if we can use the cached values from the last bottom up compute. For better performance,
      # we only perform the cache checking when we know we might have top down computes.
      topDownConditionsMet = (self.nta_phaseIndex == 1) and \
                             (self._stage == 'infer') and \
                             (self._topDownCombiner is not None) and \
                             validCachedBUInput and validCachedValidRegionIn
      if not topDownConditionsMet:
        # Warn (rather than raise) and skip the top-down pass.
        message = (
          ("Top-down conditions were not met for GaborNode:\n") +
          ("  phaseIndex=%s (expected %d)\n" % (self.nta_phaseIndex, 1)) +
          ("  stage='%s' (expected '%s')\n" % (self._stage, "infer")) +
          ("  topDownCombiner is %s (expected not None)\n" %
           ("not None" if (self._topDownCombiner is not None) else "None")) +
          ("  buInputVector %s cache (expected ==)\n" %
           ("==" if validCachedBUInput else "!=")) +
          ("  validRegionIn %s cache (expected ==)\n" %
           ("==" if validCachedValidRegionIn else "!="))
        )
        import warnings
        warnings.warn(message, stacklevel=2)
        return
      # No need to copy to the node outputs, they should be the same as last time.
      # IMPORTANT: When using the pipeline scheduler, you MUST write to the output buffer
      # each time because there are 2 output buffers. But, we know that for feedback
      # networks, the pipleline scheduler cannot and will not be used, so it's OK to
      # skip the write to the output when we have top down computes.
      # Perform the topDown compute instead
      #print "Gabor topdown"
      buOutput = self.response.reshape(self._inputSplitter.shape[0], self._numPlanes)
      PyRegion._topDownCompute(self, inputs, outputs, buOutput,
                               buInputVector)
    # DEBUG DEBUG
    #self._logPrefix = "debug"
    #print "WARNING: using a hacked version of GaborNode.py [forced logging]"
    # Write debugging images
    if self._logPrefix is not None:
      self._doDebugLogging()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doDebugLogging(self):
"""
Dump the most recently computed responses to logging image files.
"""
preSuppression = False
# Make the response images if they haven't already been made
if not self._makeResponseImages:
self._genResponseImages(self.response, preSuppression=False)
# Write the response images to disk
imageSet = self._responseImages[self._getResponseKey(preSuppression=False)]['bottomUp']
for orient, orientImages in imageSet.items():
for scale, image in orientImages.items():
if type(scale) == type(0):
if type(orient) == type(0):
orientCode = "%02d" % orient
else:
orientCode = "%s" % orient
debugPath = "%s.img-%04d.scale-%02d.orient-%s.png" % (self._logPrefix,
self._imageCounter,
scale, orientCode)
self.deserializeImage(image).save(debugPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def filter(self, image, validRegionIn=None,
             orientation='all', phase=0,
             scaleIndex=0,
             cachedResponse=None,
             gain=1.0):
    """
    Perform gabor filtering on a PIL image, and return a PIL
    image containing the composite responses.

    @param image -- single-band PIL image; its size must match the node's
           input dimensions for 'scaleIndex'.
    @param validRegionIn -- [left, top, right, bottom] pixel bounding box
           of the valid portion of the image; defaults to the whole image.
    @param orientation -- 'all' builds a composite image from every
           response plane; 'centerSurround' selects the center-surround
           plane; an integer selects a single orientation plane.
    @param phase -- if > 0 (dual-phase configurations), selects the
           second-phase plane of the requested orientation.
    @param scaleIndex -- index of the scale (pyramid level) to process.
    @param cachedResponse -- optional previously computed response array;
           when provided, the gabor convolution is skipped entirely.
    @param gain -- gain applied when converting responses to 8-bit pixel
           values for the returned image.
    @returns (finalResponse, response) -- the rendered PIL image and the
           raw response array of shape (numLocations, numPlanes).
    """
    if validRegionIn is None:
      validRegionIn = (0, 0, image.size[0], image.size[1])
    # Decide whether or not to use numpy
    self._buildGaborBankIfNeeded()
    # Determine proper input/output dimensions
    inHeight, inWidth = self._inputDims[scaleIndex]
    outHeight, outWidth = self._outputDims[scaleIndex]
    # (inputSize and outputSize are computed but not referenced below)
    inputSize = inHeight * inWidth
    outputSize = outHeight * outWidth * self._numPlanes
    inputVector = numpy.array(image.getdata()).astype(RealNumpyDType)
    inputVector.shape = (inHeight, inWidth)
    assert image.size[1] == inHeight
    assert image.size[0] == inWidth
    # Allocate the output buffer: one plane per orientation/phase
    outputVector = numpy.zeros((outHeight, outWidth, self._numPlanes), dtype=RealNumpyDType)
    outputVector.shape = (self._numPlanes, outHeight, outWidth)
    inputVector.shape = (inHeight, inWidth)
    # Use a provided (cached) response if the caller supplied one
    if cachedResponse is not None:
      response = cachedResponse
    # If we need to re-generate the gabor response cache:
    else:
      # If we are using "color-key" mode, then detect the value of
      # the upper-left pixel and use it as the value of
      # 'offImagePixelValue'
      if self._offImagePixelValue == "colorKey":
        # Respect valid region for selection of
        # color key value
        [left, top, right, bottom] = validRegionIn
        offImagePixelValue = inputVector[top, left]
        #offImagePixelValue = inputVector[0, 0]
      else:
        offImagePixelValue = self._offImagePixelValue
      # Extract the bounding box signal (if present), converting pixel
      # coordinates into fractions of the input dimensions.
      validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                  self._inHeight,
                                                  self._inWidth,
                                                  self._inHeight],
                                                  dtype=RealNumpyDType)
      # Compute the bounding box to use for our C implementation
      bbox = self._computeBBox(validPyramid, outWidth, outHeight)
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                              dtype=numpy.int32)
      # Perform gabor processing
      self._doGabor(inputVector, bbox, imageBox, outputVector, scaleIndex, offImagePixelValue)
      # Transpose (planes, rows, cols) -> (rows, cols, planes), flatten
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes).flatten()
      assert outputVector.dtype == RealNumpyDType
      # NOTE: integer division (Python 2); numPlanes divides the length evenly
      numLocns = len(outputVector) / self._numPlanes
      response = outputVector.reshape(numLocns, self._numPlanes)
    nCols, nRows = self._outputPyramidTopology[scaleIndex]['numNodes']
    startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(scaleIndex)
    # Make composite response
    if orientation == 'all':
      # Build all the single-orientation responses
      responseSet = []
      for responseIdx in xrange(self._numPlanes):
        img = Image.new('L', (nCols, nRows))
        img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, responseIdx]).astype(numpy.uint8))
        responseSet += [img]
      finalResponse = self._makeCompositeImage(responseSet)
    # Make an individual response
    else:
      img = Image.new('L', (nCols, nRows))
      # Map the symbolic 'centerSurround' request to its plane index,
      # which follows the oriented planes.
      if orientation == 'centerSurround':
        orientation = self._numOrientations
      # Second-phase planes are stored after all first-phase planes
      # (including the optional center-surround plane).
      if phase > 0:
        orientation += self._numOrientations
        if self._centerSurround:
          orientation += 1
      img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, orientation]).astype(numpy.uint8))
      finalResponse = img
    return finalResponse, response
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _buildGaborBankIfNeeded(self):
"""
Check to see if we have a Gabor Bank, and if not, then build it.
"""
if self._gaborBank is None:
self._buildGaborBank()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doCompute(self, rfInput, rfMask, rfSize, resetSignal, validPyramid):
    """
    Actual compute() implementation. This is a placeholder that should
    be overridden by derived sub-classes.

    @param rfInput -- 2-D numpy array; each row is the (flattened) input
           pyramid data for one output location.
    @param rfMask -- a 2-dimensional numpy array (of same shape as 'rfInput')
           that contains a value of 0.0 for every element that corresponds
           to a padded "dummy" (sentinel) value within 'rfInput', and
           a value of 1.0 for every real input element.
    @param rfSize -- a 1-dimensional numpy array (same number of rows as
           'rfInput') containing the total number of real (non-dummy)
           elements for each row of 'rfInput'.
    @param resetSignal -- boolean indicating whether the current input is
           the first of a new temporal sequence.
    @param validPyramid -- a 4-element numpy array (vector) that specifies the
           zone in which the input pyramid is "valid". A point in the
           pyramid is "valid" if that point maps to a location in the
           original image, rather than a "padded" region that was added
           around the original image in order to scale/fit it into the
           dimensions of the input pyramid.
           The 4-element array is in the following format:
             [left, top, right, bottom]
           where 'left' is the fraction (between 0 and 1) of the width of
           the image where the valid zone begins, etc.

    @returns responseFinal -- 2-D numpy array of final responses, one row
           per output location; also stored in self.response.
    """
    # (numGaborFilters is computed but not referenced below)
    numGaborFilters = self._gaborBank.shape[1]
    numOutputLocns = rfInput.shape[0]
    # ---------------------------------------------------------------
    # Conceptual pipeline:
    #
    # 1. Apply Gabor filtering upon the input pixels X to
    #    generate raw responses Y0. Even in dual-phase mode,
    #    we will only need to perform the actual computations
    #    on a single phase (because the responses can be inverted).
    #
    # 2. Rectify the raw Gabor responses Y0 to produce rectified
    #    responses Y1.
    #
    # 3. Apply an adaptive normalization operation to the
    #    rectified responses Y1 to produce Y2.
    #
    # 4. Amplify the normalized responses Y2 by a fixed gain G
    #    to produce amplified responses Y3.
    #
    # 5. Apply post-processing upon the amplified responses Y3 to
    #    produce final responses Z.
    #
    #----------------------------------
    # Step 1 - Raw Gabor filtering:
    # Convolve each output location against the complete gabor bank.
    responseRaw = numpy.dot(rfInput, self._gaborBank)
    #----------------------------------
    # Step 2 - Rectify responses:
    effectiveInfinity = 1.0e7
    if self._phaseMode == 'single':
      # Single phase: full-wave rectification (absolute value)
      responseRectified = numpy.abs(responseRaw)
    elif self._phaseMode == 'dual':
      # Dual phase: half-wave rectify each sign into its own plane set
      responseRectified = numpy.concatenate((responseRaw.clip(min=0.0, max=effectiveInfinity),
                                             (-responseRaw).clip(min=0.0, max=effectiveInfinity)),
                                             axis=1)
    # NOTE(review): if _phaseMode is anything else, responseRectified is
    # undefined here — presumably validated upstream; confirm.
    #----------------------------------
    # Step 3 - Adaptive normalization:
    # Step 4 - Amplification
    # If we are not doing any normalization, then it is easy:
    if self._normalizationMethod == 'fixed':
      # In 'fixed' mode, we simply apply a default normalization
      # that takes into account the fact that the input range
      # lies between 0 and 255.
      responseAmplified = responseRectified * (self._gainConstant / 255.0)
    # Otherwise, we have to perform normalization
    else:
      # First we'll apply the power rule, if needed
      if self._normalizationMethod in ['meanPower', 'maxPower']:
        responseToUse = (responseRectified * responseRectified)
      elif self._normalizationMethod in ['mean', 'max']:
        responseToUse = responseRectified
      # NOTE(review): any other _normalizationMethod value leaves
      # responseToUse undefined — presumably validated upstream.
      # At this point, our responseRectified array is of
      # the shape (totNumOutputLocns, numOrients)
      # First, we will perform the max/mean operation over
      # the spatial dimensions; the result will be an
      # intermediate array of the shape:
      # (numScales, numOrients) which will contain the
      # max/mean over the spatial dimensions for each
      # scale and orientation.
      numLayers = len(self._inputPyramidTopology)
      layerOffsets = self._computeLayerOffsets(self._inputPyramidTopology)
      responseStats = []
      for k in xrange(numLayers):
        startOffset = layerOffsets[k]
        stopOffset = layerOffsets[k+1]
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats += [responseToUse[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats += [responseToUse[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
      responseStats = numpy.array(responseStats).reshape(numLayers, self._numPlanes)
      # This should be a numpy array containing the desired statistics
      # over the spatial dimensions; one statistic for each tuple
      # of (scale, orientation)
      # If we used a power law, then take the square root of the statistics
      if self._normalizationMethod in ['maxPower', 'meanPower']:
        responseStats = numpy.sqrt(responseStats)
      # Compute statistics over orientation (if needed)
      if not self._perOrientNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=1)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=1)
        responseStats = responseStats[:, numpy.newaxis]
      # At this point, responseStats is of shape: (numLayers, 1)
      # Compute statistics over scale (if needed)
      if not self._perScaleNormalization:
        if self._normalizationMethod in ['max', 'maxPower']:
          responseStats = responseStats.max(axis=0)
        elif self._normalizationMethod in ['mean', 'meanPower']:
          responseStats = responseStats.mean(axis=0)
        # Expand back out for each scale
        responseStats = responseStats[numpy.newaxis, :] * numpy.ones((numLayers, 1))
        # Expand back out for each orientation
        if not self._perOrientNormalization:
          responseStats = responseStats[:, numpy.newaxis] * numpy.ones((1, self._numPlanes))
      # Step 4 - Amplification
      responseStats = responseStats.reshape(numLayers, self._numPlanes)
      # Per-(scale, orientation) gains; entries with a zero statistic keep
      # the raw gain constant (avoids division by zero).
      gain = self._gainConstant * numpy.ones((numLayers, self._numPlanes), dtype=RealNumpyDType)
      nonZeros = numpy.where(responseStats > 0.0)
      gain[nonZeros] /= responseStats[nonZeros]
      # Fast usage case: neither per-scale nor per-orient normalization
      if not self._perScaleNormalization and not self._perOrientNormalization:
        responseAmplified = responseRectified * gain[0, 0]
      # Somewhat slower: per-orient (but not per-scale) normalization
      elif not self._perScaleNormalization:
        responseAmplified = responseRectified * gain[0, :]
      # Slowest: per-scale normalization
      else:
        responseAmplified = None
        for k in xrange(numLayers):
          startOffset = layerOffsets[k]
          stopOffset = layerOffsets[k+1]
          if not self._perOrientNormalization:
            gainToUse = gain[k, 0]
          else:
            gainToUse = gain[k, :]
          thisResponse = responseRectified[startOffset:stopOffset, :] * gainToUse
          if responseAmplified is None:
            responseAmplified = thisResponse
          else:
            responseAmplified = numpy.concatenate((responseAmplified, thisResponse), axis=0)
    #----------------------------------
    # Step 5 - Post-processing
    # No post-processing (linear)
    if self._postProcessingMethod == "raw":
      responseFinal = responseAmplified
    # Sigmoidal post-processing
    elif self._postProcessingMethod == "sigmoid":
      # The sigmoid is shifted/scaled so that an input of 0 maps exactly
      # to 0 and the output saturates at 1; the clip keeps exp() in range.
      offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
      scaleFactor = 1.0 / (1.0 - offset)
      responseFinal = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
                      * (self._postProcessingCenter - responseAmplified), \
                      -40.0, 40.0)) + 1.0)) - offset) * scaleFactor
    # Piece-wise linear post-processing
    elif self._postProcessingMethod == "threshold":
      # NOTE: responseFinal aliases responseAmplified; the clipping below
      # mutates it in place.
      responseFinal = responseAmplified
      responseFinal[responseAmplified < self._postProcessingMin] = 0.0
      responseFinal[responseAmplified > self._postProcessingMax] = 1.0
    #----------------------------------
    # Optional: Dump statistics for comparative purposes
    #self._dumpStats(responseFinal, "gabor.stats.txt")
    # Generate raw response images (prior to suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=True)
    # Apply suppression to responses outside valid pyramid.
    if self._suppressOutsideBox:
      self._applyValiditySuppression(responseFinal, validPyramid)
    # Perform the zeroOutThreshold clipping now if requested
    if self._zeroThresholdOut > 0.0:
      # Get the max of each node
      nodeMax = responseFinal.max(axis=1).reshape(numOutputLocns)
      # Zero out children where all elements are below the threshold
      responseFinal[nodeMax < self._zeroThresholdOut] = 0
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(responseFinal, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = responseFinal
    return responseFinal
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _applyValiditySuppression(self, response, validPyramid):
"""
Apply suppression to responses outside valid pyramid.
This overrides the default PyRegion implementation.
"""
# We compute the valid fraction of each output locations' RF by
# computing the valid fraction of it's spatial dimension.
# @todo -- Generalize this to handle more than two spatial dimensions.
validX = (self._rfMaxX.clip(min=validPyramid[0], max=validPyramid[2]) - \
self._rfMinX.clip(min=validPyramid[0], max=validPyramid[2])) * \
self._rfInvLenX
validY = (self._rfMaxY.clip(min=validPyramid[1], max=validPyramid[3]) - \
self._rfMinY.clip(min=validPyramid[1], max=validPyramid[3])) * \
self._rfInvLenY
# At this point the validX and validY numpy vectors contain values
# between 0 and 1 that encode the validity of each output location
# with respect to the X and Y spatial dimensions, respectively.
# Now we map the raw validities of each output location into
# suppression factors; i.e., a scalar (for each output location)
# that will be multiplied against each response for that particular
# output location.
# Use a hard threshold:
# Discovered a nasty, subtle bug here. The code used to be like this:
#
# suppressionFactor = ((validX * validY) >= self._validitySuppressionLow).astype(RealNumpyDType)
#
# However, in the case of validitySuppressionLow of 1.0, numpy experienced
# "random" roundoff errors, and nodes for which both validX and validY were
# 1.0 would be computed as 1 - epsilon, which would fail the test against
# validitySuppressionLow, and thus get suppressed incorrectly.
# So we introduced an epsilon to deal with this situation.
suppressionFactor = ((validX * validY) + self._epsilon >= \
self._validitySuppressionLow).astype(RealNumpyDType)
# Apply the suppression factor to the output response array
response *= suppressionFactor[:, numpy.newaxis]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _dumpStats(self, response, statsLogPath):
"""
In order to do a kind of "unit testing" of the GaborNode
tuning parameters for a particular application, it is useful
to dump statistics on the responses at different scales
and orientations/phases.
We'll dump the following statistics for each (scale, orientation) tuple:
* response mean
* response standard deviation
* power mean (squared response mean)
* response max
@param response -- response array of shape (totNumOutputLocns, numOrients)
"""
meanResponse = []
meanPower = []
stddevResponse = []
maxResponse = []
# Compute a squared (power) response
power = response * response
# Compute our mean/max/stddev statistics over the spatial dimensions
# for each scale and for each orientation. The result will be four
# array of shape: (numScales, numOrients) which will contain the
# statistics over the spatial dimensions for each scale and orientation.
numLayers = len(self._outputPyramidTopology)
layerOffsets = self._computeLayerOffsets(self._outputPyramidTopology)
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
# Mean response
meanResponse += [response[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Max response
maxResponse += [response[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
# Std. deviation response
stddevResponse += [response[startOffset:stopOffset].std(axis=0)[numpy.newaxis, :]]
# Mean power
meanPower += [power[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Now compile the responses at each scale into overall arrays
# of shape: (numScales, numOrientations)
meanResponse = numpy.array(meanResponse).reshape(numLayers, self._numPlanes)
maxResponse = numpy.array(maxResponse).reshape(numLayers, self._numPlanes)
stddevResponse = numpy.array(stddevResponse).reshape(numLayers, self._numPlanes)
meanPower = numpy.array(meanPower).reshape(numLayers, self._numPlanes)
# Finally, form the different statistics into a single desriptive vector
responseStats = numpy.concatenate((meanResponse[numpy.newaxis,:,:],
maxResponse[numpy.newaxis,:,:],
stddevResponse[numpy.newaxis,:,:],
meanPower[numpy.newaxis,:,:]), axis=0)
# Append to the stats log
fpStatsLog = open(statsLogPath, "a")
response = " ".join(["%f" % x for x in responseStats.flatten().tolist()])
fpStatsLog.write(response + "\n")
fpStatsLog.close()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _doTopDownInfer(self, tdInput, tdNumParents, buOutput, buInput):
    """
    Actual top down compute() implementation. This is a placeholder that should
    be overridden by derived sub-classes.

    @param tdInput -- a 3D array containing the top-down inputs to each baby node.
          Think of this as N 2D arrays, where N is the number of baby nodes.
          Each baby node's 2D array has R rows, where each row is the top-down
          output from one of the parents. The width of each row is equal to the
          width of the bottomUpOut of the baby node. If a baby node
          has only 2 parents, but R is 5 for example, then the last 3 rows
          of the 2D array will contain all 0's. The tdNumParents argument
          can be referenced to find out how many parents the node actually has.
          The tdInput array is structured in this manner to make it easy to
          sum the contributions from the parents. All the sub-class needs to
          do is a numpy.add.reduce(tdInput, axis=1).
    @param tdNumParents -- a vector whose length is equal to the number of baby
          nodes. Each element contains the number of parents of each baby node.
    @param buOutput -- a 2D array containing the results of the bottomUp compute for
          this node. This is a copy of the return value returned from the
          _doCompute method of the node.
    @param buInput -- a 2D array containing the bottom-up inputs to each baby node.
          This is the same input that is passed to the _doCompute() method,
          but it is called rfInput there. (Unused here.)

    @returns tdOutput -- a 2-D numpy array containing the outputs from each baby
          node. Each row is a baby node output.

    Side effects: sets self.tdInput, self.selectedBottomUpOut, and
    self._maxTopDownOut.
    """
    # NOTE: Making this a float32 makes the copy to the node outputs at the end of
    # the compute faster.
    #tdOutput = numpy.zeros(self._inputSplitter.shape, dtype='float32')
    # print "Top-down infer called on a Gabor node. Use breakpoint to step through"
    # print "and make sure things are as expected:"
    # import pdb; pdb.set_trace()
    numBabyNodes = len(tdInput)
    numOrients = len(tdInput[0][0])
    assert self._numPlanes == numOrients # Number of filters must match top-down input
    # Default threshold mask of all-ones: if the top-down input is blank,
    # the bottom-up output passes through unthresholded.
    tdThreshold = numpy.ones((numBabyNodes, numOrients))
    # Feature flags selecting which processing steps below are active
    # (hard-coded; kept as a tuple for experimentation).
    version=('tdThreshold', 'combine', 'td_normalize')
    minResponse=1e-10
    # Average top-down inputs for each baby Node
    tdInput_avg = numpy.add.reduce(tdInput, axis=1) / tdNumParents
    # For the gabor node, we will usually get 1 orientation fed down from
    # the complex level above us. This is because the SparsePooler above that
    # sparsified it's inputs and only saves one orientation from each complex node.
    # But, for the Gabor node which is at the bottom of the hierarchy, it makes more
    # sense to spread the topdown activation among all the orientations since
    # each gabor covers only a few pixels and won't select one object from another.
    tdMaxes = tdInput_avg.max(axis=1)
    # Broadcast each baby node's max activation across all its orientations
    tdInput_avg *= 0
    tdInput_avg += tdMaxes.reshape(-1,1)
    if tdInput_avg.max() <= minResponse:
      # Top-down input is effectively blank; keep the all-ones threshold
      #print "Top-down Input is Blank"
      pass
    else:
      if 'combine' in version: # Combine top-down and bottom-up inputs
        tdInput_avg *= buOutput
      if 'td_normalize' in version: # Normalize top-down inputs for viewing
        # td_max = tdInput_avg.max()
        # tdInput_avg /= td_max
        td_max = tdInput_avg.max()
        if td_max != 0:
          tdInput_avg /= td_max
      if 'tdThreshold' in version: # Use tdInput_avg to threshold bottomUp outputs
        # Lazily initialize the threshold for nodes deserialized from
        # older versions that lack the attribute.
        if not hasattr(self, '_tdThreshold'):
          self._tdThreshold = 0.01
        tdThreshold = tdInput_avg > self._tdThreshold
    self.tdInput = tdInput_avg
    # Keep only the bottom-up responses selected by the top-down mask,
    # then normalize to a 0..1 range for display.
    self.selectedBottomUpOut = buOutput * tdThreshold
    theMax = self.selectedBottomUpOut.max()
    if theMax > 0:
      self.selectedBottomUpOut /= theMax
    # Generate response images
    if self._makeResponseImages:
      self._genResponseImages(self.tdInput, preSuppression=False, phase='topDown')
      self._genResponseImages(self.selectedBottomUpOut, preSuppression=False,
                              phase='combined')
    # Generate the topDown outputs. At this point, tdMaxes contains the max gabor orientation
    # output from each baby node. We will simply "spread" this value across all of the
    # topDown outputs for each baby node as an indication of their input activation level.
    # In a perfect world, you would try and reconstruct the input by summing the inverse of the
    # gabor operation for each output orientation. But, for now, we are only using the top
    # down output of the Gabor as an indication of the relative input strength to each gabor
    # filter - essentially as a mask on the input image.
    tdOutput = numpy.ones(self._inputSplitter.shape, dtype='float32')
    tdOutput *= tdMaxes.reshape(-1,1)
    # Save the maxTopDownOut for each baby node so that it can be returned as a read-only
    # parameter. This provides faster performance for things like the top down image inspector
    # that only need the max output from each node
    self._maxTopDownOut = tdMaxes
    return tdOutput
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _computeWithC(self,
                    inputPlane,
                    validRegionIn,
                    outputs,
                    offImagePixelValue,
                    validAlpha):
    """
    Perform Gabor processing using custom C library.

    @param inputPlane -- flat float32 numpy vector holding all scales of
           the input pyramid, one scale after another.
    @param validRegionIn -- [left, top, right, bottom] pixel bounding box
           of the valid input region; defaults to the whole input.
    @param outputs -- dict-like output container; 'bottomUpOut' is a flat
           float32 vector that is filled in (and reassigned) here.
    @param offImagePixelValue -- pixel value to use outside the image
           boundary during filtering.
    @param validAlpha -- optional alpha channel restricting the valid
           zone; eroded/dilated before use (see _adjustAlphaChannel).

    Side effect: stores the flat response vector in self.response.
    """
    if validRegionIn is None:
      validRegionIn = (0, 0, self._inWidth, self._inHeight)
    # Distinguish the "padded" layout (every scale stored at full input
    # size) from the "packed" layout (each scale stored at its own size).
    inputLen = len(inputPlane)
    if self._inputPyramidTopology is None or \
       inputLen == self._inWidth * self._inHeight * len(self._inputPyramidTopology):
      isPadded = True
    else:
      assert inputLen == sum([lvl['numNodes'][0] * lvl['numNodes'][1] \
                              for lvl in self._inputPyramidTopology])
      isPadded = False
    # Extract the bounding box signal (if present), converting pixel
    # coordinates into fractions of the input dimensions.
    validPyramid = validRegionIn / numpy.array([self._inWidth,
                                                self._inHeight,
                                                self._inWidth,
                                                self._inHeight],
                                                dtype=RealNumpyDType)
    # First extract a numpy array containing the entire input vector
    assert inputPlane.dtype == numpy.float32
    # Convert the output images to a numpy vector
    #outputPlane = outputs['bottomUpOut'].wvector()[:].array()
    outputPlane = outputs['bottomUpOut']
    assert outputPlane.dtype == numpy.float32
    # Walk the flat input/output vectors one scale at a time
    inputOffset = 0
    outputOffset = 0
    for scaleIndex in xrange(self._numScales):
      # Handle padded case (normal)
      if isPadded:
        inputScaleIndex = 0
      # Handle packed case (deployed)
      else:
        inputScaleIndex = scaleIndex
      # Determine proper input/output dimensions
      inHeight, inWidth = self._inputDims[inputScaleIndex]
      outHeight, outWidth = self._outputDims[scaleIndex]
      inputSize = inHeight * inWidth
      outputSize = outHeight * outWidth * self._numPlanes
      # Locate correct portion of input
      inputVector = inputPlane[inputOffset:inputOffset+inputSize]
      inputOffset += inputSize
      inputVector.shape = (inHeight, inWidth)
      # Locate correct portion of output
      outputVector = outputPlane[outputOffset:outputOffset+outputSize]
      outputVector.shape = (self._numPlanes, outHeight, outWidth)
      # Compute the bounding box to use for our C implementation
      bbox = self._computeBBox(validPyramid, self._inputDims[scaleIndex][1],
                               self._inputDims[scaleIndex][0])
      imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
                              self._inputDims[scaleIndex][0]],
                              dtype=numpy.int32)
      ## --- DEBUG CODE ----
      #global id
      #o = inputVector
      #print outputVector.shape, len(o)
      #f = os.path.abspath('gabor_input_%d.txt' % id)
      #print f
      #numpy.savetxt(f, o)
      #id += 1
      ##from dbgp.client import brk; brk(port=9019)
      ## --- DEBUG CODE END ----
      # Erode and/or dilate the alpha channel
      # @todo -- This should be moved into the C function
      if validAlpha is not None:
        validAlpha = self._adjustAlphaChannel(validAlpha)
      # Perform gabor processing
      self._doGabor(inputVector,
                    bbox,
                    imageBox,
                    outputVector,
                    scaleIndex,
                    offImagePixelValue,
                    validAlpha)
      # Optionally, dump working buffers for debugging purposes
      if self._debugLogBuffers:
        self._logDebugBuffers(outputVector, scaleIndex);
      # Note: it would be much better if we did not have to do this
      # post-processing "transposition" operation, and instead just
      # performed all the different orientation computations for
      # each pixel.
      # Note: this operation costs us about 1 msec
      outputVector = numpy.rollaxis(outputVector, 0, 3)
      outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes)
      assert outputVector.dtype == numpy.float32
      # Perform the zeroOutThreshold clipping now if requested
      # @todo -- This should be moved into the C function
      if self._zeroThresholdOut > 0.0:
        # Get the max of each node
        nodeMax = outputVector.max(axis=1).reshape(outWidth * outHeight)
        # Zero out children where all elements are below the threshold
        outputVector[nodeMax < self._zeroThresholdOut] = 0.0
      # Copy the transposed responses back into the flat output vector
      outputPlane[outputOffset:outputOffset+outputSize] = outputVector.flatten()
      outputOffset += outputSize
    # Generate final response images (after suppression)
    if self._makeResponseImages:
      self._genResponseImages(outputPlane, preSuppression=False)
    # Store the response so that it can be retrieved later
    self.response = outputPlane
    ## --- DEBUG CODE ----
    #global id
    #o = outputPlane
    ##print outputVector.shape, len(o)
    #f = os.path.abspath('gabor_output_%d.txt' % id)
    #print f
    #numpy.savetxt(f, o)
    #id += 1
    ##from dbgp.client import brk; brk(port=9019)
    ## --- DEBUG CODE END ----
    # De-multiplex inputs/outputs
    #outputs['bottomUpOut'].wvector()[:] = outputPlane
    outputs['bottomUpOut'] = outputPlane
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _adjustAlphaChannel(self, alphaMask):
    """
    Apply an alpha suppression channel (in place) to each plane
    of gabor responses.

    @param alphaMask: a numpy array of shape (numPixels, 1)
          containing the alpha mask that determines which responses
          are to be suppressed. If the values in the alpha mask
          are in the range (0.0, 255.0), then the alpha mask will
          be eroded by halfFilterDim; if the values in the alpha
          mask are in the range (-255.0, 0.0), then the mask will
          be dilated by halfFilterDim.
    @returns the adjusted alphaMask (the same array, modified in place).
    """
    # Determine whether to erode or dilate.
    # In order to make this determination, we check
    # the sign of the first alpha pixel:
    #
    #  MorphOp    true mask[0,0]    alpha[0,0] code
    #  =======    ==============    ===============
    #  erode      0 (background)        0
    #  erode      255 (foreground)      255
    #  dilate     0 (background)        -1
    #  dilate     255 (foreground)      -256
    indicatorValue = alphaMask[0,0]
    if indicatorValue < 0.0:
      operation = 'dilate'
      # Convert the alpha value back to it's
      # true value
      alphaMask[0,0] = -1.0 - indicatorValue
    else:
      operation = 'erode'
    # We need to perform enough iterations to cover
    # half of the filter dimension
    # NOTE: integer division (Python 2); _filterDim is odd by convention
    # of symmetric filters — TODO confirm
    halfFilterDim = (self._filterDim - 1) / 2
    if self._morphologyMethod == "opencv" or \
       (self._morphologyMethod == "best" and cv is not None):
      # Use the faster OpenCV code path
      assert cv is not None
      # Lazily allocate the necessary OpenCV wrapper structure(s)
      self._prepMorphology()
      # Make the OpenCV image header structure's pixel buffer
      # pointer point at the underlying memory buffer of
      # the alpha channel (numpy array)
      self._morphHeader.contents.imageData = alphaMask.ctypes.data
      # Perform dilation in place
      if operation == 'dilate':
        cv.Dilate(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
      # Perform erosion in place
      else:
        cv.Erode(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
    else:
      # Use the custom C++ code path
      if not self._erosion:
        from nupic.bindings.algorithms import Float32Erosion
        self._erosion = Float32Erosion()
        self._erosion.init(int(self._inHeight), int(self._inWidth))
      # Perform the erosion/dilation in-place
      self._erosion.compute(alphaMask,
                            alphaMask,
                            halfFilterDim,
                            (operation=='dilate'))
    # Legacy numpy method
    # If we are in constrained mode, then the size of our
    # response planes will be less than the size of our
    # alpha mask (by halfFilterDim along each edge).
    # So we need to "shave off" halfFilterDim pixels
    # from all edges of the alpha mask before applying
    # suppression to the response planes.
    # NOTE(review): this edge shaving runs after BOTH code paths above,
    # not only the legacy one — confirm that is intended.
    inWidth = int(self._inWidth)
    inHeight = int(self._inHeight)
    # For erosion mode, we need to shave off halfFilterDim
    # from the four edges of the alpha mask.
    if operation == "erode":
      alphaMask.shape = (inHeight, inWidth)
      alphaMask[:halfFilterDim, :] = 0.0
      alphaMask[-halfFilterDim:, :] = 0.0
      alphaMask[:, :halfFilterDim] = 0.0
      alphaMask[:, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    # For dilation mode, we need to shave off halfFilterDim
    # from any edge of the alpha mask that touches the
    # image boundary *unless* the alpha mask is "full"
    # (i.e., consumes the entire image.)
    elif operation == "dilate":
      # Handle top, bottom, left, and right
      alphaMask.shape = (inHeight, inWidth)
      zapTop = numpy.where(alphaMask[0,:])[0]
      zapBottom = numpy.where(alphaMask[-1,:])[0]
      zapLeft = numpy.where(alphaMask[:,0])[0]
      zapRight = numpy.where(alphaMask[:,-1])[0]
      # Apply zaps unless all of them are of the full
      # length possible
      if len(zapTop) < inWidth or len(zapBottom) < inWidth or \
         len(zapLeft) < inHeight or len(zapRight) < inHeight:
        alphaMask[:halfFilterDim, zapTop] = 0.0
        alphaMask[-halfFilterDim:, zapBottom] = 0.0
        alphaMask[zapLeft, :halfFilterDim] = 0.0
        alphaMask[zapRight, -halfFilterDim:] = 0.0
      alphaMask.shape = (inHeight * inWidth, 1)
    return alphaMask
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepMorphology(self):
"""
Prepare buffers used for eroding/dilating alpha
channels.
"""
# Check if we've already allocated a header
#if not hasattr(self, '_morphHeader'):
if not getattr(self, '_morphHeader', None):
if cv is None:
raise RuntimeError("OpenCV not available on this platform")
# Create a header only (not backed by data memory) that will
# allow us to operate on numpy arrays (valid alpha channels)
# using OpenCV operations
self._morphHeader = cv.CreateImageHeader(cv.Size(int(self._inWidth),
int(self._inHeight)), 32, 1)
# @todo: this will leak a small bit of memory every time
# we create and use a new GaborNode unless we find a way
# to guarantee the invocation of cv.ReleaseImageHeader()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _computeBBox(self, validPyramid, inWidth, inHeight):
    """
    Compute a bounding box given the validPyramid (a fraction
    of the valid input region as provided by the sensor) and
    the output dimensions for a particular current scale.

    @param validPyramid -- numpy array of four fractions in [0, 1]
          (left, top, right, bottom) describing the valid input region.
    @param inWidth -- width in pixels of the image at this scale.
    @param inHeight -- height in pixels of the image at this scale.
    @returns numpy int32 array [left, top, right, bottom] in pixels,
          clipped to the image and guaranteed non-negative in extent.
    """
    # Assemble the bounding box by converting 'validPyramid' from float (0,1) to integer (0,N)
    if self._suppressOutsideBox:
      # NOTE: Python 2 integer division; _filterDim is odd, so this is exact.
      halfFilterDim = (self._filterDim - 1) / 2
      bbox = numpy.round((validPyramid * numpy.array([inWidth, inHeight, inWidth, inHeight],
              dtype=validPyramid.dtype))).astype(numpy.int32)
      # Subtract enough padding for our filter on all four edges
      # We'll only subtract enough padding if we have a non-trivial bounding box.
      # In other words, if our validRegionIn is [0, 25, 200, 175] for input image
      # dimensions of [0, 0, 200, 200], then we will assume that two horizontal strips
      # of filler pixels were artificially added at the top and bottom, but no
      # such artificial vertical strips were added. So we don't need to erode the
      # bounding box horizontally, only vertically.
      if self._forceBoxContraction or bbox[0] > 0:
        bbox[0] += halfFilterDim
      if self._forceBoxContraction or bbox[1] > 0:
        bbox[1] += halfFilterDim
      if self._forceBoxContraction or bbox[2] < inWidth:
        bbox[2] -= halfFilterDim
      if self._forceBoxContraction or bbox[3] < inHeight:
        bbox[3] -= halfFilterDim
      # Clip the bounding box to the size of the image
      bbox[0] = max(bbox[0], 0)
      bbox[1] = max(bbox[1], 0)
      bbox[2] = min(bbox[2], inWidth)
      bbox[3] = min(bbox[3], inHeight)
      # Make sure the bounding box didn't become negative width/height
      bbox[0] = min(bbox[0], bbox[2])
      bbox[1] = min(bbox[1], bbox[3])
    # If absolutely no suppression is requested under any
    # circumstances, then force the bbox to be the entire image
    else:
      bbox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)
    # Check in case bbox is non-existent or mal-formed
    # (can only happen in the suppression branch above; kept as a
    # defensive repair that degrades to a null box rather than crashing)
    if bbox[0] < 0 or bbox[1] < 0 or bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
      print "WARNING: empty or malformed bounding box:", bbox
      # Fix bbox so that it is a null box but at least not malformed
      if bbox[0] < 0:
        bbox[0] = 0
      if bbox[1] < 0:
        bbox[1] = 0
      if bbox[2] < bbox[0]:
        bbox[2] = bbox[0]
      if bbox[3] < bbox[1]:
        bbox[3] = bbox[1]
    return bbox
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _logDebugBuffers(self, outputVector, scaleIndex, outPrefix="debug"):
"""
Dump detailed debugging information to disk (specifically, the
state of internal working buffers used by C implementaiton.
@param outPrefix -- Prefix to prepend to standard names
for debugging images.
"""
# Save input buffer
self._saveImage(self._bufferSetIn[scaleIndex],
"%s.buffer.in.%02d.png" % (outPrefix, scaleIndex))
# Save output buffer planes
for k in xrange(self._bufferSetOut[scaleIndex].shape[0]):
# We do integer arithmetic shifted by 12 bits
buf = (self._bufferSetOut[scaleIndex][k] / 4096).clip(min=0, max=255);
self._saveImage(buf, "%s.buffer.out.%02d.%02d.png" % (outPrefix, scaleIndex, k))
# Save raw gabor output images (from C implementation)
for k in xrange(self._numPlanes):
self._saveImage(outputVector[k], "%s.out.%02d.%02d.png" % \
(outPrefix, scaleIndex, k))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _saveImage(self, imgArray, outPath):
imgDims = imgArray.shape
img = Image.new('L', (imgDims[1], imgDims[0]))
if imgArray.dtype == numpy.float32:
img.putdata( ((254.9 * imgArray.flatten()).clip(min=0.0, max=255.0)).astype(numpy.uint8) )
#img.putdata((255.0 * imgArray.flatten()).astype(numpy.uint8))
elif imgArray.dtype == numpy.int32:
img.putdata((imgArray.flatten()).astype(numpy.uint8))
else:
assert imgArray.dtype == numpy.uint8
img.putdata(imgArray.flatten())
img.save(outPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doGabor(self, inputVector,
bbox,
imageBox,
outputVector,
scaleIndex,
offImagePixelValue=None,
validAlpha=None):
"""
Prepare arguments and invoke C function for
performing actual 2D convolution, rectification,
normalization, and post-processing.
"""
if offImagePixelValue is None:
assert type(offImagePixelValue) in [type(0), type(0.0)]
offImagePixelValue = self._offImagePixelValue
# If we actually have a valid validAlpha mask,
# then reshape it to the input image size
if validAlpha is not None:
origAlphaShape = validAlpha.shape
validAlpha.shape = inputVector.shape
# Invoke C function
result = self._gaborComputeProc(
self._wrapArray(self._gaborBank),
self._wrapArray(inputVector),
self._wrapArray(validAlpha),
self._wrapArray(bbox),
self._wrapArray(imageBox),
self._wrapArray(outputVector),
ctypes.c_float(self._gainConstant),
self._mapParamFromPythonToC('boundaryMode'),
ctypes.c_float(offImagePixelValue),
self._mapParamFromPythonToC('phaseMode'),
self._mapParamFromPythonToC('normalizationMethod'),
self._mapParamFromPythonToC('perPlaneNormalization'),
self._mapParamFromPythonToC('perPhaseNormalization'),
self._mapParamFromPythonToC('postProcessingMethod'),
ctypes.c_float(self._postProcessingSlope),
ctypes.c_float(self._postProcessingCenter),
ctypes.c_float(self._postProcessingMin),
ctypes.c_float(self._postProcessingMax),
self._wrapArray(self._bufferSetIn[scaleIndex]),
self._wrapArray(self._bufferSetOut[scaleIndex]),
self._wrapArray(self._postProcLUT),
ctypes.c_float(self._postProcLutScalar),
)
if result < 0:
raise Exception("gaborCompute failed")
# If we actually have a valid validAlpha mask,
# then reshape it back to it's original shape
if validAlpha is not None:
validAlpha.shape = origAlphaShape
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _convertEnumValue(self, enumValue):
"""
Convert a Python integer object into a ctypes integer
that can be passed to a C function and seen as an
int on the C side.
"""
return ctypes.c_int(enumValue)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _mapParamFromPythonToC(self, paramName):
"""
Map Python object values to equivalent enumerated C values.
"""
# boundaryMode
if paramName == "boundaryMode":
if self._boundaryMode == 'constrained':
enumValue = 0
elif self._boundaryMode == 'sweepOff':
enumValue = 1
return self._convertEnumValue(enumValue)
# phaseMode
elif paramName == "phaseMode":
if self._phaseMode == 'single':
enumValue = 0
elif self._phaseMode == 'dual':
enumValue = 1
return self._convertEnumValue(enumValue)
# normalizationMethod
elif paramName == "normalizationMethod":
if self._normalizationMethod == 'fixed':
enumValue = 0
elif self._normalizationMethod == 'max':
enumValue = 1
elif self._normalizationMethod == 'mean':
enumValue = 2
#elif self._normalizationMethod == 'maxPower':
# enumValue = 3
#elif self._normalizationMethod == 'meanPower':
# enumValue = 4
return self._convertEnumValue(enumValue)
# perPlaneNormalization
elif paramName == "perPlaneNormalization":
if not self._perPlaneNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# perPhaseNormalization
elif paramName == "perPhaseNormalization":
if not self._perPhaseNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# postProcessingMethod
elif paramName == "postProcessingMethod":
if self._postProcessingMethod == 'raw':
enumValue = 0
elif self._postProcessingMethod == 'sigmoid':
enumValue = 1
elif self._postProcessingMethod == 'threshold':
enumValue = 2
return self._convertEnumValue(enumValue)
# Invalid parameter
else:
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private helper methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getValidEdgeModes(self):
"""
Returns a list of the valid edge modes.
"""
return ['constrained', 'sweepOff']
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _serializeImage(self, image):
"""
Serialize a PIL image so that it can be transported through
the runtime engine.
"""
s = StringIO()
format = 'png'
if hasattr(image, 'format') and image.format:
format = image.format
try:
image.save(s, format=format)
except:
image.save(s, format='png')
return s.getvalue()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getResponseKey(self, preSuppression):
"""
Returns a key used to index the response image dict
(either 'raw' or 'final')
"""
if preSuppression:
return 'raw'
else:
return 'final'
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _genResponseImages(self, rawResponse, preSuppression, phase='bottomUp'):
    """
    Generate PIL images from the response array.
    @param rawResponse -- flat numpy array of responses; its length must
          be a multiple of self._numPlanes.
    @param preSuppression -- a boolean, which indicates whether to
          store the generated images using the key 'raw' (if True)
          or 'final' (if False) within the _responseImages member dict.
    @param phase -- 'bottomUp', 'topDown', or 'combined', depending on which
          phase of response image we're generating
    Generate a dict of dicts. The primary dict is keyed by response,
    which can be either 'all' or an integer between 0 and numOrients-1;
    the secondary dicts are keyed by scale, which can be either 'all'
    or an integer between 0 and numScales.
    """
    if phase not in ('bottomUp', 'topDown', 'combined'):
      raise RuntimeError, "phase must be either 'bottomUp', 'topDown', or 'combined'"
    # Python 2 integer division: one row per (location, plane) pair
    numLocns = len(rawResponse.flatten()) / self._numPlanes
    response = rawResponse.reshape(numLocns, self._numPlanes)
    #numScales = len(self._inputPyramidTopology)
    numScales = self._numScales
    imageSet = {}
    # Build all the single-orientation responses
    for responseIdx in xrange(self._numPlanes):
      responseSet = {}
      # Build all the scales
      for scaleIdx in xrange(numScales):
        responseSet[scaleIdx] = self._makeImage(response, scaleIdx, responseIdx)
      # Build the "all scale" list
      #responseSet['all'] = responseSet.values()
      imageSet[responseIdx] = responseSet
    # Build the composite responses (false-color merge of all orientations)
    responseSet = {}
    for scaleIdx in xrange(numScales):
      scaleSet = [imageSet[orientIdx][scaleIdx] for orientIdx in xrange(self._numPlanes)]
      responseSet[scaleIdx] = self._makeCompositeImage(scaleSet)
    imageSet['all'] = responseSet
    # Serialize all images.  NOTE: inserting the 'all' key while looping
    # is safe here only because Python 2's dict.items() returns a list
    # (a snapshot), not a live view.
    for orientIdx, orientResponses in imageSet.items():
      for scaleIdx, scaleResponse in orientResponses.items():
        imageSet[orientIdx][scaleIdx] = self._serializeImage(scaleResponse)
      imageSet[orientIdx]['all'] = imageSet[orientIdx].values()
    # Store the image set
    # NOTE(review): if _responseImages already exists but lacks this
    # response key, the line below would raise KeyError -- presumably
    # callers reset _responseImages to None per compute; verify.
    if self._responseImages is None:
      self._responseImages = {self._getResponseKey(preSuppression): {}}
    self._responseImages[self._getResponseKey(preSuppression)][phase] = imageSet
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getNodeRangeByScale(self, whichScale):
"""
Returns a 2-tuple of node indices corresponding to the set of
nodes associated with the specified 'whichScale'.
"""
assert whichScale >= 0
#assert whichScale < len(self._outputPyramidTopology)
assert whichScale < self._numScales
startNodeIdx = 0
#for scaleIndex, outputTopo in enumerate(self._outputPyramidTopology):
for scaleIndex, outputDim in enumerate(self._outputDims):
#nCols, nRows = outputTopo['numNodes']
nRows, nCols = outputDim
stopNodeIdx = startNodeIdx + nCols * nRows
if scaleIndex == whichScale:
return (startNodeIdx, stopNodeIdx)
else:
startNodeIdx = stopNodeIdx
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeImage(self, response, whichScale, whichOrient, gain=1.0):
"""
Generate a single PIL image (using the raw response array) for a
particular scale and orientation.
"""
#nCols, nRows = self._outputPyramidTopology[whichScale]['numNodes']
nRows, nCols = self._outputDims[whichScale]
img = Image.new('L', (nCols, nRows))
startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(whichScale)
img.putdata((gain * 255.0 * response[startNodeIdx:stopNodeIdx,
whichOrient]).clip(min=0.0, max=255.0).astype(numpy.uint8))
return img
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeCompositeImage(self, imageSet):
"""
Create a false color composite image of the individiual
orientation-specific gabor response images in 'imageSet'.
"""
# Generate the bands
numBands = 3
bands = [Image.new('L',imageSet[0].size)] * numBands
for k, img in enumerate(imageSet):
whichBand = k % numBands
bands[whichBand] = ImageChops.add(bands[whichBand], img)
# Make final composite for this scale
compositeImage = Image.merge(mode='RGB', bands=bands)
return compositeImage
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  # Deliberately disabled (dead) code: kept for reference only; the
  # 'if False:' guard means _getEffectiveOrients is never defined.
  if False:
    def _getEffectiveOrients(self):
      """
      Internal helper method that returns the number of "effective"
      orientations (which treats the dual phases responses as a
      single orientation.)
      """
      numEffectiveOrients = self._numPlanes
      # Dual-phase mode stores two planes per orientation
      if self._phaseMode == 'dual':
        numEffectiveOrients /= 2
      # The optional center-surround plane is not an orientation
      if self._centerSurround:
        numEffectiveOrients -= 1
      return numEffectiveOrients
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
  def _buildGaborBank(self):
    """
    Build an array of Gabor filters. Also build a 1-D vector of
    filter bank indices that maps each output location to a particular
    (customized) bank of gabor filters.

    The finished bank is stored in self._gaborBank as int32 values in
    fixed-point form (float weights scaled by 4096) for consumption by
    the C implementation.
    """
    # Make sure dimensions of our Gabor filters are odd
    assert self._filterDim % 2 == 1
    # Create mesh grid indices. The result will be a numpy array of
    # shape (2, filterDim, filterDim).
    # Then meshGrid[0] stores the row indices of the master grid,
    # and meshGrid[1] stores the column indices.
    # NOTE: Python 2 integer division; filterDim is odd, so the grid is
    # symmetric about zero.
    lowerIndex = -(self._filterDim / 2)
    upperIndex = 1 + self._filterDim / 2
    meshGrid = numpy.mgrid[lowerIndex:upperIndex, lowerIndex:upperIndex]
    # If we are supposed to produce only center-surround output
    # (no oriented responses), then we will still go through the
    # process of making a minimalist bank of 2 oriented gabor
    # filters since that is needed by the center-surround filter
    # generation code
    numOrientations = self._numOrientations
    if numOrientations == 0:
      numOrientations = 2
    # Select the orientation sample points (in radians)
    radianInterval = numpy.pi / float(numOrientations)
    orientations = numpy.array(range(numOrientations), dtype=RealNumpyDType) * \
                   radianInterval
    # Compute trigonometric functions of orientation
    sinTheta = numpy.sin(orientations).reshape(numOrientations, 1, 1)
    cosTheta = numpy.cos(orientations).reshape(numOrientations, 1, 1)
    # Construct two filterDim X filterDim arrays containing y (row) and
    # x (column) coordinates (in dimensions of pixels), respectively.
    y = meshGrid[0].reshape(1, self._filterDim, self._filterDim)
    x = meshGrid[1].reshape(1, self._filterDim, self._filterDim)
    # Rotate the coordinate frame by each orientation angle
    X = x * cosTheta - y * sinTheta
    Y = x * sinTheta + y * cosTheta
    # Build the Gabor filters: sine carrier detects edges, cosine
    # carrier detects lines
    #if hasattr(self, '_phase') and self._phase == 'edge':
    if self._targetType == 'edge':
      sinusoidalTerm = numpy.sin(2.0 * numpy.pi / self._wavelength * X)
    else:
      sinusoidalTerm = numpy.cos(2.0 * numpy.pi / self._wavelength * X)
    numerator = (X * X + self._aspectRatio * self._aspectRatio * Y * Y)
    denominator = -2.0 * self._effectiveWidth * self._effectiveWidth
    exponentialTerm = numpy.exp(numerator / denominator)
    gaborBank = sinusoidalTerm * exponentialTerm
    # Add center-surround filters, if requested
    if self._centerSurround:
      expFilter = exponentialTerm[0] * exponentialTerm[numOrientations/2]
      # Cubing the raw exponential component seems to give a nice
      # center-surround filter
      centerSurround = expFilter * expFilter * expFilter
      # If our center-surround filter is in addition to the oriented
      # filter, then concatenate it to our filter bank; otherwise
      # it is the filter bank
      if self._numOrientations > 0:
        gaborBank = numpy.concatenate((gaborBank, centerSurround[numpy.newaxis,:,:]))
      else:
        gaborBank = centerSurround[numpy.newaxis,:,:]
    # Apply lobe suppression: Suppress the outer lobes of the sinusoidal
    # component of the Gabor filters so as to avoid "ringing" effects in
    # the Gabor response maps.
    #
    # We make a single lobe-suppression mask (which is directionally
    # oriented.)  Then we rotate this mask by each orientation and
    # apply it to the pre-suppressed filter bank.
    # In order to minimize discontinuities in the gradients, the
    # suppression mask will be constructed as follows:
    #
    #   y = 1 - |x|^p
    #
    # where:
    #   y = Suppression (0 for total suppression, 1 for no-suppression)
    #   x = position relative to center
    #   p = Some exponent that controls the sharpness of suppression
    numGaborFilters = gaborBank.shape[0]
    # New lobe suppression.
    if self._lobeSuppression:
      # The orientation is always vertical, so we'll locate the discrete
      # filter cell where we go negative
      halfFilterDim = (self._filterDim - 1) / 2
      firstBadCell = None
      for cellIdx in xrange(halfFilterDim, self._filterDim):
        if gaborBank[0, 0, cellIdx] < 0.0:
          firstBadCell = cellIdx - halfFilterDim
          break
      if firstBadCell is not None:
        radialDist = numpy.abs(X / float(halfFilterDim))
        # Establish a radial distance threshold that is halfway
        # between the first discrete bad cell and the last good cell.
        if firstBadCell == halfFilterDim:
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell - 1])
        else:
          assert firstBadCell < halfFilterDim
          # Establish a radial distance threshold that is halfway
          # between the first discrete bad cell and the second bad cell.
          # This seems to give good results in practice.
          distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
                              radialDist[0, 0, halfFilterDim + firstBadCell + 1])
        suppressTerm = (radialDist < distThresh).astype(RealNumpyDType)
        # The center-surround plane (if any) is never suppressed
        if self._centerSurround:
          suppressTerm = numpy.concatenate((suppressTerm,
                                            numpy.ones((1, self._filterDim, self._filterDim),
                                            dtype=RealNumpyDType)))
        gaborBank *= suppressTerm
    # Normalize so that mean of each filter is zero
    means = gaborBank.mean(axis=2).mean(axis=1).reshape(numGaborFilters, 1, 1)
    offsets = means.repeat(self._filterDim, axis=1).repeat(self._filterDim, axis=2)
    gaborBank -= offsets
    # Normalize so that sum of squares over each filter is one
    squareSums = (gaborBank * gaborBank).sum(axis=2).sum(axis=1).reshape(numGaborFilters, 1, 1)
    scalars = 1.0 / numpy.sqrt(squareSums)
    gaborBank *= scalars
    # Log gabor filters to disk
    if self._logPrefix:
      for k in xrange(numGaborFilters):
        img = Image.new('L', (self._filterDim, self._filterDim))
        # Shift/scale each filter's weights into the 8-bit range
        minVal = gaborBank[k].min()
        gaborFilter = gaborBank[k] - minVal
        gaborFilter *= (254.99 / gaborFilter.max())
        img.putdata(gaborFilter.flatten().astype(numpy.uint8))
        img.save("%s.filter.%03d.png" % (self._logPrefix, k))
    # Store the Gabor Bank as a transposed set of 'numOrients' 1-D column-vectors
    # which can be easily dot-producted-ed against the split input vectors
    # during our compute() calls.
    # Fixed-point conversion: scale by 4096 (12 bits) for integer math in C.
    self._gaborBank = (gaborBank.astype(numpy.float32) * 4096.0).astype(numpy.int32)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def getSpec(cls):
ns = Spec(description = cls.__doc__,
singleNodeOnly=False)
ns.inputs = dict(
bottomUpIn=InputSpec(
description="""The input signal, conceptually organized as an
image pyramid data structure, but internally
organized as a flattened vector.""",
dataType='float',
regionLevel=False,
requireSplitterMap=False),
validRegionIn=InputSpec(
description="""A bounding box around the valid region of the image,
expressed in pixel coordinates; if the first element
of the bounding box is negative, then the valid
region is specified by 'validAlphaIn', in the form
of a non-rectangular alpha channel.""",
dataType='float',
regionLevel=True,
requireSplitterMap=False),
validAlphaIn=InputSpec(
description="""An alpha channel that may be used (in place of the
'validRegionIn' bounding box) to specify the valid
region of the image on a per-pixel basis; the channel
should be an image of identical size to the finest
resolution data input image.""",
dataType='float',
regionLevel=True,
requireSplitterMap=False)
)
ns.outputs = dict(
bottomUpOut=OutputSpec(
description="""The output signal, conceptually organized as an
image pyramid data structure, but internally
organized as a flattened vector.""",
dataType='float',
count=0,
regionLevel=False,
isDefaultOutput=True
),
topDownOut=OutputSpec(
description="""The feedback output signal, sent to the topDownIn
input of the next level down.""",
dataType='float',
count=0,
regionLevel=True)
)
ns.parameters = dict(
# -------------------------------------
# Create/Read-only parameters
filterDim=ParameterSpec(dataType='int', accessMode='Create',
description="""
The size (in pixels) of both the width and height of the
gabor filters. Defaults to 9x9.
""",
defaultValue=9),
numOrientations=ParameterSpec(dataType='int', accessMode='Create',
description="""
The number of gabor filter orientations to produce.
The half-circle (180 degrees) of rotational angle will be evenly partitioned.
Defaults to 4, which produces a gabor bank containing filters oriented
at 0, 45, 90, and 135 degrees.
"""),
phaseMode=ParameterSpec(dataType='str', accessMode='Create',
description="""
The number of separate phases to compute per orientation.
Valid values are: 'single' or 'dual'. In 'single', responses to each such
orientation are rectified by absolutizing them; i.e., a 90-degree edge
will produce the same responses as a 270-degree edge, and the two
responses will be indistinguishable. In "dual" mode, the responses to
each orientation are rectified by clipping at zero, and then creating
a second output response by inverting the raw response and again clipping
at zero; i.e., a 90-degree edge will produce a response only in the
90-degree-oriented plane, and a 270-degree edge will produce a response
only the dual phase plane associated with the 90-degree plane (an
implicit 270-degree plane.) Default is 'single'.
""",
constraints="enum: single, dual",
defaultValue='single'),
centerSurround=ParameterSpec(dataType='int', accessMode='Create',
description="""
Controls whether an additional filter corresponding to
a non-oriented "center surround" response is applied to the image.
If phaseMode is "dual", then a second "center surround" response plane
is added as well (the inverted version of the center-surround response.)
Defaults to False.
""",
defaultValue=0),
targetType=ParameterSpec(dataType='str', accessMode='Create',
description="""
The preferred "target" of the gabor filters. A value of
'line' specifies that line detectors (peaks in the center and troughs
on either side) are to be used. A value of 'edge' specifies that edge
detectors (with a peak on one side and a trough on the other) are to
be used. Default is 'edge'.
""",
constraints="enum: line,edge",
defaultValue='edge'),
gainConstant=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
A multiplicative amplifier that is applied to the gabor
responses after any normalization. Defaults to 1.0; larger values
increase the sensitivity to edges.
"""),
normalizationMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls the method by which responses are
normalized on a per image (and per scale) basis. Accepts the following
three legal values:
"fixed": No response normalization;
"max": Applies a global gain value to the responses so that the
max response equals the value of 'gainConstant'
"mean": Applies a global gain value to the responses so that the
mean response equals the value of 'gainConstant'
Default is 'fixed'.
""",
constraints="enum: fixed, mean, max"
),
perPlaneNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Controls whether normalization (as specified by
'normalizationMethod') is applied globally across all response planes
(for a given scale), or individually to each response plane. Default
is False. Note: this parameter is ignored if normalizationMethod is "fixed".
""",
),
perPhaseNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Controls whether normalization (as specified by
'normalizationMethod') is applied globally across both phases for a
particular response orientation and scale, or individually to each
phase of the response. Default is True. Note: this parameter is
ignored if normalizationMethod is "fixed".
""",
),
postProcessingMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls what type of post-processing (if any)
is to be performed on the normalized responses. Valid value are:
"raw": No post-processing is performed; final output values are
unmodified after normalization
"sigmoid": Passes normalized output values through a sigmoid function
parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
"threshold": Passes normalized output values through a piecewise linear
thresholding function parameterized by 'postProcessingMin'
and 'postProcessingMax'.
""",
constraints="enum: raw, sigmoid, threshold"),
postProcessingSlope=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the slope of the sigmoid function to apply if the
post-processing mode is set to 'sigmoid'.
"""),
postProcessingCenter=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the mid-point of the sigmoid function to apply if the
post-processing mode is set to 'sigmoid'.
"""),
postProcessingMin=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the value below which responses will be clipped to zero
when post-processing mode is set to 'threshold'.
"""),
postProcessingMax=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the value above which responses will be clipped to one
when post-processing mode is set to 'threshold'.
"""),
zeroThresholdOut=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
If all outputs of a gabor node are below this threshold,
they will all be driven to absolute 0. This is useful in conjunction with
using the product mode/don't care spatial pooler which needs to know when
an input should be treated as 0 vs being normalized to sum to 1.
"""),
boundaryMode=ParameterSpec(dataType='str', accessMode='Create',
description="""
Controls how GaborNode deals with boundary effects. Accepts
two valid parameters:
'constrained' -- Gabor responses are normally only computed for image locations
that are far enough from the edge of the input image so that the entire
filter mask fits within the input image. Thus, the spatial dimensions of
the output gabor maps will be smaller than the input image layers.
'sweepOff' -- Gabor responses will be generated at every location within
the input image layer. Thus, the spatial dimensions of the output gabor
maps will be identical to the spatial dimensions of the input image.
For input image locations that are near the edge (i.e., a portion of
the gabor filter extends off the edge of the input image), the values
of pixels that are off the edge of the image are taken to be as specifed
by the parameter 'offImagePixelValue'.
Default is 'constrained'.
""",
constraints='enum: constrained, sweepOff',
defaultValue='constrained'),
offImagePixelValue=ParameterSpec(dataType="str", accessMode='ReadWrite',
description="""
If 'boundaryMode' is set to 'sweepOff', then this
parameter specifies the value of the input pixel to use for "filling"
enough image locations outside the bounds of the original image.
Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
"""
),
suppressOutsideBox=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
If True, then gabor responses outside of the bounding
box (provided from the sensor) are suppressed. Internally, the bounding
box is actually expanded by half the filter dimension (respecting the edge
of the image, of course) so that responses can be computed for all image
locations within the original bounding box.
"""),
forceBoxContraction=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Fine-tunes the behavior of bounding box suppression.
If False (the default), then the bounding box will only be 'contracted'
(by the half-width of the filter) in the dimenion(s) in which it is not
the entire span of the image. If True, then the bounding box will be
contracted unconditionally.
"""),
suppressByAlpha=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
A boolean that, if True, instructs GaborNode to use
the pixel-accurate alpha mask received on the input 'validAlphaIn' for
the purpose of suppression of responses.
"""),
logPrefix=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
If non-None, causes the response planes at each scale, and
for each input image, to be written to disk using the specified prefix
for the name of the log images. Default is None (no such logging.)
"""),
maxTopDownOut=ParameterSpec(dataType='float', accessMode='Read', count=0,
description="""
The max top-down output from each node. It is faster to access this
variable than to fetch the entire top-down output of every node. The
top down image inspector fetches this parameter (if available)
instead of the topDownOut output variable for better performance.
"""),
# -------------------------------------
# Undocumented parameters
nta_aspectRatio=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls how "fat" (i.e., how oriented) the Gabor
filters are. A value of 1 would produce completely non-oriented
(circular) filters; smaller values will produce a more oriented
filter. Default is 0.3.
""",
defaultValue=0.3),
nta_effectiveWidth=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls the rate of exponential drop-off in
the Gaussian component of the Gabor filter. Default is 4.5.
""",
defaultValue=4.5),
nta_wavelength=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls the frequency of the sinusoidal component
of the Gabor filter. Default is 5.6.
""",
defaultValue=5.6),
nta_lobeSuppression=ParameterSpec(dataType='int', accessMode='Create',
description="""
Controls whether or not the secondary lobes of the
Gabor filters are suppressed. The suppression is performed based
on the radial distance from the oriented edge to which the Gabor
filter is tuned. If True, then the secondary lobes produced
by the pure mathematical Gabor equation will be suppressed
and have no effect; if False, then the pure mathematical
Gabor equation (digitized into discrete sampling points, of
course) will be used. Default is True.
""",
defaultValue=1),
nta_debugLogBuffers=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
If enabled, causes internal memory buffers used
C implementation to be dumped to disk after each compute()
cycle as an aid in the debugging of the C code path.
Defaults to False.
""",
),
nta_width=ParameterSpec(dataType="int", accessMode='Read',
description="""Width of the maximum resolution."""),
nta_height=ParameterSpec(dataType="int", accessMode='Read',
description="""Width of the maximum resolution."""),
nta_morphologyMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls the routines used to perform dilation and erosion of
valid alpha masks. Legal values are:
'opencv' -- use faster OpenCV routines;
'nta' -- use the slower Numenta routines;
'best' -- use OpenCV if it is available on the platform,
otherwise use the slower routines.
Default is 'best'.
"""),
)
return ns.toDict()
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
"""This method will be called only when the node is used in nuPIC 2"""
if name == 'bottomUpOut':
return self.getNumPlanes()
elif name == 'topDownOut':
return 0
else:
raise Exception('Unknown output: ' + name)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Command line unit testing
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Smoke test: build a one-region network to verify that the region can
# be instantiated with a representative parameter set.
if __name__=='__main__':
  from nupic.engine import Network
  n = Network()
  # Region parameters are passed as a YAML-style string
  gabor = n.addRegion(
      'gabor',
      'py.GaborNode2',
      """{ filterDim: 5,
           numOrientations: 2,
           centerSurround: 1,
           phaseMode: single,
           targetType: edge,
           gainConstant: 1.0,
           normalizationMethod: max,
           postProcessingMethod: threshold,
           postProcessingMin: 0.15,
           postProcessingMax: 1.0,
           boundaryMode: sweepOff,
           #suppressOutsideBox: False,
           #suppressByAlpha: True,
           offImagePixelValue: colorKey,
           zeroThresholdOut: 0.003
        }""")
  print 'Done.'
|
thodoris/djangoPharma | refs/heads/master | djangoPharma/env/Lib/site-packages/django/contrib/gis/db/backends/mysql/base.py | 58 | from django.db.backends.mysql.base import \
DatabaseWrapper as MySQLDatabaseWrapper
from .features import DatabaseFeatures
from .introspection import MySQLIntrospection
from .operations import MySQLOperations
from .schema import MySQLGISSchemaEditor
class DatabaseWrapper(MySQLDatabaseWrapper):
    """MySQL database wrapper with GIS (spatial) support.

    Swaps in the GIS-aware schema editor, features, introspection and
    operations classes so geometry columns and spatial lookups work on
    MySQL backends.
    """
    SchemaEditorClass = MySQLGISSchemaEditor

    # Classes instantiated in __init__().
    features_class = DatabaseFeatures
    introspection_class = MySQLIntrospection
    ops_class = MySQLOperations
|
jcfrank/myrepo | refs/heads/master | subcmds/abandon.py | 82 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from command import Command
from git_command import git
from progress import Progress
class Abandon(Command):
  """Delete a local development branch across one or more projects."""

  common = True
  helpSummary = "Permanently abandon a development branch"
  helpUsage = """
%prog <branchname> [<project>...]
This subcommand permanently abandons a development branch by
deleting it (and all its history) from your local repository.
It is equivalent to "git branch -D <branchname>".
"""

  def Execute(self, opt, args):
    """Abandon branch args[0] in the projects named by args[1:] (or all).

    Exits non-zero if the branch name is invalid, if any project fails
    to abandon the branch, or if no project had the branch at all.
    """
    if not args:
      self.Usage()

    # Fix: the original assigned `nb = args[0]` twice; the second
    # assignment was redundant and has been removed.
    nb = args[0]
    if not git.check_ref_format('heads/%s' % nb):
      print("error: '%s' is not a valid name" % nb, file=sys.stderr)
      sys.exit(1)

    err = []
    success = []
    all_projects = self.GetProjects(args[1:])

    pm = Progress('Abandon %s' % nb, len(all_projects))
    for project in all_projects:
      pm.update()
      # AbandonBranch returns True on success, False on failure, and
      # None when the project does not have the branch at all.
      status = project.AbandonBranch(nb)
      if status is not None:
        if status:
          success.append(project)
        else:
          err.append(project)
    pm.end()

    if err:
      for p in err:
        print("error: %s/: cannot abandon %s" % (p.relpath, nb),
              file=sys.stderr)
      sys.exit(1)
    elif not success:
      print('error: no project has branch %s' % nb, file=sys.stderr)
      sys.exit(1)
    else:
      print('Abandoned in %d project(s):\n %s'
            % (len(success), '\n '.join(p.relpath for p in success)),
            file=sys.stderr)
|
CatsAndDogsbvba/odoo | refs/heads/8.0 | addons/mass_mailing/controllers/main.py | 24 |
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
    """Public HTTP endpoints for mass mailing: open tracking, unsubscribe
    links, and the website newsletter subscription widget."""

    @http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
    def track_mail_open(self, mail_id, **post):
        """ Email tracking. """
        # The 1x1 transparent GIF trick: the mail embeds this image URL,
        # so fetching it marks the statistics record as opened.
        mail_mail_stats = request.registry.get('mail.mail.statistics')
        mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
        response = werkzeug.wrappers.Response()
        response.mimetype = 'image/gif'
        response.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
        return response

    @http.route(['/mail/mailing/<int:mailing_id>/unsubscribe'], type='http', auth='none')
    def mailing(self, mailing_id, email=None, res_id=None, **post):
        """Opt the recipient (res_id/email) out of the given mass mailing.

        Returns the plain string 'OK' on success or 'KO' when the mailing
        no longer exists.
        """
        cr, uid, context = request.cr, request.uid, request.context
        MassMailing = request.registry['mail.mass_mailing']
        mailing_ids = MassMailing.exists(cr, SUPERUSER_ID, [mailing_id], context=context)
        if not mailing_ids:
            return 'KO'
        mailing = MassMailing.browse(cr, SUPERUSER_ID, mailing_ids[0], context=context)
        if mailing.mailing_model == 'mail.mass_mailing.contact':
            # Contact-list mailing: flag the matching contact rows as opted out.
            list_ids = [l.id for l in mailing.contact_list_ids]
            record_ids = request.registry[mailing.mailing_model].search(cr, SUPERUSER_ID, [('list_id', 'in', list_ids), ('id', '=', res_id), ('email', 'ilike', email)], context=context)
            request.registry[mailing.mailing_model].write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
        else:
            # Generic target model: find which email field it defines,
            # then set opt_out there if the model supports it.
            email_fname = None
            model = request.registry[mailing.mailing_model]
            if 'email_from' in model._fields:
                email_fname = 'email_from'
            elif 'email' in model._fields:
                email_fname = 'email'
            if email_fname:
                # active_test=False so archived records can unsubscribe too.
                ctx = dict(context or {}, active_test=False)
                record_ids = model.search(cr, SUPERUSER_ID, [('id', '=', res_id), (email_fname, 'ilike', email)], context=ctx)
                if 'opt_out' in model._fields:
                    model.write(cr, SUPERUSER_ID, record_ids, {'opt_out': True}, context=context)
        return 'OK'

    @http.route(['/website_mass_mailing/is_subscriber'], type='json', auth="public", website=True)
    def is_subscriber(self, list_id, **post):
        """Return {'is_subscriber': bool, 'email': str|None} for the visitor."""
        cr, uid, context = request.cr, request.uid, request.context
        Contacts = request.registry['mail.mass_mailing.contact']
        Users = request.registry['res.users']

        is_subscriber = False
        email = None
        # Logged-in users are identified by their account email; anonymous
        # visitors by the email remembered in the session by subscribe().
        if uid != request.website.user_id.id:
            email = Users.browse(cr, SUPERUSER_ID, uid, context).email
        elif request.session.get('mass_mailing_email'):
            email = request.session['mass_mailing_email']

        if email:
            contact_ids = Contacts.search(cr, SUPERUSER_ID, [('list_id', '=', int(list_id)), ('email', '=', email), ('opt_out', '=', False)], context=context)
            is_subscriber = len(contact_ids) > 0

        return {'is_subscriber': is_subscriber, 'email': email}

    @http.route(['/website_mass_mailing/subscribe'], type='json', auth="public", website=True)
    def subscribe(self, list_id, email, **post):
        """Subscribe *email* to mailing list *list_id*; always returns True.

        Re-subscribing an opted-out contact clears its opt_out flag
        instead of creating a duplicate row.
        """
        cr, uid, context = request.cr, request.uid, request.context
        Contacts = request.registry['mail.mass_mailing.contact']

        # NOTE(review): get_name_email presumably splits "Name <addr>"
        # into (name, addr) -- confirm against the contact model.
        parsed_email = Contacts.get_name_email(email, context=context)[1]
        contact_ids = Contacts.search_read(
            cr, SUPERUSER_ID,
            [('list_id', '=', int(list_id)), ('email', '=', parsed_email)],
            ['opt_out'], context=context)
        if not contact_ids:
            Contacts.add_to_list(cr, SUPERUSER_ID, email, int(list_id), context=context)
        else:
            if contact_ids[0]['opt_out']:
                Contacts.write(cr, SUPERUSER_ID, [contact_ids[0]['id']], {'opt_out': False}, context=context)
        # add email to session so is_subscriber() can recognize the visitor
        request.session['mass_mailing_email'] = email
        return True
|
josecolella/PLD | refs/heads/master | bin/osx/treasurehunters.app/Contents/Resources/lib/python3.4/numpy/numarray/convolve.py | 13 | from __future__ import division, absolute_import, print_function
# Compatibility shim: re-export the convolution routines from whichever
# stsci package is installed, preferring the standalone distribution.
try:
    from stsci.convolve import *
except ImportError:
    # Older SciPy releases bundled the stsci tools; fall back to that copy.
    try:
        from scipy.stsci.convolve import *
    except ImportError:
        msg = \
"""The convolve package is not installed.
It can be downloaded by checking out the latest source from
http://svn.scipy.org/svn/scipy/trunk/Lib/stsci or by downloading and
installing all of SciPy from http://www.scipy.org.
"""
        raise ImportError(msg)
|
oroca/SkyRoverNano2-firmware_backup | refs/heads/master | scripts/dfu-convert.py | 23 | #!/usr/bin/python2
# Written by Antonio Galea - 2010/11/18
# Distributed under Gnu LGPL 3.0
# see http://www.gnu.org/licenses/lgpl-3.0.txt
#
# Removed ihex functionality and dependency, Bitcraze - 2014-12-11
import sys,struct,zlib,os
from optparse import OptionParser
DEFAULT_DEVICE="0x0483:0xdf11"
def named(tuple, names):
  """Map the whitespace-separated field *names* onto the values in *tuple*."""
  return {field: value for field, value in zip(names.split(), tuple)}
def consume(fmt, data, names):
  """Unpack one struct with layout *fmt* from the head of *data*.

  Returns (fields, rest) where fields is a dict keyed by the
  whitespace-separated *names* and rest is the unconsumed bytes.
  """
  size = struct.calcsize(fmt)
  head, rest = data[:size], data[size:]
  return named(struct.unpack(fmt, head), names), rest
def cstring(string):
  """Return *string* truncated at its first NUL byte (C-string semantics)."""
  return string.partition('\0')[0]
def compute_crc(data):
  """Return the DfuSe file CRC: the bitwise complement of CRC-32(data).

  `~crc & 0xFFFFFFFF` is identical to the original
  `0xFFFFFFFF & -crc - 1`, since -x - 1 == ~x in two's complement.
  """
  return (~zlib.crc32(data)) & 0xFFFFFFFF
def parse(file,dump_images=False):
  """Parse and pretty-print a DfuSe (.dfu) file (Python 2).

  Walks the DfuSe prefix, each target and its image elements, then the
  16-byte suffix, printing a one-line summary per record.  When
  *dump_images* is true, each image element is also written to
  "<file>.targetT.imageE.bin" in the current directory.  Problems are
  reported on stdout rather than raised.
  """
  print 'File: "%s"' % file
  data = open(file,'rb').read()
  # The stored CRC covers everything except its own trailing 4 bytes.
  crc = compute_crc(data[:-4])
  prefix, data = consume('<5sBIB',data,'signature version size targets')
  print '%(signature)s v%(version)d, image size: %(size)d, targets: %(targets)d' % prefix
  for t in range(prefix['targets']):
    tprefix, data = consume('<6sBI255s2I',data,'signature altsetting named name size elements')
    tprefix['num'] = t
    if tprefix['named']:
      # The 255-byte name field is NUL-padded; trim at the first NUL.
      tprefix['name'] = cstring(tprefix['name'])
    else:
      tprefix['name'] = ''
    print '%(signature)s %(num)d, alt setting: %(altsetting)s, name: "%(name)s", size: %(size)d, elements: %(elements)d' % tprefix
    tsize = tprefix['size']
    target, data = data[:tsize], data[tsize:]
    for e in range(tprefix['elements']):
      eprefix, target = consume('<2I',target,'address size')
      eprefix['num'] = e
      print ' %(num)d, address: 0x%(address)08x, size: %(size)d' % eprefix
      esize = eprefix['size']
      image, target = target[:esize], target[esize:]
      if dump_images:
        out = '%s.target%d.image%d.bin' % (file,t,e)
        open(out,'wb').write(image)
        print ' DUMPED IMAGE TO "%s"' % out
    # Leftover bytes mean the element sizes did not add up to the
    # target's declared size.
    if len(target):
      print "target %d: PARSE ERROR" % t
  suffix = named(struct.unpack('<4H3sBI',data[:16]),'device product vendor dfu ufd len crc')
  print 'usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x' % suffix
  if crc != suffix['crc']:
    print "CRC ERROR: computed crc32 is 0x%08x" % crc
  data = data[16:]
  if data:
    print "PARSE ERROR"
def build(file,targets,device=DEFAULT_DEVICE):
  """Assemble a DfuSe (.dfu) file from targets and write it (Python 2).

  *targets* is a list of targets, each a list of image dicts with
  'address' and 'data' keys.  *device* is a "vendor:product" string
  whose two numbers end up in the DfuSe suffix.
  """
  data = ''
  for t,target in enumerate(targets):
    tdata = ''
    for image in target:
      # Element record: <address><size> followed by the raw image bytes.
      tdata += struct.pack('<2I',image['address'],len(image['data']))+image['data']
    # Target prefix: signature, alt setting 0, named flag, name, size, count.
    tdata = struct.pack('<6sBI255s2I','Target',0,1,'ST...',len(tdata),len(target)) + tdata
    data += tdata
  # DfuSe prefix; total size includes the 11-byte prefix itself.
  data = struct.pack('<5sBIB','DfuSe',1,len(data)+11,len(targets)) + data
  v,d=map(lambda x: int(x,0) & 0xFFFF, device.split(':',1))
  # Suffix: bcdDevice=0, idProduct, idVendor, bcdDFU=0x011a, 'UFD', length 16.
  data += struct.pack('<4H3sB',0,d,v,0x011a,'UFD',16)
  crc = compute_crc(data)
  data += struct.pack('<I',crc)
  open(file,'wb').write(data)
if __name__=="__main__":
  # Command-line entry point (Python 2): either build a .dfu from one or
  # more address:binfile pairs, or parse/dump an existing .dfu.
  usage = """
%prog [-d|--dump] infile.dfu
%prog {-b|--build} address:file.bin [-b address:file.bin ...] [{-D|--device}=vendor:device] outfile.dfu"""
  parser = OptionParser(usage=usage)
  parser.add_option("-b", "--build", action="append", dest="binfiles",
    help="build a DFU file from given BINFILES", metavar="BINFILES")
  parser.add_option("-D", "--device", action="store", dest="device",
    help="build for DEVICE, defaults to %s" % DEFAULT_DEVICE, metavar="DEVICE")
  parser.add_option("-d", "--dump", action="store_true", dest="dump_images",
    default=False, help="dump contained images to current directory")
  (options, args) = parser.parse_args()

  if options.binfiles and len(args)==1:
    # Build mode: every -b argument is an "address:binfile" pair.
    target = []
    if options.binfiles:
      for arg in options.binfiles:
        try:
          address,binfile = arg.split(':',1)
        except ValueError:
          print "Address:file couple '%s' invalid." % arg
          sys.exit(1)
        try:
          # Base 0 accepts decimal, 0x..., and 0... notations.
          address = int(address,0) & 0xFFFFFFFF
        except ValueError:
          print "Address %s invalid." % address
          sys.exit(1)
        if not os.path.isfile(binfile):
          print "Unreadable file '%s'." % binfile
          sys.exit(1)
        target.append({ 'address': address, 'data': open(binfile,'rb').read() })
    outfile = args[0]
    device = DEFAULT_DEVICE
    if options.device:
      device=options.device
    try:
      # Validate the vendor:product pair early for a clean error message.
      v,d=map(lambda x: int(x,0) & 0xFFFF, device.split(':',1))
    except:
      print "Invalid device '%s'." % device
      sys.exit(1)
    build(outfile,[target],device)
  elif len(args)==1:
    # Parse/dump mode.
    infile = args[0]
    if not os.path.isfile(infile):
      print "Unreadable file '%s'." % infile
      sys.exit(1)
    parse(infile, dump_images=options.dump_images)
  else:
    parser.print_help()
    sys.exit(1)
|
rahuldhote/odoo | refs/heads/8.0 | addons/auth_oauth/__openerp__.py | 300 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: a bare dict read by the module loader.
{
    'name': 'OAuth2 Authentication',
    'version': '1.0',
    'category': 'Tools',
    'description': """
Allow users to login through OAuth2 Provider.
=============================================
""",
    'author': 'OpenERP s.a.',
    'maintainer': 'OpenERP s.a.',
    'website': 'https://www.odoo.com',
    # Modules that must be installed before this one.
    'depends': ['base', 'web', 'base_setup', 'auth_signup'],
    # Data files loaded at install/update time (views, security, defaults).
    'data': [
        'res_users.xml',
        'auth_oauth_data.xml',
        'auth_oauth_data.yml',
        'auth_oauth_view.xml',
        'security/ir.model.access.csv',
        'res_config.xml',
        'views/auth_oauth_login.xml',
    ],
    'installable': True,
    'auto_install': False,
}
|
szebenyib/bookshelf | refs/heads/master | markdown/extensions/__init__.py | 65 | """
Extensions
-----------------------------------------------------------------------------
"""
from __future__ import unicode_literals
class Extension(object):
    """ Base class for extensions to subclass. """

    def __init__(self, configs=None):
        """Create an instance of an Extension.

        Keyword arguments:

        * configs: A dict of configuration settings used by an Extension,
          mapping each key to a ``[value, description]`` pair.
        """
        # Bug fix: the original signature used a mutable default
        # (`configs={}`), so every instance created without an explicit
        # config shared -- and mutated, via setConfig() -- the same dict.
        # Keep a reference (not a copy) when the caller supplies one, so
        # existing aliasing callers still work.
        self.config = {} if configs is None else configs

    def getConfig(self, key, default=''):
        """ Return a setting for the given key or an empty string. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def getConfigs(self):
        """ Return all config settings as a dict. """
        return dict((key, self.getConfig(key)) for key in self.config.keys())

    def getConfigInfo(self):
        """ Return all config descriptions as a list of tuples. """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key, value):
        """ Set a config setting for `key` with the given `value`.

        The key must already exist in the config dict (KeyError otherwise).
        """
        self.config[key][0] = value

    def extendMarkdown(self, md, md_globals):
        """
        Add the various processors and patterns to the Markdown Instance.

        This method must be overridden by every extension.

        Keyword arguments:

        * md: The Markdown instance.

        * md_globals: Global variables in the markdown module namespace.

        """
        # Bug fix: the original implicit string concatenation produced the
        # garbled message '..."extendMarkdown"method.' (missing space).
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__))
|
Omegaphora/external_chromium_org | refs/heads/lp5.1 | tools/telemetry/telemetry/core/backends/adb_commands.py | 26 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Brings in Chrome Android's android_commands module, which itself is a
thin(ish) wrapper around adb."""
import logging
import os
import shutil
import stat
from telemetry.core import platform
from telemetry.core import util
from telemetry.util import support_binaries
# This is currently a thin wrapper around Chrome Android's
# build scripts, located in chrome/build/android. This file exists mainly to
# deal with locating the module.
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib import android_commands # pylint: disable=F0401
from pylib import constants # pylint: disable=F0401
try:
from pylib import ports # pylint: disable=F0401
except Exception:
ports = None
from pylib.device import device_utils # pylint: disable=F0401
def IsAndroidSupported():
  """Return True if the pylib device_utils module is available (not None)."""
  # PEP 8: identity, not equality, comparison against None.
  return device_utils is not None
def GetAttachedDevices():
  """Returns a list of attached, online android devices.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list."""
  # Thin delegation to Chrome Android's build scripts (see module docstring).
  return android_commands.GetAttachedDevices()
def AllocateTestServerPort():
  # NOTE(review): `ports` is set to None when the pylib import at module
  # scope fails, in which case this raises AttributeError -- presumably
  # callers check availability first; confirm.
  return ports.AllocateTestServerPort()


def ResetTestServerPortAllocation():
  # Same caveat as AllocateTestServerPort regarding `ports` being None.
  return ports.ResetTestServerPortAllocation()
class AdbCommands(object):
  """A thin wrapper around ADB"""

  def __init__(self, device):
    # device: serial string identifying the attached Android device.
    self._device = device_utils.DeviceUtils(device)
    self._device_serial = device

  def device_serial(self):
    # Serial string this wrapper was constructed with.
    return self._device_serial

  def device(self):
    # Underlying pylib DeviceUtils instance.
    return self._device

  def __getattr__(self, name):
    """Delegate all unknown calls to the underlying AndroidCommands object."""
    return getattr(self._device.old_interface, name)

  def Forward(self, local, remote):
    # Run `adb forward <local> <remote>`; adb prints nothing on success,
    # hence the empty-string assertion.
    ret = self._device.old_interface.Adb().SendCommand(
        'forward %s %s' % (local, remote))
    assert ret == ''

  def IsUserBuild(self):
    # 'user' is the production build type (as opposed to eng/userdebug).
    return self._device.GetProp('ro.build.type') == 'user'
def GetBuildTypeOfPath(path):
  """Return the build type whose output directory appears in *path*.

  Returns None when *path* is empty/None or matches no known build
  directory reported by util.GetBuildDirectories().
  """
  if not path:
    return None
  matches = (build_type
             for build_dir, build_type in util.GetBuildDirectories()
             if os.path.join(build_dir, build_type) in path)
  return next(matches, None)
def SetupPrebuiltTools(adb):
  """Some of the android pylib scripts we depend on are lame and expect
  binaries to be in the out/ directory. So we copy any prebuilt binaries there
  as a prereq.

  Returns True when all required tools are available (copied or locally
  built); for non-ARM devices, returns whether every device tool has a
  locally built binary.
  """
  # TODO(bulach): Build the targets for x86/mips.
  # Binaries that run on the Android device.
  device_tools = [
      'file_poller',
      'forwarder_dist/device_forwarder',
      'md5sum_dist/md5sum_bin',
      'purge_ashmem',
      'run_pie',
  ]

  # Binaries that run on the host machine.
  host_tools = [
      'bitmaptools',
      'md5sum_bin_host',
  ]
  if platform.GetHostPlatform().GetOSName() == 'linux':
    host_tools.append('host_forwarder')

  # Prebuilts are only checked in for ARM ABIs; other ABIs must rely
  # entirely on locally built device tools.
  has_device_prebuilt = adb.device().GetProp('ro.product.cpu.abi').startswith(
      'armeabi')
  if not has_device_prebuilt:
    return all([support_binaries.FindLocallyBuiltPath(t) for t in device_tools])

  build_type = None
  for t in device_tools + host_tools:
    executable = os.path.basename(t)
    locally_built_path = support_binaries.FindLocallyBuiltPath(t)
    if not build_type:
      # Infer Debug/Release from the first tool found (default: Release).
      build_type = GetBuildTypeOfPath(locally_built_path) or 'Release'
      constants.SetBuildType(build_type)
    dest = os.path.join(constants.GetOutDirectory(), t)
    if not locally_built_path:
      # No local build of this tool: copy the checked-in prebuilt into out/.
      logging.info('Setting up prebuilt %s', dest)
      if not os.path.exists(os.path.dirname(dest)):
        os.makedirs(os.path.dirname(dest))
      platform_name = ('android' if t in device_tools else
                       platform.GetHostPlatform().GetOSName())
      prebuilt_path = support_binaries.FindPath(executable, platform_name)
      if not prebuilt_path or not os.path.exists(prebuilt_path):
        raise NotImplementedError("""
%s must be checked into cloud storage.
Instructions:
http://www.chromium.org/developers/telemetry/upload_to_cloud_storage
""" % t)
      shutil.copyfile(prebuilt_path, dest)
      # Prebuilts are not executable after copy; restore rwx for the owner.
      os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  return True
|
clumsy/intellij-community | refs/heads/master | python/testData/intentions/beforeReturnTypeInEmptyGoogleDocString.py | 106 | def <caret>f(x, y):
"""""" |
programadorjc/django | refs/heads/master | tests/check_framework/test_security.py | 242 | from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_secure
return check_session_cookie_secure
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_httponly
return check_session_cookie_httponly
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
    """Tests for the csrf.W003 system check (CsrfViewMiddleware presence)."""

    @property
    def func(self):
        # NOTE(review): imported inside the property rather than at module
        # scope, presumably so settings overrides are in effect -- confirm.
        from django.core.checks.security.csrf import check_csrf_middleware
        return check_csrf_middleware

    @override_settings(MIDDLEWARE_CLASSES=[])
    def test_no_csrf_middleware(self):
        """
        Warn if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES.
        """
        self.assertEqual(self.func(None), [csrf.W003])

    @override_settings(
        MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"])
    def test_with_csrf_middleware(self):
        # No warning when the middleware is installed.
        self.assertEqual(self.func(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_secure
return check_csrf_cookie_secure
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(self.func(None), [csrf.W016])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_httponly
return check_csrf_cookie_httponly
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_HTTPONLY isn't True.
"""
self.assertEqual(self.func(None), [csrf.W017])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_HTTPONLY is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=True)
def test_with_csrf_cookie_httponly_true(self):
self.assertEqual(self.func(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_security_middleware
return check_security_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W001])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"])
def test_with_security_middleware(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts
return check_sts
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(self.func(None), [base.W004])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middlware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600)
def test_with_sts(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_include_subdomains
return check_sts_include_subdomains
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(self.func(None), [base.W005])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains_no_middlware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600)
def test_with_sts_subdomains(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_options_middleware
return check_xframe_options_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W002])
@override_settings(MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_deny
return check_xframe_deny
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE_CLASSES but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [base.W019])
@override_settings(MIDDLEWARE_CLASSES=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(self.func(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_content_type_nosniff
return check_content_type_nosniff
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(self.func(None), [base.W006])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_with_content_type_nosniff(self):
self.assertEqual(self.func(None), [])
class CheckXssFilterTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xss_filter
return check_xss_filter
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter(self):
"""
Warn if SECURE_BROWSER_XSS_FILTER isn't True.
"""
self.assertEqual(self.func(None), [base.W007])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter_no_middleware(self):
"""
Don't warn if SECURE_BROWSER_XSS_FILTER isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=True)
def test_with_xss_filter(self):
self.assertEqual(self.func(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_ssl_redirect
return check_ssl_redirect
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(self.func(None), [base.W008])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middlware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True)
def test_with_ssl_redirect(self):
self.assertEqual(self.func(None), [])
class CheckSecretKeyTest(SimpleTestCase):
    """Tests for django.core.checks.security.base.check_secret_key."""

    @property
    def func(self):
        # Resolve the check function at access time rather than import time.
        from django.core.checks.security.base import check_secret_key
        return check_secret_key

    @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
    def test_okay_secret_key(self):
        # Exactly the minimum length and with enough distinct characters:
        # the check passes with no messages.
        self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
        self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
        self.assertEqual(self.func(None), [])

    @override_settings(SECRET_KEY='')
    def test_empty_secret_key(self):
        self.assertEqual(self.func(None), [base.W009])

    @override_settings(SECRET_KEY=None)
    def test_missing_secret_key(self):
        # Delete the attribute entirely: the check must cope with a settings
        # module that defines no SECRET_KEY at all.
        del settings.SECRET_KEY
        self.assertEqual(self.func(None), [base.W009])

    @override_settings(SECRET_KEY=None)
    def test_none_secret_key(self):
        self.assertEqual(self.func(None), [base.W009])

    @override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
    def test_low_length_secret_key(self):
        # One character short of the minimum length triggers W009.
        self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
        self.assertEqual(self.func(None), [base.W009])

    @override_settings(SECRET_KEY='abcd' * 20)
    def test_low_entropy_secret_key(self):
        # Long enough overall, but too few unique characters triggers W009.
        self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
        self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
        self.assertEqual(self.func(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
    """Tests for django.core.checks.security.base.check_debug."""

    @property
    def func(self):
        # Resolve the check function at access time rather than import time.
        from django.core.checks.security.base import check_debug
        return check_debug

    @override_settings(DEBUG=True)
    def test_debug_true(self):
        """
        Warn if DEBUG is True.
        """
        self.assertEqual(self.func(None), [base.W018])

    @override_settings(DEBUG=False)
    def test_debug_false(self):
        # DEBUG disabled: no messages expected.
        self.assertEqual(self.func(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
    """Tests for django.core.checks.security.base.check_allowed_hosts."""

    @property
    def func(self):
        # Resolve the check function at access time rather than import time.
        from django.core.checks.security.base import check_allowed_hosts
        return check_allowed_hosts

    @override_settings(ALLOWED_HOSTS=[])
    def test_allowed_hosts_empty(self):
        # An empty ALLOWED_HOSTS is unusable in production: expect W020.
        self.assertEqual(self.func(None), [base.W020])

    @override_settings(ALLOWED_HOSTS=['.example.com', ])
    def test_allowed_hosts_set(self):
        self.assertEqual(self.func(None), [])
|
findapad/find_a_pad | refs/heads/master | find_a_pad_app/migrations/0003_auto_20170709_1432.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-09 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: tighten organization contact fields and Type.name."""

    dependencies = [
        ('find_a_pad_app', '0002_auto_20170709_1414'),
    ]

    operations = [
        # `default=None` together with `preserve_default=False` is how
        # makemigrations records a one-off default supplied interactively;
        # the column keeps no default after the migration runs.
        migrations.AlterField(
            model_name='organization',
            name='email',
            field=models.CharField(default=None, max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='organization',
            name='phone_number',
            field=models.CharField(default=None, max_length=12),
            preserve_default=False,
        ),
        # Enforce uniqueness of Type.name at the database level.
        migrations.AlterField(
            model_name='type',
            name='name',
            field=models.CharField(max_length=200, unique=True),
        ),
    ]
|
Guneet-Dhillon/mxnet | refs/heads/master | example/reinforcement-learning/parallel_actor_critic/train.py | 24 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Trains an `Agent` using trajectories from multiple environments."""
import argparse
from itertools import chain
import time
import gym
import numpy as np
import mxnet as mx
from config import Config
from envs import Atari8080Preprocessor, IdentityPreprocessor
from model import Agent
def train_episode(agent, envs, preprocessors, t_max, render):
    """Complete an episode's worth of training for each environment.

    Runs all environments in lockstep, batching their observations into a
    single forward pass through the agent's model. Every `t_max` steps the
    accumulated trajectories are flushed into `agent.train_step`, with the
    current state value used to bootstrap unfinished returns (Algorithm S3
    in the A3C paper).

    Args:
        agent: `Agent` providing `model`, `act` and `train_step`.
        envs: list of gym environments, stepped together.
        preprocessors: one observation preprocessor per environment.
        t_max: number of environment steps between parameter updates.
        render: when True, render the first environment every step.

    Returns:
        NumPy array with each environment's total (undiscounted) reward.
    """
    num_envs = len(envs)

    # Buffers to hold trajectories, e.g. `env_xs[i]` will hold the observations
    # for environment `i`.
    env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
    env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
    # Use the builtin `float`: `np.float` was a deprecated alias for it
    # (removed in NumPy 1.24), so this keeps the exact same float64 dtype
    # while remaining compatible with modern NumPy.
    episode_rs = np.zeros(num_envs, dtype=float)

    for p in preprocessors:
        p.reset()
    observations = [p.preprocess(e.reset())
                    for p, e in zip(preprocessors, envs)]

    done = np.array([False for _ in range(num_envs)])
    all_done = False
    t = 1
    while not all_done:
        if render:
            envs[0].render()

        # NOTE(reed): Reshape to set the data shape.
        agent.model.reshape([('data', (num_envs, preprocessors[0].obs_size))])

        step_xs = np.vstack([o.ravel() for o in observations])

        # Get actions and values for all environments in a single forward pass.
        step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
        data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
        agent.model.forward(data_batch, is_train=False)
        _, step_vs, _, step_ps = agent.model.get_outputs()

        step_ps = step_ps.asnumpy()
        step_vs = step_vs.asnumpy()
        step_as = agent.act(step_ps)

        # Step each environment whose episode has not completed.
        for i, env in enumerate(envs):
            if not done[i]:
                obs, r, done[i], _ = env.step(step_as[i])

                # Record the observation, action, value, and reward in the
                # buffers.
                env_xs[i].append(step_xs[i].ravel())
                env_as[i].append(step_as[i])
                env_vs[i].append(step_vs[i][0])
                env_rs[i].append(r)
                episode_rs[i] += r

                # Add 0 as the state value when done.
                if done[i]:
                    env_vs[i].append(0.0)
                else:
                    observations[i] = preprocessors[i].preprocess(obs)

        # Perform an update every `t_max` steps.
        if t == t_max:
            # If the episode has not finished, add current state's value. This
            # will be used to 'bootstrap' the final return (see Algorithm S3
            # in A3C paper).
            step_xs = np.vstack([o.ravel() for o in observations])
            step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
            data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
            agent.model.forward(data_batch, is_train=False)
            _, extra_vs, _, _ = agent.model.get_outputs()
            extra_vs = extra_vs.asnumpy()
            for i in range(num_envs):
                if not done[i]:
                    env_vs[i].append(extra_vs[i][0])

            # Perform update and clear buffers.
            env_xs = np.vstack(list(chain.from_iterable(env_xs)))
            agent.train_step(env_xs, env_as, env_rs, env_vs)
            env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
            env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
            t = 0

        all_done = np.all(done)
        t += 1

    return episode_rs
def _2d_list(n):
return [[] for _ in range(n)]
def save_params(save_pre, model, epoch):
    """Checkpoint `model` (weights and optimizer state) under prefix `save_pre`."""
    model.save_checkpoint(save_pre, epoch, save_optimizer_states=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--num-envs', type=int, default=16)
    parser.add_argument('--t-max', type=int, default=50)
    parser.add_argument('--env-type', default='PongDeterministic-v3')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--save-pre', default='checkpoints')
    parser.add_argument('--save-every', type=int, default=0)
    parser.add_argument('--num-episodes', type=int, default=100000)
    parser.add_argument('--learning-rate', type=float, default=1e-3)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--print-every', type=int, default=1)
    parser.add_argument('--gpu', action='store_true')

    # Parse arguments and setup configuration `config`
    args = parser.parse_args()
    config = Config(args)
    print('args=%s' % args)
    print('config=%s' % config.__dict__)

    np.random.seed(args.seed)

    # Create and seed the environments
    envs = [gym.make(args.env_type) for _ in range(args.num_envs)]
    if args.env_type == 'CartPole-v0':
        # CartPole observations are flat vectors; pass them through unchanged.
        preprocessors = [
            IdentityPreprocessor(np.prod(envs[0].observation_space.shape))
            for _ in range(args.num_envs)]
    else:
        # Everything else is assumed to be an Atari environment (frames
        # reduced by Atari8080Preprocessor — presumably to 80x80, per its
        # name; see envs.py to confirm).
        preprocessors = [Atari8080Preprocessor() for _ in range(args.num_envs)]
    for i, env in enumerate(envs):
        # Give each environment a distinct seed so trajectories differ.
        env.seed(i+args.seed)
    agent = Agent(preprocessors[0].obs_size, envs[0].action_space.n,
                  config=config)

    # Train
    running_reward = None
    start = time.time()
    for i in range(args.num_episodes):
        tic = time.time()
        episode_rs = train_episode(
            agent, envs, preprocessors, t_max=args.t_max, render=args.render)

        for er in episode_rs:
            # Exponential moving average of per-episode reward.
            running_reward = er if running_reward is None else (
                0.99 * running_reward + 0.01 * er)

        if i % args.print_every == 0:
            print('Batch %d complete (%.2fs) (%.1fs elapsed) (episode %d), '
                  'batch avg. reward: %.2f, running reward: %.3f' %
                  (i, time.time() - tic, time.time() - start,
                   (i + 1) * args.num_envs, np.mean(episode_rs),
                   running_reward))

        if args.save_every > 0:
            if i % args.save_every == 0:
                save_params(args.save_pre, agent.model, i)
|
evodify/genotype-files-manipulations | refs/heads/master | selectSamples_in_callsTab.py | 1 | #!/usr/bin/env python2
"""
This script subsamples a genotype calls file by sample names. It also can be used to rearrange samples in a calls file.
# input file:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A W N N A N N N N
chr_1 2 C Y Y N C C N C N
chr_1 3 C N - N C C C C C
chr_1 4 T T T N T T T T T
chr_1 6 C N C N C C C C C
chr_2 1 A A A N A A A A A
chr_2 2 C C C N C C C C C
chr_2 3 C N N N N N N N N
chr_2 4 C C T C C C C C C
chr_2 5 T T C T Y T Y T T
chr_3 1 G G N N G N N N N
chr_3 2 C S C N C C N C N
chr_3 3 N N N N N N N N N
chr_3 4 N T T N T T T T N
chr_3 5 G - N N G G G C G
chr_4 1 G - N N G G G C G
chr_4 2 G - N N G G G C G
# output:
CHROM POS sample3 sample4 sample6 sample8 REF
chr_1 1 N A N N A
chr_1 2 N C N N C
chr_1 3 N C C C C
chr_1 4 N T T T T
chr_1 6 N C C C C
chr_2 1 N A A A A
chr_2 2 N C C C C
chr_2 3 N N N N C
chr_2 4 C C C C C
chr_2 5 T Y Y T T
chr_3 1 N G N N G
chr_3 2 N C N N C
chr_3 3 N N N N N
chr_3 4 N T T N N
chr_3 5 N G G G G
chr_4 1 N G G G G
chr_4 2 N G G G G
# command:
$ python2 selectSamples_in_callsTab.py -i input.tab -o output -s "sample3,sample4,sample6,sample8,REF"
# contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
"""
############################# modules #############################

import calls  # my custom module

############################# options #############################

parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help='name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help='name of the output file', type=str, required=True)
parser.add_argument('-s', '--samples', help='column names of the samples to process (optional)', type=str, required=False)
args = parser.parse_args()

# check if sample names are given and if all sample names are present in the header
sampleNames = calls.checkSampleNames(args.samples, args.input)

############################# program #############################

counter = 0

print('Opening the file...')
with open(args.input) as datafile:
    header_words = datafile.readline().split()

    # index samples
    sampCol = calls.indexSamples(sampleNames, header_words)

    # make output header
    print('Creating the output file...')
    fileoutput = open(args.output, 'w')
    sampHeader = calls.selectSamples([0, 1] + sampCol, header_words)
    sampHeaderP = '\t'.join(str(el) for el in sampHeader)
    fileoutput.write(sampHeaderP + '\n')

    for line in datafile:
        words = line.split()
        chr_pos = words[0:2]

        # select samples
        genotypes = calls.selectSamples(sampCol, words)

        # make output
        chr_posP = '\t'.join(str(el) for el in chr_pos)
        genotypesP = '\t'.join(str(el) for el in genotypes)
        fileoutput.write('%s\t%s\n' % (chr_posP, genotypesP))

        # track progress; use a function-style print (works under both
        # Python 2 and 3) instead of the Python-2-only `print x, y`
        # statement, for consistency with the rest of the file.
        counter += 1
        if counter % 1000000 == 0:
            print('%d lines processed' % counter)

# The `with` block already closed datafile; only the output needs closing.
fileoutput.close()
print('Done!')
|
unicefuganda/ureport_website | refs/heads/master | ureport_website/ureport_website/settings/production.py | 1 | """Production settings and globals."""
from os import environ
from base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """Return the value of the environment variable `setting`.

    Raises ImproperlyConfigured when the variable is not set, so the
    application fails fast on a missing required setting.
    """
    if setting not in environ:
        raise ImproperlyConfigured("Set the %s env variable" % setting)
    return environ[setting]
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
# NOTE(review): only loopback is listed — requests carrying any other Host
# header will be rejected; presumably a reverse proxy rewrites the Host
# header. Confirm before serving a public hostname directly.
ALLOWED_HOSTS = ['127.0.0.1']
########## END HOST CONFIGURATION

########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
# NOTE(review): placeholder default — set EMAIL_HOST_USER in the environment.
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME

# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True

# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION

########## DATABASE CONFIGURATION
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'ureport_website',
        'HOST': 'localhost',
        'USER': 'postgres',
        # Non-standard Postgres port — presumably a connection pooler
        # (e.g. pgbouncer) listens here; confirm with ops.
        'PORT': '6543',
    },
}
########## END DATABASE CONFIGURATION

########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
        # NOTE(review): Django's cache framework documents KEY_PREFIX, not
        # PREFIX — verify this option is actually honored.
        'PREFIX': 'ureport-website-',
    }
}
########## END CACHE CONFIGURATION

########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# get_env_setting raises ImproperlyConfigured when the variable is unset,
# so the app cannot start without a secret key.
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION

########## UREPORT API CONFIGURATION
# Credentials are mandatory in production (get_env_setting fails fast).
UREPORT_API_USERNAME = get_env_setting('UREPORT_API_USERNAME')
UREPORT_API_KEY = get_env_setting('UREPORT_API_KEY')
UREPORT_API_BASE = 'http://ureport.ug/api/v1/'
# 0 presumably means "no limit" — confirm against the API client code.
UREPORT_API_LIMIT = 0
REPORT_PULSE_WS = '/static/pulse'
UREPORT_PULSE_DISTRICT_WS = '/static/districts.json'
########## END UREPORT API CONFIGURATION
|
stvstnfrd/edx-platform | refs/heads/master | import_shims/studio/contentstore/views/tests/test_item.py | 2 | """Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long

from import_shims.warn import warn_deprecated_import

# Emit a deprecation warning pointing callers at the new module path, then
# re-export everything from the new location so old-style imports keep working.
warn_deprecated_import('contentstore.views.tests.test_item', 'cms.djangoapps.contentstore.views.tests.test_item')

from cms.djangoapps.contentstore.views.tests.test_item import *
|
chris2286266/c2coin | refs/heads/master | contrib/wallettools/walletchangepass.py | 785 | from jsonrpc import ServiceProxy
from getpass import getpass

# Connect to the local wallet RPC endpoint.
access = ServiceProxy("http://127.0.0.1:9332")
# Read passphrases with terminal echo disabled: raw_input() would display
# the wallet passphrases on screen as they are typed.
pwd = getpass("Enter old wallet passphrase: ")
pwd2 = getpass("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.