repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
michaelkebe/pulseaudio-dlna | pulseaudio_dlna/utils/subprocess.py | 6 | 2828 | #!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import subprocess
import threading
import os
import gobject
import sys
class Subprocess(subprocess.Popen):
def __init__(self, cmd, uid=None, gid=None, cwd=None, env=None,
*args, **kwargs):
self.uid = uid
self.gid = gid
self.cwd = cwd
self.env = env
super(Subprocess, self).__init__(
cmd,
preexec_fn=self.demote(uid, gid), cwd=cwd, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
bufsize=1)
def demote(self, uid, gid):
def fn_uid_gid():
os.setgid(gid)
os.setuid(uid)
def fn_uid():
os.setuid(uid)
def fn_gid():
os.setgid(gid)
def fn_nop():
pass
if uid and gid:
return fn_uid_gid
elif uid:
return fn_uid
elif gid:
return fn_gid
return fn_nop
class GobjectMainLoopMixin(object):
def __init__(self, *args, **kwargs):
super(GobjectMainLoopMixin, self).__init__(*args, **kwargs)
for pipe in [self.stdout, self.stderr]:
gobject.io_add_watch(
pipe, gobject.IO_IN | gobject.IO_PRI, self._on_new_data)
def _on_new_data(self, fd, condition):
sys.stdout.write(fd.readline().rstrip() + '\n')
sys.stdout.flush()
return True
class ThreadedMixIn(object):
def __init__(self, *args, **kwargs):
super(ThreadedMixIn, self).__init__(*args, **kwargs)
self.init_thread(self.stdout)
self.init_thread(self.stderr)
def init_thread(self, pipe):
def read_all(pipe):
with pipe:
for line in iter(pipe.readline, ''):
sys.stdout.write(line)
sys.stdout.flush()
t = threading.Thread(target=read_all, args=(pipe, ))
t.daemon = True
t.start()
class ThreadedSubprocess(ThreadedMixIn, Subprocess):
pass
class GobjectSubprocess(GobjectMainLoopMixin, Subprocess):
pass
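# A minimal usage sketch (not part of the original module; the ping command is
# an arbitrary illustration). ThreadedSubprocess echoes the child's output from
# its daemon reader threads; GobjectSubprocess relays it via io_add_watch
# callbacks once a GObject main loop is running:
#
#   proc = ThreadedSubprocess(['ping', '-c', '3', 'localhost'])
#   proc.wait()
#
#   proc = GobjectSubprocess(['ping', '-c', '3', 'localhost'])
#   gobject.MainLoop().run()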
| gpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/google/appengine/datastore/datastore_index.py | 2 | 24383 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Primitives for dealing with datastore indexes.
Example index.yaml file:
------------------------
indexes:
- kind: Cat
ancestor: no
properties:
- name: name
- name: age
direction: desc
- kind: Cat
properties:
- name: name
direction: ascending
- name: whiskers
direction: descending
- kind: Store
ancestor: yes
properties:
- name: business
direction: asc
- name: owner
direction: asc
"""
import itertools
from google.appengine.api import datastore_types
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import entity_pb
class Property(validation.Validated):
"""Representation for an individual property of an index.
Attributes:
name: Name of attribute to sort by.
direction: Direction of sort.
"""
ATTRIBUTES = {
'name': validation.Type(str, convert=False),
'direction': validation.Options(('asc', ('ascending',)),
('desc', ('descending',)),
default='asc'),
}
class Index(validation.Validated):
"""Individual index definition.
Order of the properties determines a given index's sort priority.
Attributes:
kind: Datastore kind that index belongs to.
ancestor: Whether to include ancestors in the index.
properties: Properties to sort on.
"""
ATTRIBUTES = {
'kind': validation.Type(str, convert=False),
'ancestor': validation.Type(bool, convert=False, default=False),
'properties': validation.Optional(validation.Repeated(Property)),
}
class IndexDefinitions(validation.Validated):
"""Top level for index definition file.
Attributes:
indexes: List of Index definitions.
"""
ATTRIBUTES = {
'indexes': validation.Optional(validation.Repeated(Index)),
}
def ParseIndexDefinitions(document, open_fn=None):
"""Parse an individual index definitions document from string or stream.
Args:
document: Yaml document as a string or file-like stream.
open_fn: Function for opening files. Unused.
Raises:
EmptyConfigurationFile when the configuration file is empty.
MultipleConfigurationFile when the configuration file contains more than
one document.
Returns:
Single parsed yaml file if one is defined, else None.
"""
try:
return yaml_object.BuildSingleObject(IndexDefinitions, document)
except yaml_errors.EmptyConfigurationFile:
return None
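# Illustrative call (mirroring the index.yaml example in the module docstring):
#
#   document = '\n'.join([
#       'indexes:',
#       '- kind: Cat',
#       '  properties:',
#       '  - name: name',
#       '  - name: age',
#       '    direction: desc'])
#   definitions = ParseIndexDefinitions(document)
#   # definitions.indexes[0].kind == 'Cat'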
def ParseMultipleIndexDefinitions(document):
"""Parse multiple index definitions documents from a string or stream.
Args:
document: Yaml document as a string or file-like stream.
Returns:
A list of datastore_index.IndexDefinitions objects, one for each document.
"""
return yaml_object.BuildObjects(IndexDefinitions, document)
def IndexDefinitionsToKeys(indexes):
"""Convert IndexDefinitions to set of keys.
Args:
indexes: A datastore_index.IndexDefinitions instance, or None.
Returns:
A set of keys constructed from the argument, each key being a
tuple of the form (kind, ancestor, properties) where properties is
a tuple of (name, direction) pairs, direction being ASCENDING or
DESCENDING (the enums).
"""
keyset = set()
if indexes is not None:
if indexes.indexes:
for index in indexes.indexes:
keyset.add(IndexToKey(index))
return keyset
def IndexToKey(index):
"""Convert Index to key.
Args:
index: A datastore_index.Index instance (not None!).
Returns:
A tuple of the form (kind, ancestor, properties) where properties
is a tuple of (name, direction) pairs, direction being ASCENDING
or DESCENDING (the enums).
"""
props = []
if index.properties is not None:
for prop in index.properties:
if prop.direction == 'asc':
direction = ASCENDING
else:
direction = DESCENDING
props.append((prop.name, direction))
return index.kind, index.ancestor, tuple(props)
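# For the 'Cat' index parsed in the example above, IndexToKey returns, e.g.:
#   ('Cat', False, (('name', ASCENDING), ('age', DESCENDING)))
# where ASCENDING and DESCENDING are the datastore_pb.Query_Order enums
# defined directly below.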
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING
EQUALITY_OPERATORS = set((datastore_pb.Query_Filter.EQUAL,
))
INEQUALITY_OPERATORS = set((datastore_pb.Query_Filter.LESS_THAN,
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query_Filter.GREATER_THAN,
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
))
EXISTS_OPERATORS = set((datastore_pb.Query_Filter.EXISTS,
))
_DIRECTION_MAP = {
'asc': entity_pb.Index_Property.ASCENDING,
'ascending': entity_pb.Index_Property.ASCENDING,
'desc': entity_pb.Index_Property.DESCENDING,
'descending': entity_pb.Index_Property.DESCENDING,
}
def Normalize(filters, orders, exists):
""" Normalizes filter and order query components.
The resulting components have the same effect as the given components if used
in a query.
Args:
filters: the filters set on the query
orders: the orders set on the query
exists: the names of properties that require an exists filter if
not already specified
Returns:
(filter, orders) the reduced set of filters and orders
"""
eq_properties = set()
inequality_properties = set()
for f in filters:
if f.op() == datastore_pb.Query_Filter.IN and f.property_size() == 1:
f.set_op(datastore_pb.Query_Filter.EQUAL)
if f.op() in EQUALITY_OPERATORS:
eq_properties.add(f.property(0).name())
elif f.op() in INEQUALITY_OPERATORS:
inequality_properties.add(f.property(0).name())
eq_properties -= inequality_properties
remove_set = eq_properties.copy()
new_orders = []
for o in orders:
if o.property() not in remove_set:
remove_set.add(o.property())
new_orders.append(o)
orders = new_orders
remove_set.update(inequality_properties)
new_filters = []
for f in filters:
if f.op() not in EXISTS_OPERATORS:
new_filters.append(f)
continue
name = f.property(0).name()
if name not in remove_set:
remove_set.add(name)
new_filters.append(f)
for prop in exists:
if prop not in remove_set:
remove_set.add(prop)
new_filter = datastore_pb.Query_Filter()
new_filter.set_op(datastore_pb.Query_Filter.EXISTS)
new_prop = new_filter.add_property()
new_prop.set_name(prop)
new_prop.set_multiple(False)
new_prop.mutable_value()
new_filters.append(new_filter)
filters = new_filters
if datastore_types.KEY_SPECIAL_PROPERTY in eq_properties:
orders = []
new_orders = []
for o in orders:
if o.property() == datastore_types.KEY_SPECIAL_PROPERTY:
new_orders.append(o)
break
new_orders.append(o)
orders = new_orders
return (filters, orders)
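# A sketch of the effect, with filters and orders written symbolically rather
# than as datastore_pb protocol buffers (property names are arbitrary):
#
#   filters = [price > 10, tag IN ('a',)]   orders = [price ASC, price ASC]
#   exists  = ['color']
#
#   -> the single-property IN is rewritten to tag == 'a' (EQUAL), the
#      duplicate order on price is dropped, and an EXISTS filter is appended
#      for 'color' since it is not otherwise constrained.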
def RemoveNativelySupportedComponents(filters, orders, exists):
""" Removes query components that are natively supported by the datastore.
The resulting filters and orders should not be used in an actual query.
Args:
filters: the filters set on the query
orders: the orders set on the query
exists: the names of properties that require an exists filter if
not already specified
Returns:
(filters, orders) the reduced set of filters and orders
"""
(filters, orders) = Normalize(filters, orders, exists)
for f in filters:
if f.op() in EXISTS_OPERATORS:
return (filters, orders)
has_key_desc_order = False
if orders and orders[-1].property() == datastore_types.KEY_SPECIAL_PROPERTY:
if orders[-1].direction() == ASCENDING:
orders = orders[:-1]
else:
has_key_desc_order = True
if not has_key_desc_order:
for f in filters:
if (f.op() in INEQUALITY_OPERATORS and
f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY):
break
else:
filters = [f for f in filters
if f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY]
return (filters, orders)
def CompositeIndexForQuery(query):
"""Return the composite index needed for a query.
A query is translated into a tuple, as follows:
- The first item is the kind string, or None if we're not filtering
on kind (see below).
- The second item is a bool giving whether the query specifies an
ancestor.
- After that come (property, ASCENDING) pairs for those Filter
entries whose operator is EQUAL or IN. Since the order of these
doesn't matter, they are sorted by property name to normalize them
in order to avoid duplicates.
- After that comes at most one (property, ASCENDING) pair for a
Filter entry whose operator is one of the four inequalities. There
can be at most one of these.
- After that come all the (property, direction) pairs for the Order
entries, in the order given in the query. Exceptions:
(a) if there is a Filter entry with an inequality operator that matches
the first Order entry, the first order pair is omitted (or,
equivalently, in this case the inequality pair is omitted).
(b) if an Order entry corresponds to an equality filter, it is ignored
(since there will only ever be one value returned).
(c) if there is an equality filter on __key__ all orders are dropped
(since there will be at most one result returned).
(d) if there is an order on __key__ all further orders are dropped (since
keys are unique).
(e) orders on __key__ ASCENDING are dropped (since this is supported
natively by the datastore).
- Finally, if there are Filter entries whose operator is EXISTS, and
whose property names are not already listed, they are added, with
the direction set to ASCENDING.
This algorithm should consume all Filter and Order entries.
Additional notes:
- The low-level implementation allows queries that don't specify a
kind; but the Python API doesn't support this yet.
- If there's an inequality filter and one or more sort orders, the
first sort order *must* match the inequality filter.
- The following indexes are always built in and should be suppressed:
- query on kind only;
- query on kind and one filter *or* one order;
- query on ancestor only, without kind (not exposed in Python yet);
- query on kind and equality filters only, no order (with or without
ancestor).
- While the protocol buffer allows a Filter to contain multiple
properties, we don't use this. It is only needed for the IN operator
but this is (currently) handled on the client side, so in practice
each Filter is expected to have exactly one property.
Args:
query: A datastore_pb.Query instance.
Returns:
A tuple of the form (required, kind, ancestor, properties).
required: boolean, whether the index is required;
kind: the kind or None;
ancestor: True if this is an ancestor query;
properties: A tuple consisting of:
- the prefix, represented by a set of property names
- the postfix, represented by a tuple consisting of any number of:
- Sets of property names: Indicates these properties can appear in any
order with any direction.
- Tuples of (property name, direction) tuples. Indicating the properties
must appear in the exact order with the given direction. direction can
be None if direction does not matter.
"""
required = True
kind = query.kind()
ancestor = query.has_ancestor()
filters = query.filter_list()
orders = query.order_list()
for filter in filters:
assert filter.op() != datastore_pb.Query_Filter.IN, 'Filter.op()==IN'
nprops = len(filter.property_list())
assert nprops == 1, 'Filter has %s properties, expected 1' % nprops
if not kind:
required = False
exists = list(query.property_name_list())
exists.extend(query.group_by_property_name_list())
filters, orders = RemoveNativelySupportedComponents(filters, orders, exists)
eq_filters = [f for f in filters if f.op() in EQUALITY_OPERATORS]
ineq_filters = [f for f in filters if f.op() in INEQUALITY_OPERATORS]
exists_filters = [f for f in filters if f.op() in EXISTS_OPERATORS]
assert (len(eq_filters) + len(ineq_filters) +
len(exists_filters)) == len(filters), 'Not all filters used'
if (kind and not ineq_filters and not exists_filters and
not orders):
names = set(f.property(0).name() for f in eq_filters)
if not names.intersection(datastore_types._SPECIAL_PROPERTIES):
required = False
ineq_property = None
if ineq_filters:
for filter in ineq_filters:
if (filter.property(0).name() ==
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
continue
if not ineq_property:
ineq_property = filter.property(0).name()
else:
assert filter.property(0).name() == ineq_property
group_by_props = set(query.group_by_property_name_list())
prefix = frozenset(f.property(0).name() for f in eq_filters)
postfix_ordered = [(order.property(), order.direction()) for order in orders]
postfix_group_by = frozenset(f.property(0).name() for f in exists_filters
if f.property(0).name() in group_by_props)
postfix_unordered = frozenset(f.property(0).name() for f in exists_filters
if f.property(0).name() not in group_by_props)
if ineq_property:
if orders:
assert ineq_property == orders[0].property()
else:
postfix_ordered.append((ineq_property, None))
property_count = (len(prefix) + len(postfix_ordered) + len(postfix_group_by)
+ len(postfix_unordered))
if kind and not ancestor and property_count <= 1:
required = False
if postfix_ordered:
prop, dir = postfix_ordered[0]
if prop == datastore_types.KEY_SPECIAL_PROPERTY and dir is DESCENDING:
required = True
props = prefix, (tuple(postfix_ordered), postfix_group_by, postfix_unordered)
return required, kind, ancestor, props
def GetRecommendedIndexProperties(properties):
"""Converts the properties returned by datastore_index.CompositeIndexForQuery
into a recommended list of index properties and directions.
All unordered components are sorted and assigned an ASCENDING direction. All
ordered components without a direction are assigned an ASCENDING direction.
Args:
properties: See datastore_index.CompositeIndexForQuery
Returns:
A tuple of (name, direction) tuples where:
name: a property name
direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING
"""
prefix, postfix = properties
result = []
for sub_list in itertools.chain((prefix,), postfix):
if isinstance(sub_list, (frozenset, set)):
for prop in sorted(sub_list):
result.append((prop, ASCENDING))
else:
for prop, dir in sub_list:
result.append((prop, dir if dir is not None else ASCENDING))
return tuple(result)
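# Hypothetical example: for
#   properties = (frozenset(['owner']),
#                 ((('price', None),), frozenset(), frozenset(['tag'])))
# the recommended index properties are
#   (('owner', ASCENDING), ('price', ASCENDING), ('tag', ASCENDING))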
def _MatchPostfix(postfix_props, index_props):
"""Matches a postfix constraint with an existing index.
Args:
postfix_props: A tuple of sets and lists, as output by
CompositeIndexForQuery. They should define the requirements for the
postfix of the index.
index_props: A list of tuples (property_name, property_direction), that
define the index to try and match.
Returns:
The list of tuples that define the prefix properties in the given index.
None if the constraints could not be satisfied.
"""
index_props = reversed(index_props)
for property_group in reversed(postfix_props):
index_group = itertools.islice(index_props, len(property_group))
if isinstance(property_group, (frozenset, set)):
curr_set = set(property_group)
for index_prop, _ in index_group:
if index_prop not in curr_set:
return None
curr_set.remove(index_prop)
else:
for (index_prop, index_dir), (prop, direction) in itertools.izip_longest(
index_group, reversed(property_group), fillvalue=(None, None)):
if prop is None or index_prop != prop or (direction and
index_dir != direction):
return None
remaining = list(index_props)
remaining.reverse()
return remaining
def MinimalCompositeIndexForQuery(query, index_defs):
"""Computes the minimal composite index for this query.
Unlike datastore_index.CompositeIndexForQuery, this function takes into
account indexes that already exist in the system.
Args:
query: the datastore_pb.Query to compute suggestions for
index_defs: a list of datastore_index.Index objects that already exist.
Returns:
None if no index is needed, otherwise the minimal index in the form
(is_most_efficient, kind, ancestor, properties), where is_most_efficient is a
boolean denoting if the suggested index is the most efficient (i.e. the one
returned by datastore_index.CompositeIndexForQuery). kind and ancestor
are the same variables returned by datastore_index.CompositeIndexForQuery.
properties is a tuple consisting of the prefix and postfix properties
returned by datastore_index.CompositeIndexForQuery.
"""
required, kind, ancestor, (prefix, postfix) = CompositeIndexForQuery(query)
if not required:
return None
remaining_dict = {}
for definition in index_defs:
if (kind != definition.kind or
(not ancestor and definition.ancestor)):
continue
_, _, index_props = IndexToKey(definition)
index_prefix = _MatchPostfix(postfix, index_props)
if index_prefix is None:
continue
remaining_index_props = set([prop for prop, _ in index_prefix])
if remaining_index_props - prefix:
continue
index_postfix = tuple(index_props[len(index_prefix):])
remaining = remaining_dict.get(index_postfix)
if remaining is None:
remaining = prefix.copy(), ancestor
props_remaining, ancestor_remaining = remaining
props_remaining -= remaining_index_props
if definition.ancestor:
ancestor_remaining = False
if not (props_remaining or ancestor_remaining):
return None
if (props_remaining, ancestor_remaining) == remaining:
continue
remaining_dict[index_postfix] = (props_remaining, ancestor_remaining)
if not remaining_dict:
return (True, kind, ancestor, (prefix, postfix))
def calc_cost(minimal_props, minimal_ancestor):
result = len(minimal_props)
if minimal_ancestor:
result += 2
return result
minimal_postfix, remaining = remaining_dict.popitem()
minimal_props, minimal_ancestor = remaining
minimal_cost = calc_cost(minimal_props, minimal_ancestor)
for index_postfix, (props_remaining, ancestor_remaining) in (
remaining_dict.iteritems()):
cost = calc_cost(props_remaining, ancestor_remaining)
if cost < minimal_cost:
minimal_cost = cost
minimal_postfix = index_postfix
minimal_props = props_remaining
minimal_ancestor = ancestor_remaining
props = frozenset(minimal_props), (minimal_postfix, frozenset(), frozenset())
return False, kind, minimal_ancestor, props
def IndexYamlForQuery(kind, ancestor, props):
"""Return the composite index definition YAML needed for a query.
Given a query, the arguments for this method can be computed with:
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
props = datastore_index.GetRecommendedIndexProperties(props)
Args:
kind: the kind or None
ancestor: True if this is an ancestor query, False otherwise
props: tuples of the form (name, direction) where:
name - a property name;
direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;
Returns:
A string with the YAML for the composite index needed by the query.
"""
yaml = []
yaml.append('- kind: %s' % kind)
if ancestor:
yaml.append(' ancestor: yes')
if props:
yaml.append(' properties:')
for name, direction in props:
yaml.append(' - name: %s' % name)
if direction == DESCENDING:
yaml.append(' direction: desc')
return '\n'.join(yaml)
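# For kind='Cat', ancestor=False and
# props=(('name', ASCENDING), ('age', DESCENDING)), this yields:
#
#   - kind: Cat
#     properties:
#     - name: name
#     - name: age
#       direction: desc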
def IndexXmlForQuery(kind, ancestor, props):
"""Return the composite index definition XML needed for a query.
Given a query, the arguments for this method can be computed with:
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
props = datastore_index.GetRecommendedIndexProperties(props)
Args:
kind: the kind or None
ancestor: True if this is an ancestor query, False otherwise
props: tuples of the form (name, direction) where:
name - a property name;
direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;
Returns:
A string with the XML for the composite index needed by the query.
"""
xml = []
xml.append('<datastore-index kind="%s" ancestor="%s">'
% (kind, 'true' if ancestor else 'false'))
for name, direction in props:
xml.append(' <property name="%s" direction="%s" />'
% (name, 'asc' if direction == ASCENDING else 'desc'))
xml.append('</datastore-index>')
return '\n'.join(xml)
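# The same inputs yield:
#
#   <datastore-index kind="Cat" ancestor="false">
#     <property name="name" direction="asc" />
#     <property name="age" direction="desc" />
#   </datastore-index>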
def IndexDefinitionToProto(app_id, index_definition):
"""Transform individual Index definition to protocol buffer.
Args:
app_id: Application id for new protocol buffer CompositeIndex.
index_definition: datastore_index.Index object to transform.
Returns:
New entity_pb.CompositeIndex with default values set and index
information filled in.
"""
proto = entity_pb.CompositeIndex()
proto.set_app_id(app_id)
proto.set_id(0)
proto.set_state(entity_pb.CompositeIndex.WRITE_ONLY)
definition_proto = proto.mutable_definition()
definition_proto.set_entity_type(index_definition.kind)
definition_proto.set_ancestor(index_definition.ancestor)
if index_definition.properties is not None:
for prop in index_definition.properties:
prop_proto = definition_proto.add_property()
prop_proto.set_name(prop.name)
prop_proto.set_direction(_DIRECTION_MAP[prop.direction])
return proto
def IndexDefinitionsToProtos(app_id, index_definitions):
"""Transform multiple index definitions to composite index records
Args:
app_id: Application id for new protocol buffer CompositeIndex.
index_definitions: A list of datastore_index.Index objects to transform.
Returns:
A list of transformed entity_pb.CompositeIndex entities with default values
set and index information filled in.
"""
return [IndexDefinitionToProto(app_id, index)
for index in index_definitions]
def ProtoToIndexDefinition(proto):
"""Transform individual index protocol buffer to index definition.
Args:
proto: An instance of entity_pb.CompositeIndex to transform.
Returns:
A new instance of datastore_index.Index.
"""
properties = []
proto_index = proto.definition()
for prop_proto in proto_index.property_list():
prop_definition = Property(name=prop_proto.name())
if prop_proto.direction() == entity_pb.Index_Property.DESCENDING:
prop_definition.direction = 'descending'
properties.append(prop_definition)
index = Index(kind=proto_index.entity_type(), properties=properties)
if proto_index.ancestor():
index.ancestor = True
return index
def ProtosToIndexDefinitions(protos):
"""Transform multiple index protocol buffers to index definitions.
Args:
protos: A list of entity_pb.CompositeIndex records to transform.
Returns:
A list of datastore_index.Index objects.
"""
return [ProtoToIndexDefinition(definition) for definition in protos]
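# Round-trip sketch (keyword construction of Index/Property follows the usage
# in ProtoToIndexDefinition above; 'my-app' is a placeholder app id):
#
#   index = Index(kind='Cat', properties=[Property(name='name'),
#                                         Property(name='age', direction='desc')])
#   proto = IndexDefinitionToProto('my-app', index)
#   assert IndexToKey(ProtoToIndexDefinition(proto)) == IndexToKey(index)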
| mit |
BigBrother1984/android_external_chromium_org | media/tools/layout_tests/test_expectations_history.py | 156 | 5156 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the history of the test expectation file."""
from datetime import datetime
from datetime import timedelta
import os
import re
import sys
import tempfile
import time
import pysvn
TEST_EXPECTATIONS_ROOT = 'http://src.chromium.org/blink/trunk/'
# A map from earliest revision to path.
# TODO(imasaki): support multiple test expectation files.
TEST_EXPECTATIONS_LOCATIONS = {
148348: 'LayoutTests/TestExpectations',
119317: 'LayoutTests/platform/chromium/TestExpectations',
0: 'LayoutTests/platform/chromium/test_expectations.txt'}
TEST_EXPECTATIONS_DEFAULT_PATH = (
TEST_EXPECTATIONS_ROOT + TEST_EXPECTATIONS_LOCATIONS[148348])
class TestExpectationsHistory(object):
"""A class to represent history of the test expectation file.
The history is obtained by calling PySVN.log()/diff() APIs.
TODO(imasaki): Add more functionalities here like getting some statistics
about the test expectation file.
"""
@staticmethod
def GetTestExpectationsPathForRevision(revision):
for i in sorted(TEST_EXPECTATIONS_LOCATIONS.keys(), reverse=True):
if revision >= i:
return TEST_EXPECTATIONS_ROOT + TEST_EXPECTATIONS_LOCATIONS[i]
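# For example:
#   TestExpectationsHistory.GetTestExpectationsPathForRevision(150000)
#     -> TEST_EXPECTATIONS_ROOT + 'LayoutTests/TestExpectations'
#   TestExpectationsHistory.GetTestExpectationsPathForRevision(120000)
#     -> TEST_EXPECTATIONS_ROOT + 'LayoutTests/platform/chromium/TestExpectations'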
@staticmethod
def GetDiffBetweenTimes(start, end, testname_list,
te_location=TEST_EXPECTATIONS_DEFAULT_PATH):
"""Get difference between time period for the specified test names.
Given the time period, this method first gets the revision number. Then,
it gets the diff for each revision. Finally, it keeps the diff relating to
the test names and returns them along with other information about
revision.
Args:
start: A timestamp specifying start of the time period to be
looked at.
end: A timestamp object specifying end of the time period to be
looked at.
testname_list: A list of strings representing test names of interest.
te_location: A location of the test expectation file.
Returns:
A list of tuples (old_rev, new_rev, author, date, message, lines). The
|lines| contains the diff of the tests of interest.
"""
temp_directory = tempfile.mkdtemp()
test_expectations_path = os.path.join(temp_directory, 'TestExpectations')
# Get the directory name, which is necessary to call PySVN.checkout().
te_location_dir = te_location[0:te_location.rindex('/')]
client = pysvn.Client()
client.checkout(te_location_dir, temp_directory, recurse=False)
# PySVN.log() (http://pysvn.tigris.org/docs/pysvn_prog_ref.html
# #pysvn_client_log) returns the log messages (including revision
# number in chronological order).
logs = client.log(test_expectations_path,
revision_start=pysvn.Revision(
pysvn.opt_revision_kind.date, start),
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.date, end))
result_list = []
gobackdays = 1
while gobackdays < sys.maxint:
goback_start = time.mktime(
(datetime.fromtimestamp(start) - (
timedelta(days=gobackdays))).timetuple())
logs_before_time_period = (
client.log(test_expectations_path,
revision_start=pysvn.Revision(
pysvn.opt_revision_kind.date, goback_start),
revision_end=pysvn.Revision(
pysvn.opt_revision_kind.date, start)))
if logs_before_time_period:
# Prepend at the beginning of logs.
logs.insert(0, logs_before_time_period[len(logs_before_time_period)-1])
break
gobackdays *= 2
for i in xrange(len(logs) - 1):
old_rev = logs[i].revision.number
new_rev = logs[i + 1].revision.number
# Parsing the actual diff.
new_path = TestExpectationsHistory.GetTestExpectationsPathForRevision(
new_rev)
old_path = TestExpectationsHistory.GetTestExpectationsPathForRevision(
old_rev)
text = client.diff(temp_directory,
url_or_path=old_path,
revision1=pysvn.Revision(
pysvn.opt_revision_kind.number, old_rev),
url_or_path2=new_path,
revision2=pysvn.Revision(
pysvn.opt_revision_kind.number, new_rev))
lines = text.split('\n')
target_lines = []
for line in lines:
for testname in testname_list:
matches = re.findall(testname, line)
if matches:
if line[0] == '+' or line[0] == '-':
target_lines.append(line)
if target_lines:
# Needs to convert to normal date string for presentation.
result_list.append((
old_rev, new_rev, logs[i + 1].author,
datetime.fromtimestamp(
logs[i + 1].date).strftime('%Y-%m-%d %H:%M:%S'),
logs[i + 1].message, target_lines))
return result_list
| bsd-3-clause |
xyuanmu/XX-Net | python3.8.2/Lib/site-packages/pip/_vendor/requests/sessions.py | 70 | 29332 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from datetime import timedelta
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
if sys.platform == 'win32':
try: # Python 3.4+
preferred_clock = time.perf_counter
except AttributeError: # Earlier than Python 3.
preferred_clock = time.clock
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
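# A small illustration of the merge semantics: request-level values win and a
# None value removes the key entirely (header names are arbitrary):
#
#   merge_setting({'Accept': 'application/json', 'X-Debug': None},   # request
#                 {'Accept': '*/*', 'X-Debug': '1'})                 # session
#   # -> OrderedDict([('Accept', 'application/json')])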
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
# Currently the underlying http module on py3 decode headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
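# For instance (hypothetical URLs):
#   http://example.com/ -> https://example.com/       keeps auth (std ports)
#   http://example.com/ -> http://other.example.com/  strips auth (new host)
#   http://example.com/ -> http://example.com:8080/   strips auth (new port)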
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and not bypass_proxy:
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
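# Illustrative call (the retry count is an arbitrary choice):
#
#   s = Session()
#   s.mount('https://api.example.com/', HTTPAdapter(max_retries=3))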
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
| bsd-2-clause |
brianlorenz/COSMOS_IMACS_Redshifts | PlotCodes/Plotfits.py | 1 | 1041 | #Plot a .fits file
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
import sys, os, string
import pandas as pd
fitsfile = sys.argv[1]
data = fits.open(fitsfile)[0].data
head = fits.open(fitsfile)[0].header
d0 = data[0]
d1 = data[1]
d2 = data[2]
d3 = data[3]
d4 = data[4]
#d5 = data[5]
#d6 = data[6]
#d7 = data[7]
#d8 = data[8]
crval1 = head["crval1"]
crpix1 = head["crpix1"]
cdelt1 = head["cdelt1"]
naxis1 = head["naxis1"]
dcflag = head["dc-flag"]
exptime = head['exptime']
wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
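# Linear wavelength solution from the FITS WCS keywords read above:
# wavelength(i) = CRVAL1 + (i + 1 - CRPIX1) * CDELT1 for pixel i = 0..NAXIS1-1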
fig,axarr = plt.subplots(figsize=(13,7))
#ax0,ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8 = axarr[0,0],axarr[0,1],axarr[0,2],axarr[1,0],axarr[1,1],axarr[1,2],axarr[2,0],axarr[2,1],axarr[2,2]
axarr.plot(wavelength,data[0])
#ax1.plot(wavelength,data[1])
#ax2.plot(wavelength,data[2])
#ax3.plot(wavelength,data[3])
#ax4.plot(wavelength,data[4])
#ax5.plot(wavelength,data[5])
#ax6.plot(wavelength,data[6])
#ax7.plot(wavelength,data[7])
#ax8.plot(wavelength,data[8])
plt.show()
| mit |
hwroitzsch/DayLikeTodayClone | venv/lib/python3.5/site-packages/wheel/pkginfo.py | 565 | 1225 | """Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
try:
unicode
_PY3 = False
except NameError:
_PY3 = True
if not _PY3:
from email.generator import Generator
def read_pkg_info_bytes(bytestr):
return Parser().parsestr(bytestr)
def read_pkg_info(path):
with open(path, "r") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, 'w') as metadata:
Generator(metadata, maxheaderlen=0).flatten(message)
else:
from email.generator import BytesGenerator
def read_pkg_info_bytes(bytestr):
headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
message = Parser().parsestr(headers)
return message
def read_pkg_info(path):
with open(path, "r",
encoding="ascii",
errors="surrogateescape") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, "wb") as out:
BytesGenerator(out, maxheaderlen=0).flatten(message)
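# Illustrative round-trip, identical on both branches (the path and the header
# value are hypothetical):
#
#   message = read_pkg_info('dist/example/METADATA')
#   message.replace_header('Metadata-Version', '2.1')
#   write_pkg_info('dist/example/METADATA', message)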
| mit |
smathot/Gnotero | pygnotero/libzotero.py | 1 | 9041 | """
This file is part of Gnotero.
Gnotero is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Gnotero is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Gnotero. If not, see <http://www.gnu.org/licenses/>.
"""
import sqlite3
import os
import os.path
import sys
import shutil
import shlex
import sys
import time
from pygnotero.zotero_item import zotero_item
class libzotero:
"""
Libzotero provides access to the zotero database.
This is an object oriented reimplementation of the
original zoterotools.
"""
attachment_query = """
select items.itemID, itemAttachments.path, itemAttachments.itemID
from items, itemAttachments
where items.itemID = itemAttachments.sourceItemID
"""
info_query = """
select items.itemID, fields.fieldName, itemDataValues.value, items.key
from items, itemData, fields, itemDataValues
where
items.itemID = itemData.itemID
and itemData.fieldID = fields.fieldID
and itemData.valueID = itemDataValues.valueID
and (fields.fieldName = "date" or fields.fieldName = "publicationTitle" or fields.fieldName = "volume" or fields.fieldName = "issue" or fields.fieldName = "title")
"""
author_query = """
select items.itemID, creatorData.lastName
from items, itemCreators, creators, creatorData
where
items.itemID = itemCreators.itemID
and itemCreators.creatorID = creators.creatorID
and creators.creatorDataID = creatorData.creatorDataID
order by itemCreators.orderIndex
"""
collection_query = """
select items.itemID, collections.collectionName
from items, collections, collectionItems
where
items.itemID = collectionItems.itemID
and collections.collectionID = collectionItems.collectionID
order by collections.collectionName != "To Read", collections.collectionName
"""
deleted_query = "select itemID from deletedItems"
def __init__(self, zotero_path):
"""
Intialize libzotero
"""
# Set paths
self.zotero_path = zotero_path
self.storage_path = os.path.join(self.zotero_path, "storage")
self.zotero_database = os.path.join(self.zotero_path, "zotero.sqlite")
if os.name == "nt":
home_folder = os.environ["USERPROFILE"].decode('iso8859-15')
elif os.name == "posix":
home_folder = os.environ["HOME"].decode('iso8859-15')
else:
print "libzotero.__init__(): you appear to be running an unsupported OS"
self.gnotero_database = os.path.join(home_folder, ".gnotero.sqlite")
# Remember search results so results speed up over time
self.search_cache = {}
# Check whether verbosity is turned on
self.verbose = "-v" in sys.argv
# These dates are treated as special and are not parsed into a year representation
self.special_dates = "in press", "submitted", "in preparation", "unpublished"
# These extensions are recognized as fulltext attachments
self.attachment_ext = ".pdf", ".epub"
self.index = {}
self.collection_index = []
self.last_update = None
# The notry parameter can be used to show errors which would
# otherwise be obscured by the try clause
if "--notry" in sys.argv:
self.search("dummy")
# Start by updating the database
try:
self.search("dummy")
self.error = False
except:
self.error = True
def log(self, msg):
"""
Print a message to the output if verbosity is on.
"""
if self.verbose:
print "zoterotools2: " + msg
def update(self, force = False):
"""
This function checks if the local copy of the zotero
database is up to date. If not, the data is also indexed.
"""
stats = os.stat(self.zotero_database)
# Only update if necessary
    if force or stats[8] > self.last_update:
t = time.time()
self.last_update = stats[8]
self.index = {}
self.collection_index = []
self.search_cache = {}
# Copy the zotero database to the gnotero copy
shutil.copyfile(self.zotero_database, self.gnotero_database)
self.conn = sqlite3.connect(self.gnotero_database)
self.cur = self.conn.cursor()
# First create a list of deleted items, so we can ignore those later
deleted = []
self.cur.execute(self.deleted_query)
for item in self.cur.fetchall():
deleted.append(item[0])
# Retrieve information about date, publication, volume, issue and title
self.cur.execute(self.info_query)
for item in self.cur.fetchall():
item_id = item[0]
key = item[3]
if item_id not in deleted:
item_name = item[1]
          # Parse date fields, because we only want a year or a 'special' date
if item_name == "date":
item_value = None
for sd in self.special_dates:
if sd in item[2].lower():
item_value = sd
break
if item_value == None:
item_value = item[2][-4:]
else:
item_value = item[2]
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].key = key
if item_name == "publicationTitle":
self.index[item_id].publication = item_value
elif item_name == "date":
self.index[item_id].date = item_value
elif item_name == "volume":
self.index[item_id].volume = item_value
elif item_name == "issue":
self.index[item_id].issue = item_value
elif item_name == "title":
self.index[item_id].title = item_value
# Retrieve author information
self.cur.execute(self.author_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
item_author = item[1].capitalize()
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].authors.append(item_author)
# Retrieve collection information
self.cur.execute(self.collection_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
item_collection = item[1]
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.index[item_id].collections.append(item_collection)
if item_collection not in self.collection_index:
self.collection_index.append(item_collection)
# Retrieve attachments
self.cur.execute(self.attachment_query)
for item in self.cur.fetchall():
item_id = item[0]
if item_id not in deleted:
if item[1] != None:
att = item[1].encode("latin-1")
# If the attachment is stored in the Zotero folder, it is preceded
# by "storage:"
if att[:8] == "storage:":
item_attachment = att[8:]
attachment_id = item[2]
if item_attachment[-4:].lower() in self.attachment_ext:
if item_id not in self.index:
self.index[item_id] = zotero_item(item_id)
self.cur.execute("select items.key from items where itemID = %d" % attachment_id)
key = self.cur.fetchone()[0]
self.index[item_id].fulltext = os.path.join(self.storage_path, key, item_attachment)
# If the attachment is linked, it is simply the full path to the attachment
else:
self.index[item_id].fulltext = att
self.cur.close()
print "libzotero.update(): indexing completed in %.3fs" % (time.time() - t)
def parse_query(self, query):
"""
Parses a text search query into a list of tuples,
which are acceptable for zotero_item.match()
"""
# Make sure that spaces are handled correctly after
# semicolons. E.g., Author: Mathot
while ": " in query:
query = query.replace(": ", ":")
# Parse the terms into a suitable format
terms = []
# Check if the criterium is type-specified, like "author: doe"
for term in shlex.split(query.strip().lower()):
s = term.split(":")
if len(s) == 2:
terms.append( (s[0].strip(), s[1].strip()) )
else:
terms.append( (None, term.strip()) )
return terms
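    # Illustrative examples of the parsed output (not part of the original
    # module); note that the query is lowercased by shlex.split above:
    #   parse_query('author:Mathot 2010') -> [('author', 'mathot'), (None, '2010')]
    #   parse_query('"in press"')         -> [(None, 'in press')]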
def search(self, query):
"""
Search the zotero database
"""
self.update()
if query in self.search_cache:
print "libzotero.search(): retrieving results for '%s' from cache" % query
return self.search_cache[query]
t = time.time()
terms = self.parse_query(query)
results = []
for item_id, item in self.index.items():
if item.match(terms):
results.append(item)
self.search_cache[query] = results
print "libzotero.search(): search for '%s' completed in %.3fs" % (query, time.time() - t)
return results
def valid_location(path):
"""
Checks if a given path is a valid Zotero folder,
i.e., if it it contains zotero.sqlite
"""
return os.path.exists(os.path.join(path, "zotero.sqlite"))
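# Usage sketch (illustrative; the profile path is hypothetical):
#   if valid_location('/home/user/.zotero/zotero'):
#       zotero = libzotero('/home/user/.zotero/zotero')
#       for item in zotero.search('author:mathot'):
#           print item.title
# zotero_item attributes such as .title are populated by update() above.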
| gpl-3.0 |
jamdin/jdiner-mobile-byte3 | lib/numpy/lib/tests/test_recfunctions.py | 23 | 25445 | import sys
import numpy as np
import numpy.ma as ma
from numpy.ma.testutils import *
from numpy.ma.mrecords import MaskedRecords
from numpy.lib.recfunctions import *
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(TestCase):
"""
Misc tests
"""
#
def setUp(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_zip_descr(self):
"Test zip_descr"
(w, x, y, z) = self.data
# Std array
test = zip_descr((x, x), flatten=True)
assert_equal(test,
np.dtype([('', int), ('', int)]))
test = zip_descr((x, x), flatten=False)
assert_equal(test,
np.dtype([('', int), ('', int)]))
# Std & flexible-dtype
test = zip_descr((x, z), flatten=True)
assert_equal(test,
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
test = zip_descr((x, z), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('A', '|S3'), ('B', float)])]))
# Standard & nested dtype
test = zip_descr((x, w), flatten=True)
assert_equal(test,
np.dtype([('', int),
('a', int),
('ba', float), ('bb', int)]))
test = zip_descr((x, w), flatten=False)
assert_equal(test,
np.dtype([('', int),
('', [('a', int),
('b', [('ba', float), ('bb', int)])])]))
def test_drop_fields(self):
"Test drop_fields"
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
# A basic field
test = drop_fields(a, 'a')
control = np.array([((2, 3.0),), ((5, 6.0),)],
dtype=[('b', [('ba', float), ('bb', int)])])
assert_equal(test, control)
# Another basic field (but nesting two fields)
test = drop_fields(a, 'b')
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
# A nested sub-field
test = drop_fields(a, ['ba', ])
control = np.array([(1, (3.0,)), (4, (6.0,))],
dtype=[('a', int), ('b', [('bb', int)])])
assert_equal(test, control)
# All the nested sub-field from a field: zap that field
test = drop_fields(a, ['ba', 'bb'])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
#
test = drop_fields(a, ['a', 'b'])
assert(test is None)
def test_rename_fields(self):
"Tests rename fields"
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a':'A', 'bb':'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
"Tests get_names"
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
#
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
def test_get_names_flat(self):
"Test get_names_flat"
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
#
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
def test_get_fieldstructure(self):
"Test get_fieldstructure"
# No nested fields
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A':[], 'B':[]})
# One 1-nested field
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
test = get_fieldstructure(ndtype)
assert_equal(test, {'A': [], 'B': [], 'BA':['B', ], 'BB':['B']})
# One 2-nested fields
ndtype = np.dtype([('A', int),
('B', [('BA', int),
('BB', [('BBA', int), ('BBB', int)])])])
test = get_fieldstructure(ndtype)
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
def test_find_duplicates(self):
"Test find_duplicates"
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
#
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 2]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
#
test = find_duplicates(a, key='A', return_index=True)
control = [0, 1, 2, 3, 5]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
#
test = find_duplicates(a, key='B', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
#
test = find_duplicates(a, key='BA', return_index=True)
control = [0, 1, 2, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
#
test = find_duplicates(a, key='BB', return_index=True)
control = [0, 1, 2, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
"Test the ignoremask option of find_duplicates"
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
#
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
class TestRecursiveFillFields(TestCase):
"""
Test recursive_fill_fields.
"""
def test_simple_flexible(self):
"Test recursive_fill_fields on flexible-array"
a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
b = np.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = np.array([(1, 10.), (2, 20.), (0, 0.)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
#
def test_masked_flexible(self):
"Test recursive_fill_fields on masked flexible-array"
a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
dtype=[('A', int), ('B', float)])
b = ma.zeros((3,), dtype=a.dtype)
test = recursive_fill_fields(a, b)
control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
mask=[(0, 1), (1, 0), (0, 0)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
#
class TestMergeArrays(TestCase):
"""
Test merge_arrays
"""
def setUp(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
#
def test_solo(self):
"Test merge_arrays on a single array."
(_, x, _, z) = self.data
#
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
#
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
#
def test_solo_w_flatten(self):
"Test merge_arrays on a single array w & w/o flattening"
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
#
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
#
def test_standard(self):
"Test standard & standard"
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
#
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
def test_flatten(self):
"Test standard & flexible"
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
#
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
#
def test_flatten_wflexible(self):
"Test flatten standard & nested"
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
#
test = merge_arrays((x, w), flatten=False)
        controldtype = [('f0', int),
                        ('f1', [('a', int),
                                ('b', [('ba', float), ('bb', int)])])]
        control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
                           dtype=controldtype)
        assert_equal(test, control)
#
def test_wmasked_arrays(self):
"Test merge_arrays masked arrays"
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert(isinstance(test, MaskedRecords))
#
def test_w_singlefield(self):
"Test single field"
test = merge_arrays((np.array([1, 2]).view([('a', int)]),
np.array([10., 20., 30.])),)
control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('a', int), ('f1', float)])
assert_equal(test, control)
#
def test_w_shorter_flex(self):
"Test merge_arrays w/ a shorter flexndarray."
z = self.data[-1]
test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
dtype=[('A', '|S3'), ('B', float), ('C', int)])
#
def test_singlerecord(self):
(_, x, y, z) = self.data
test = merge_arrays((x[0], y[0], z[0]), usemask=False)
control = np.array([(1, 10, ('A', 1))],
dtype=[('f0', int),
('f1', int),
('f2', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
class TestAppendFields(TestCase):
"""
Test append_fields
"""
def setUp(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
#
def test_append_single(self):
"Test simple case"
(_, x, _, _) = self.data
test = append_fields(x, 'A', data=[10, 20, 30])
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('A', int)],)
assert_equal(test, control)
#
def test_append_double(self):
"Test simple case"
(_, x, _, _) = self.data
test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
dtype=[('f0', int), ('A', int), ('B', int)],)
assert_equal(test, control)
#
def test_append_on_flex(self):
"Test append_fields on flexible type arrays"
z = self.data[-1]
test = append_fields(z, 'C', data=[10, 20, 30])
control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('C', int)],)
assert_equal(test, control)
#
def test_append_on_nested(self):
"Test append_fields on nested fields"
w = self.data[0]
test = append_fields(w, 'C', data=[10, 20, 30])
control = ma.array([(1, (2, 3.0), 10),
(4, (5, 6.0), 20),
(-1, (-1, -1.), 30)],
mask=[(0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
dtype=[('a', int),
('b', [('ba', float), ('bb', int)]),
('C', int)],)
assert_equal(test, control)
class TestStackArrays(TestCase):
"""
Test stack_arrays
"""
def setUp(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
#
def test_solo(self):
"Test stack_arrays on single arrays"
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
self.assertTrue(test is x)
#
test = stack_arrays(x)
assert_equal(test, x)
self.assertTrue(test is x)
#
def test_unnamed_fields(self):
"Tests combinations of arrays w/o named fields"
(_, x, y, _) = self.data
#
test = stack_arrays((x, x), usemask=False)
control = np.array([1, 2, 1, 2])
assert_equal(test, control)
#
test = stack_arrays((x, y), usemask=False)
control = np.array([1, 2, 10, 20, 30])
assert_equal(test, control)
#
test = stack_arrays((y, x), usemask=False)
control = np.array([10, 20, 30, 1, 2])
assert_equal(test, control)
#
def test_unnamed_and_named_fields(self):
"Test combination of arrays w/ & w/o named fields"
(_, x, _, z) = self.data
#
test = stack_arrays((x, z))
control = ma.array([(1, -1, -1), (2, -1, -1),
(-1, 'A', 1), (-1, 'B', 2)],
mask=[(0, 1, 1), (0, 1, 1),
(1, 0, 0), (1, 0, 0)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
test = stack_arrays((z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
test = stack_arrays((z, z, x))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('A', 1, -1), ('B', 2, -1),
(-1, -1, 1), (-1, -1, 2), ],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 1), (0, 0, 1),
(1, 1, 0), (1, 1, 0)],
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
assert_equal(test, control)
#
def test_matching_named_fields(self):
"Test combination of arrays w/ matching field names"
(_, x, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
test = stack_arrays((z, zz))
control = ma.array([('A', 1, -1), ('B', 2, -1),
('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
test = stack_arrays((z, zz, x))
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
('a', 10., 100., -1), ('b', 20., 200., -1),
('c', 30., 300., -1),
(-1, -1, -1, 1), (-1, -1, -1, 2)],
dtype=ndtype,
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
(1, 1, 1, 0), (1, 1, 1, 0)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_defaults(self):
"Test defaults: no exception raised if keys of defaults are not fields."
(_, _, _, z) = self.data
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)])
defaults = {'A':'???', 'B':-999., 'C':-9999., 'D':-99999.}
test = stack_arrays((z, zz), defaults=defaults)
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
dtype=[('A', '|S3'), ('B', float), ('C', float)],
mask=[(0, 0, 1), (0, 0, 1),
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_autoconversion(self):
"Tests autoconversion"
adtype = [('A', int), ('B', bool), ('C', float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [('A', int), ('B', float), ('C', float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
try:
test = stack_arrays((a, b), autoconvert=False)
except TypeError:
pass
else:
raise AssertionError
def test_checktitles(self):
"Test using titles in the field names"
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
b = ma.array([(4, 5, 6)], dtype=bdtype)
test = stack_arrays((a, b))
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
dtype=bdtype)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
class TestJoinBy(TestCase):
def setUp(self):
self.a = np.array(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110)),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110)),
dtype=[('a', int), ('b', int), ('d', int)])
#
def test_inner_join(self):
"Basic test of join_by"
a, b = self.a, self.b
#
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
#
test = join_by(('a', 'b'), a, b)
control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_outer_join(self):
a, b = self.a, self.b
#
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
#
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
if __name__ == '__main__':
run_module_suite()
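# Editorial note: beyond testing, these cases document recfunctions usage —
# e.g. stack_arrays pads mismatched fields with fill values and masks them,
# while join_by('a', a, b, jointype='inner') behaves like a relational join
# keyed on the shared field(s).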
| apache-2.0 |
thepaul/uftrace | tests/t152_read_proc_statm.py | 1 | 1437 | #!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
[32417] | main() {
[32417] | a() {
[32417] | b() {
[32417] | /* read:proc/statm (size=6812KB, rss=780KB, shared=716KB) */
[32417] | c() {
0.479 us [32417] | getpid();
3.014 us [32417] | } /* c */
[32417] | /* diff:proc/statm (size=+0KB, rss=+0KB, shared=+0KB) */
16.914 us [32417] | } /* b */
17.083 us [32417] | } /* a */
17.873 us [32417] | } /* main */
""")
def runcmd(self):
uftrace = TestBase.uftrace_cmd
args = '-F main -T b@read=proc/statm'
prog = 't-' + self.name
return '%s %s %s' % (uftrace, args, prog)
def sort(self, output):
result = []
for ln in output.split('\n'):
# ignore blank lines and comments
if ln.strip() == '' or ln.startswith('#'):
continue
func = ln.split('|', 1)[-1]
# remove actual numbers in proc.statm
if func.find('read:proc/statm') > 0:
func = ' /* read:proc/statm */'
if func.find('diff:proc/statm') > 0:
func = ' /* diff:proc/statm */'
result.append(func)
return '\n'.join(result)
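# Note (editorial): the trigger '-T b@read=proc/statm' built in runcmd() asks
# uftrace to read the process statm data on entry to b() and emit the diff on
# exit, which matches the read:/diff: comment events in the expected output;
# sort() masks the actual numbers above since they vary from run to run.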
| gpl-2.0 |
MaxWayne/Beginning-Game-Development-with-Python-and-Pygame | Chapter 7/gameobjects/vector2-init.py | 6 | 1625 | import math
class Vector2:
    def __init__(self, x=0, y=0):
        # Accept either two scalars or a single 2-item sequence (e.g. a tuple).
        if hasattr(x, "__getitem__"):
            x, y = x
        self.x = float(x)
        self.y = float(y)
        self._v = [self.x, self.y]
def __str__(self):
return "(%s, %s)"%(self.x, self.y)
def from_points(P1, P2):
return Vector2( P2[0] - P1[0], P2[1] - P1[1] )
def get_magnitude(self):
return math.sqrt( self.x**2 + self.y**2 )
def normalize(self):
magnitude = self.get_magnitude()
try:
self.x /= magnitude
self.y /= magnitude
except ZeroDivisionError:
self.x = 0
self.y = 0
def __add__(self, rhs):
return Vector2(self.x + rhs.x, self.y + rhs.y)
def __sub__(self, rhs):
return Vector2(self.x - rhs.x, self.y - rhs.y)
def __neg__(self):
return Vector2(-self.x, -self.y)
def __mul__(self, scalar):
return Vector2(self.x * scalar, self.y * scalar)
def __truediv__(self, scalar):
return Vector2(self.x / scalar, self.y / scalar)
def __getitem__(self, index):
return self._v[index]
def __setitem__(self, index, value):
self._v[index] = 1.0 * value
if __name__ == "__main__":
A = (10.0, 20.0)
B = (30.0, 35.0)
AB = Vector2.from_points(A, B)
step = AB * .1
position = Vector2(*A)
for n in range(10):
position += step
print(position)
| mit |
shuizaiku7/shadowsocks | shadowsocks/udprelay.py | 924 | 11154 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
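# Illustrative layout (not part of the protocol comments above): a shadowsocks
# UDP request for 8.8.8.8:53 would, before encryption, be packed roughly as
#   b'\x01' + socket.inet_aton('8.8.8.8') + struct.pack('>H', 53) + payload
# where 0x01 is the SOCKS5 ATYP value for an IPv4 address.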
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
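# e.g. client_key(('203.0.113.7', 5353), socket.AF_INET) evaluates to
# '203.0.113.7:5353:2' on platforms where AF_INET == 2; the key identifies a
# client per server address family, not per destination.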
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
# this packet is from somewhere else we know
# simply drop that packet
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
| apache-2.0 |
rhertzog/django | django/db/migrations/optimizer.py | 127 | 2872 | from __future__ import unicode_literals
class MigrationOptimizer(object):
"""
Powers the optimization process, where you provide a list of Operations
and you are returned a list of equal or shorter length - operations
are merged into one if possible.
For example, a CreateModel and an AddField can be optimized into a
new CreateModel, and CreateModel and DeleteModel can be optimized into
nothing.
"""
def optimize(self, operations, app_label=None):
"""
Main optimization entry point. Pass in a list of Operation instances,
get out a new list of Operation instances.
Unfortunately, due to the scope of the optimization (two combinable
operations might be separated by several hundred others), this can't be
done as a peephole optimization with checks/output implemented on
the Operations themselves; instead, the optimizer looks at each
individual operation and scans forwards in the list to see if there
are any matches, stopping at boundaries - operations which can't
be optimized over (RunSQL, operations on the same field/model, etc.)
The inner loop is run until the starting list is the same as the result
list, and then the result is returned. This means that operation
optimization must be stable and always return an equal or shorter list.
The app_label argument is optional, but if you pass it you'll get more
efficient optimization.
"""
# Internal tracking variable for test assertions about # of loops
self._iterations = 0
while True:
result = self.optimize_inner(operations, app_label)
self._iterations += 1
if result == operations:
return result
operations = result
def optimize_inner(self, operations, app_label=None):
"""
Inner optimization loop.
"""
new_operations = []
for i, operation in enumerate(operations):
# Compare it to each operation after it
for j, other in enumerate(operations[i + 1:]):
in_between = operations[i + 1:i + j + 1]
result = operation.reduce(other, in_between, app_label)
if isinstance(result, list):
# Optimize! Add result, then remaining others, then return
new_operations.extend(result)
new_operations.extend(in_between)
new_operations.extend(operations[i + j + 2:])
return new_operations
if not result:
# We can't optimize across `other`.
new_operations.append(operation)
break
else:
new_operations.append(operation)
return new_operations
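# Illustrative reductions (not exhaustive), per the class docstring:
#   [CreateModel('Foo', ...), AddField('Foo', 'bar', ...)]
#       -> [CreateModel('Foo', ... including 'bar')]
#   [CreateModel('Foo', ...), DeleteModel('Foo')]
#       -> []
# optimize() reruns optimize_inner() until a fixed point is reached, which is
# why operation reduction must be stable and never grow the list.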
| bsd-3-clause |
joone/chromium-crosswalk | tools/telemetry/telemetry/internal/image_processing/_bitmap.py | 15 | 7855 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Bitmap is a basic wrapper for image pixels. It includes some basic processing
tools: crop, find bounding box of a color and compute histogram of color values.
"""
import array
import cStringIO
import struct
import subprocess
from telemetry.internal.util import binary_manager
from telemetry.core import platform
from telemetry.util import color_histogram
from telemetry.util import rgba_color
import png
class _BitmapTools(object):
"""Wraps a child process of bitmaptools and allows for one command."""
CROP_PIXELS = 0
HISTOGRAM = 1
BOUNDING_BOX = 2
def __init__(self, dimensions, pixels):
binary = binary_manager.FetchPath(
'bitmaptools',
platform.GetHostPlatform().GetArchName(),
platform.GetHostPlatform().GetOSName())
assert binary, 'You must build bitmaptools first!'
self._popen = subprocess.Popen([binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
packed_dims = struct.pack('iiiiiii', *dimensions)
self._popen.stdin.write(packed_dims)
# If we got a list of ints, we need to convert it into a byte buffer.
if type(pixels) is not bytearray:
pixels = bytearray(pixels)
self._popen.stdin.write(pixels)
def _RunCommand(self, *command):
assert not self._popen.stdin.closed, (
'Exactly one command allowed per instance of tools.')
packed_command = struct.pack('i' * len(command), *command)
self._popen.stdin.write(packed_command)
self._popen.stdin.close()
length_packed = self._popen.stdout.read(struct.calcsize('i'))
if not length_packed:
raise Exception(self._popen.stderr.read())
length = struct.unpack('i', length_packed)[0]
return self._popen.stdout.read(length)
def CropPixels(self):
return self._RunCommand(_BitmapTools.CROP_PIXELS)
def Histogram(self, ignore_color, tolerance):
ignore_color_int = -1 if ignore_color is None else int(ignore_color)
response = self._RunCommand(_BitmapTools.HISTOGRAM,
ignore_color_int, tolerance)
out = array.array('i')
out.fromstring(response)
assert len(out) == 768, (
'The ColorHistogram has the wrong number of buckets: %s' % len(out))
return color_histogram.ColorHistogram(out[:256], out[256:512], out[512:],
ignore_color)
def BoundingBox(self, color, tolerance):
response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
tolerance)
unpacked = struct.unpack('iiiii', response)
box, count = unpacked[:4], unpacked[-1]
if box[2] < 0 or box[3] < 0:
box = None
return box, count
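# Sketch of the helper's one-shot protocol (assumes the bitmaptools binary has
# been built): each instance packs seven ints (bpp, width, height and the crop
# box), streams the pixel bytes, then allows exactly one command, e.g.
#   _BitmapTools((3, w, h, 0, 0, w, h), pixels).Histogram(None, 0)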
class Bitmap(object):
"""Utilities for parsing and inspecting a bitmap."""
def __init__(self, bpp, width, height, pixels, metadata=None):
assert bpp in [3, 4], 'Invalid bytes per pixel'
assert width > 0, 'Invalid width'
assert height > 0, 'Invalid height'
assert pixels, 'Must specify pixels'
assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'
self._bpp = bpp
self._width = width
self._height = height
self._pixels = pixels
self._metadata = metadata or {}
self._crop_box = None
@property
def bpp(self):
return self._bpp
@property
def width(self):
return self._crop_box[2] if self._crop_box else self._width
@property
def height(self):
return self._crop_box[3] if self._crop_box else self._height
def _PrepareTools(self):
"""Prepares an instance of _BitmapTools which allows exactly one command.
"""
crop_box = self._crop_box or (0, 0, self._width, self._height)
return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
self._pixels)
@property
def pixels(self):
if self._crop_box:
self._pixels = self._PrepareTools().CropPixels()
# pylint: disable=unpacking-non-sequence
_, _, self._width, self._height = self._crop_box
self._crop_box = None
if type(self._pixels) is not bytearray:
self._pixels = bytearray(self._pixels)
return self._pixels
@property
def metadata(self):
self._metadata['size'] = (self.width, self.height)
self._metadata['alpha'] = self.bpp == 4
self._metadata['bitdepth'] = 8
return self._metadata
def GetPixelColor(self, x, y):
pixels = self.pixels
base = self._bpp * (y * self._width + x)
if self._bpp == 4:
return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
pixels[base + 2], pixels[base + 3])
return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
pixels[base + 2])
@staticmethod
def FromPng(png_data):
width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
return Bitmap(4 if meta['alpha'] else 3, width, height, pixels, meta)
@staticmethod
def FromPngFile(path):
with open(path, "rb") as f:
return Bitmap.FromPng(f.read())
def WritePngFile(self, path):
with open(path, "wb") as f:
png.Writer(**self.metadata).write_array(f, self.pixels)
def IsEqual(self, other, tolerance=0):
# Dimensions must be equal
if self.width != other.width or self.height != other.height:
return False
# Loop over each pixel and test for equality
if tolerance or self.bpp != other.bpp:
for y in range(self.height):
for x in range(self.width):
c0 = self.GetPixelColor(x, y)
c1 = other.GetPixelColor(x, y)
if not c0.IsEqual(c1, tolerance):
return False
else:
return self.pixels == other.pixels
return True
def Diff(self, other):
# Output dimensions will be the maximum of the two input dimensions
out_width = max(self.width, other.width)
out_height = max(self.height, other.height)
diff = [[0 for x in xrange(out_width * 3)] for x in xrange(out_height)]
# Loop over each pixel and write out the difference
for y in range(out_height):
for x in range(out_width):
if x < self.width and y < self.height:
c0 = self.GetPixelColor(x, y)
else:
c0 = rgba_color.RgbaColor(0, 0, 0, 0)
if x < other.width and y < other.height:
c1 = other.GetPixelColor(x, y)
else:
c1 = rgba_color.RgbaColor(0, 0, 0, 0)
offset = x * 3
diff[y][offset] = abs(c0.r - c1.r)
diff[y][offset+1] = abs(c0.g - c1.g)
diff[y][offset+2] = abs(c0.b - c1.b)
# This particular method can only save to a file, so the result will be
# written into an in-memory buffer and read back into a Bitmap
diff_img = png.from_array(diff, mode='RGB')
output = cStringIO.StringIO()
try:
diff_img.save(output)
diff = Bitmap.FromPng(output.getvalue())
finally:
output.close()
return diff
def GetBoundingBox(self, color, tolerance=0):
return self._PrepareTools().BoundingBox(color, tolerance)
def Crop(self, left, top, width, height):
cur_box = self._crop_box or (0, 0, self._width, self._height)
cur_left, cur_top, cur_width, cur_height = cur_box
if (left < 0 or top < 0 or
(left + width) > cur_width or
(top + height) > cur_height):
raise ValueError('Invalid dimensions')
self._crop_box = cur_left + left, cur_top + top, width, height
return self
def ColorHistogram(self, ignore_color=None, tolerance=0):
return self._PrepareTools().Histogram(ignore_color, tolerance)
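# Minimal usage sketch (assumes `png_data` holds a valid PNG byte string):
#   bmp = Bitmap.FromPng(png_data)
#   box, count = bmp.GetBoundingBox(rgba_color.RgbaColor(255, 255, 255))
#   hist = bmp.Crop(0, 0, bmp.width // 2, bmp.height).ColorHistogram()
# Crop() is lazy: pixels are only re-packed when .pixels is next read.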
| bsd-3-clause |
BonaventureCS/Programming-Contest-Tracker | bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
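# Example invocation (values are hypothetical):
#   TWBS_S3_BUCKET=twbs-cache AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... \
#     ./s3_cache.py download "npm packages" package.json ./node_modules
# The cache key is the SHA-256 of the dependencies file, so any edit to
# package.json invalidates the cached tarball.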
| mit |
Matt-Deacalion/django | tests/template_tests/filter_tests/test_unordered_list.py | 204 | 8179 | from django.template.defaultfilters import unordered_list
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from ..utils import setup
class UnorderedListTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = self.engine.render_to_string('unordered_list01', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
output = self.engine.render_to_string('unordered_list02', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = self.engine.render_to_string('unordered_list03', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = self.engine.render_to_string('unordered_list04', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = self.engine.render_to_string('unordered_list05', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@ignore_warnings(category=RemovedInDjango110Warning)
class DeprecatedUnorderedListSyntaxTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = self.engine.render_to_string('unordered_list01', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
output = self.engine.render_to_string('unordered_list02', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = self.engine.render_to_string('unordered_list03', {'a': ['x>', [[mark_safe('<y'), []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = self.engine.render_to_string('unordered_list04', {'a': ['x>', [[mark_safe('<y'), []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = self.engine.render_to_string('unordered_list05', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
class FunctionTests(SimpleTestCase):
def test_list(self):
self.assertEqual(unordered_list(['item 1', 'item 2']), '\t<li>item 1</li>\n\t<li>item 2</li>')
def test_nested(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1']]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>',
)
def test_nested2(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'
'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>',
)
def test_nested3(self):
self.assertEqual(
unordered_list(['item 1', 'item 2', ['item 2.1']]),
'\t<li>item 1</li>\n\t<li>item 2\n\t<ul>\n\t\t<li>item 2.1'
'</li>\n\t</ul>\n\t</li>',
)
def test_nested_multiple(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', ['item 1.1.1', ['item 1.1.1.1']]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'
'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'
'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>',
)
def test_nested_multiple2(self):
self.assertEqual(
unordered_list(['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'
'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'
'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>',
)
def test_autoescape(self):
self.assertEqual(
unordered_list(['<a>item 1</a>', 'item 2']),
'\t<li><a>item 1</a></li>\n\t<li>item 2</li>',
)
def test_autoescape_off(self):
self.assertEqual(
unordered_list(['<a>item 1</a>', 'item 2'], autoescape=False),
'\t<li><a>item 1</a></li>\n\t<li>item 2</li>',
)
def test_ulitem(self):
@python_2_unicode_compatible
class ULItem(object):
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
c = ULItem('<a>c</a>')
self.assertEqual(
unordered_list([a, b, c]),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def item_generator():
yield a
yield b
yield c
self.assertEqual(
unordered_list(item_generator()),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def test_ulitem_autoescape_off(self):
@python_2_unicode_compatible
class ULItem(object):
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
c = ULItem('<a>c</a>')
self.assertEqual(
unordered_list([a, b, c], autoescape=False),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def item_generator():
yield a
yield b
yield c
self.assertEqual(
unordered_list(item_generator(), autoescape=False),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_legacy(self):
"""
Old format for unordered lists should still work
"""
self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')
self.assertEqual(
unordered_list(['item 1', [['item 1.1', []]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>',
)
self.assertEqual(
unordered_list(['item 1', [['item 1.1', []],
['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'
'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>',
)
self.assertEqual(
unordered_list(['States', [['Kansas', [['Lawrence', []], ['Topeka', []]]], ['Illinois', []]]]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'
'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>',
)
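# Editorial note: as the expected strings above show, unordered_list renders
# only the inner <li>/<ul> markup; the template author supplies the outermost
# <ul> tags, e.g. <ul>{{ items|unordered_list }}</ul>.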
| bsd-3-clause |
tst-ccamp/earthenterprise | earth_enterprise/src/fusion/portableglobe/servers/portable_server_base.py | 5 | 5243 | #!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for the handlers used in the portable server."""
import os
import sys
import traceback
import tornado.web
import portable_globe
class BaseHandler(tornado.web.RequestHandler):
"""Historically, the base class for local and remote servers.
Remote servers are no longer supported.
"""
def WriteFile(self, path):
"""Return a simple file as content."""
# If local override is on, return the local file if it exists.
if (tornado.web.globe_.config_.LocalOverride() and
os.path.isfile("./local/%s" % path)):
print "Using local file:", path
return self.WriteLocalFile(path)
# Write the file from the package.
try:
self.write(tornado.web.globe_.ReadFile(path))
return True
except portable_globe.UnableToFindException as e:
return False
def WriteLocalFile(self, path):
"""Return a simple local file as content."""
path = path.replace("..", "__")
# Write the file from the package.
try:
fp = open("./local/%s" % path, "rb")
self.write(fp.read())
fp.close()
return True
except (IOError, portable_globe.UnableToFindException) as e:
print e
return False
def ShowUri(self, host):
"""Show the uri that was requested."""
# Comment out next line to increase performance.
if tornado.web.globe_.config_.Debug():
print "Host: %s Request: %s" % (host, self.request.uri)
def IsLocalhost(self):
"""Checks if request is from localhost."""
host = self.request.headers["Host"]
try:
(server, server_port) = host.split(":")
except:
server = host
# Accept localhost requests
if server == "localhost" or server == "127.0.0.1":
return True
return False
def IsValidLocalRequest(self):
"""Make sure that the request looks good before processing.
Returns:
Whether request should be processed.
"""
host = self.request.headers["Host"]
try:
(caller_host, _) = host.split(":")
except:
caller_host = host
# Accept all localhost requests
if caller_host == "localhost" or caller_host == "127.0.0.1":
self.ShowUri(host)
return True
return False
def IsBroadcasting(self):
return (not tornado.web.globe_.config_.disable_broadcasting_ and
tornado.web.globe_.config_.accept_all_requests_)
def IsValidRequest(self):
"""Makes sure that the request looks valid.
Returns:
Whether request should be processed.
"""
return self.IsBroadcasting() or self.IsValidLocalRequest()
class LocalDocsHandler(BaseHandler):
"""Class for returning the content of files directly from disk."""
def get(self, path):
"""Handle GET request for some local file.
For example it is used for setup pages.
Args:
path: Path to file to be returned.
"""
if not self.IsValidRequest():
raise tornado.web.HTTPError(404)
if path[-3:].lower() == "gif":
self.set_header("Content-Type", "image/gif")
elif path[-3:].lower() == "png":
self.set_header("Content-Type", "image/png")
elif path[-3:].lower() == "css":
self.set_header("Content-Type", "text/css")
else:
self.set_header("Content-Type", "text/html")
self.WriteLocalFile(path)
class ExtHandler(BaseHandler):
"""Class for passing control to externally defined routines."""
def get(self, path):
"""Handle GET request for some external request.
Args:
path: Path relative to the ext directory.
"""
try:
tornado.web.local_server_.ext_service_.ExtGetHandler(self, path)
except:
if self.get_argument("debug", "") or tornado.web.globe_.config_.Debug():
e = sys.exc_info()
self.set_header("Content-Type", "text/html")
self.write("<pre>%s</pre>" % "".join(
traceback.format_exception(e[0], e[1], e[2])
))
else:
self.set_header("Content-Type", "text/html")
self.write("GET service failed or is undefined.")
def post(self, path):
"""Handle POST request for some external request.
Args:
path: Path relative to the ext directory.
"""
try:
tornado.web.local_server_.ext_service_.ExtPostHandler(self, path)
except:
if self.get_argument("debug", "") or tornado.web.globe_.config_.Debug():
e = sys.exc_info()
self.set_header("Content-Type", "text/html")
self.write("<pre>%s</pre>" % "".join(
traceback.format_exception(e[0], e[1], e[2])
))
else:
self.set_header("Content-Type", "text/html")
self.write("POST service failed or is undefined.")
| apache-2.0 |
divio/django-shop | shop/models/cart.py | 1 | 12269 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import with_metaclass
import warnings
from collections import OrderedDict
from django.core import checks
from django.db import models
from django.utils.translation import ugettext_lazy as _
from shop import deferred
from shop.models.fields import JSONField
from shop.models.customer import CustomerModel
from shop.models.product import BaseProduct
from shop.modifiers.pool import cart_modifiers_pool
from shop.money import Money
class CartItemManager(models.Manager):
"""
Customized model manager for our CartItem model.
"""
def get_or_create(self, **kwargs):
"""
Create a unique cart item. If a matching product already exists in the given
cart, increase that item's quantity instead of creating a duplicate entry.
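A minimal, illustrative call (``cart`` and ``product`` stand for existing
cart and product instances; they are not defined here)::

    item, created = CartItemModel.objects.get_or_create(
        cart=cart, product=product, quantity=2)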
"""
cart = kwargs.pop('cart')
product = kwargs.pop('product')
quantity = int(kwargs.pop('quantity', 1))
# add a new item to the cart, or reuse an existing one, increasing the quantity
watched = not quantity
cart_item = product.is_in_cart(cart, watched=watched, **kwargs)
if cart_item:
if not watched:
cart_item.quantity += quantity
created = False
else:
cart_item = self.model(cart=cart, product=product, quantity=quantity, **kwargs)
created = True
cart_item.save()
return cart_item, created
def filter_cart_items(self, cart, request):
"""
Use this method to fetch items for shopping from the cart. It rearranges the result set
according to the defined modifiers.
"""
cart_items = self.filter(cart=cart, quantity__gt=0).order_by('updated_at')
for modifier in cart_modifiers_pool.get_all_modifiers():
cart_items = modifier.arrange_cart_items(cart_items, request)
return cart_items
def filter_watch_items(self, cart, request):
"""
Use this method to fetch items from the watch list. It rearranges the result set
according to the defined modifiers.
"""
watch_items = self.filter(cart=cart, quantity=0)
for modifier in cart_modifiers_pool.get_all_modifiers():
watch_items = modifier.arrange_watch_items(watch_items, request)
return watch_items
class BaseCartItem(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
"""
Holds the quantity of a product in the cart and a pointer to the actual
Product being purchased.
"""
cart = deferred.ForeignKey(
'BaseCart',
on_delete=models.CASCADE,
related_name='items',
)
product = deferred.ForeignKey(
BaseProduct,
on_delete=models.CASCADE,
)
product_code = models.CharField(
_("Product code"),
max_length=255,
null=True,
blank=True,
help_text=_("Product code of added item."),
)
updated_at = models.DateTimeField(
_("Updated at"),
auto_now=True,
)
extra = JSONField(verbose_name=_("Arbitrary information for this cart item"))
objects = CartItemManager()
class Meta:
abstract = True
verbose_name = _("Cart item")
verbose_name_plural = _("Cart items")
@classmethod
def check(cls, **kwargs):
errors = super(BaseCartItem, cls).check(**kwargs)
allowed_types = ['IntegerField', 'SmallIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'DecimalField', 'FloatField']
for field in cls._meta.fields:
if field.attname == 'quantity':
if field.get_internal_type() not in allowed_types:
msg = "Class `{}.quantity` must be of one of the types: {}."
errors.append(checks.Error(msg.format(cls.__name__, allowed_types)))
break
else:
msg = "Class `{}` must implement a field named `quantity`."
errors.append(checks.Error(msg.format(cls.__name__)))
return errors
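# Illustrative sketch only: a concrete cart item is expected to declare the
# required ``quantity`` field itself, using one of the types accepted by the
# check above, for instance:
#
#     class CartItem(BaseCartItem):
#         quantity = models.PositiveIntegerField(_("Quantity"), default=1)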
def __init__(self, *args, **kwargs):
# reduce the given fields to what the model actually can consume
all_field_names = [field.name for field in self._meta.get_fields(include_parents=True)]
model_kwargs = {k: v for k, v in kwargs.items() if k in all_field_names}
super(BaseCartItem, self).__init__(*args, **model_kwargs)
self.extra_rows = OrderedDict()
self._dirty = True
def save(self, *args, **kwargs):
super(BaseCartItem, self).save(*args, **kwargs)
self.cart.save(update_fields=['updated_at'])
self._dirty = True
def update(self, request):
"""
Loop over all registered cart modifier, change the price per cart item and optionally add
some extra rows.
"""
if not self._dirty:
return
self.refresh_from_db()
self.extra_rows = OrderedDict() # reset the dictionary
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.process_cart_item(self, request)
self._dirty = False
CartItemModel = deferred.MaterializedModel(BaseCartItem)
class CartManager(models.Manager):
"""
The Model Manager for any Cart inheriting from BaseCart.
"""
def get_from_request(self, request):
"""
Return the cart for current customer.
"""
if request.customer.is_visitor:
raise self.model.DoesNotExist("Cart for visiting customer does not exist.")
if not hasattr(request, '_cached_cart') or request._cached_cart.customer.user_id != request.customer.user_id:
request._cached_cart, created = self.get_or_create(customer=request.customer)
return request._cached_cart
def get_or_create_from_request(self, request):
has_cached_cart = hasattr(request, '_cached_cart')
if request.customer.is_visitor:
request.customer = CustomerModel.objects.get_or_create_from_request(request)
has_cached_cart = False
if not has_cached_cart or request._cached_cart.customer.user_id != request.customer.user_id:
request._cached_cart, created = self.get_or_create(customer=request.customer)
return request._cached_cart
class BaseCart(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
"""
The fundamental part of a shopping cart.
"""
customer = deferred.OneToOneField(
'BaseCustomer',
on_delete=models.CASCADE,
related_name='cart',
verbose_name=_("Customer"),
)
created_at = models.DateTimeField(
_("Created at"),
auto_now_add=True,
)
updated_at = models.DateTimeField(
_("Updated at"),
auto_now=True,
)
extra = JSONField(verbose_name=_("Arbitrary information for this cart"))
# our CartManager determines the cart object from the request.
objects = CartManager()
class Meta:
abstract = True
verbose_name = _("Shopping Cart")
verbose_name_plural = _("Shopping Carts")
def __init__(self, *args, **kwargs):
super(BaseCart, self).__init__(*args, **kwargs)
# That will hold things like tax totals or total discount
self.extra_rows = OrderedDict()
self._cached_cart_items = None
self._dirty = True
def save(self, force_update=False, *args, **kwargs):
if self.pk or force_update is False:
super(BaseCart, self).save(force_update=force_update, *args, **kwargs)
self._dirty = True
def update(self, request, raise_exception=False):
"""
This should be called after a cart item changed quantity, has been added or removed.
It will loop over all items in the cart, and call all the configured cart modifiers.
After this is done, it will compute and update the order's total and subtotal fields, along
with any supplement added along the way by modifiers.
Note that these added fields are not stored - we actually want to
reflect rebate and tax changes on the *cart* items, but we don't want
that for the order items (since they are legally binding after the
"purchase" button was pressed)
"""
if not self._dirty:
return
if self._cached_cart_items:
items = self._cached_cart_items
else:
items = CartItemModel.objects.filter_cart_items(self, request)
# This calls all the pre_process_cart methods and the pre_process_cart_item for each item,
# before processing the cart. This allows to prepare and collect data on the cart.
for modifier in cart_modifiers_pool.get_all_modifiers():
modifier.pre_process_cart(self, request, raise_exception)
for item in items:
modifier.pre_process_cart_item(self, item, request, raise_exception)
self.extra_rows = OrderedDict() # reset the dictionary
self.subtotal = 0 # reset the subtotal
for item in items:
# item.update iterates over all cart modifiers and invokes method `process_cart_item`
item.update(request)
self.subtotal += item.line_total
# Iterate over the registered modifiers, to process the cart's summary
for modifier in cart_modifiers_pool.get_all_modifiers():
for item in items:
modifier.post_process_cart_item(self, item, request)
modifier.process_cart(self, request)
# This calls the post_process_cart method from cart modifiers, if any.
# It allows for a last bit of processing on the "finished" cart, before
# it is displayed
for modifier in reversed(cart_modifiers_pool.get_all_modifiers()):
modifier.post_process_cart(self, request)
# Cache updated cart items
self._cached_cart_items = items
self._dirty = False
def empty(self):
"""
Remove the cart with all its items.
"""
if self.pk:
self.items.all().delete()
self.delete()
def merge_with(self, other_cart):
"""
Merge the contents of the other cart into this one, afterwards delete it.
This is done item by item, so that duplicate items increase the quantity.
"""
# iterate over the cart and add quantities for items from other cart considered as equal
if self.id == other_cart.id:
raise RuntimeError("Can not merge cart with itself")
for item in self.items.all():
other_item = item.product.is_in_cart(other_cart, extra=item.extra)
if other_item:
item.quantity += other_item.quantity
item.save()
other_item.delete()
# the remaining items from the other cart are merged into this one
other_cart.items.update(cart=self)
other_cart.delete()
def __str__(self):
return "{}".format(self.pk) if self.pk else "(unsaved)"
@property
def num_items(self):
"""
Returns the number of items in the cart.
"""
return self.items.filter(quantity__gt=0).count()
@property
def total_quantity(self):
"""
Returns the total quantity of all items in the cart.
"""
aggr = self.items.aggregate(quantity=models.Sum('quantity'))
return aggr['quantity'] or 0
# If we knew that self.items was already evaluated, this might be faster:
# return sum([ci.quantity for ci in self.items.all()])
@property
def is_empty(self):
return self.num_items == 0 and self.total_quantity == 0
def get_caption_data(self):
warnings.warn("This method is deprecated")
return {'num_items': self.num_items, 'total_quantity': self.total_quantity,
'subtotal': self.subtotal, 'total': self.total}
@classmethod
def get_default_caption_data(cls):
warnings.warn("This method is deprecated")
return {'num_items': 0, 'total_quantity': 0, 'subtotal': Money(), 'total': Money()}
CartModel = deferred.MaterializedModel(BaseCart)
| bsd-3-clause |
Himmele/git-repo | subcmds/status.py | 1 | 7748 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from command import PagedCommand
import threading as _threading
import glob
import itertools
import os
import sys
import io
from color import Coloring
class Status(PagedCommand):
common = True
helpSummary = "Show the working tree status"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
'%prog' compares the working tree to the staging area (aka index),
and the most recent commit on this branch (HEAD), in each project
specified. A summary is displayed, one line per file where there
is a difference between these three states.
The -j/--jobs option can be used to run multiple status queries
in parallel.
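For example, '%prog -j 8' checks up to eight projects at once.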
The -o/--orphans option can be used to show objects that are in
the working directory, but not associated with a repo project.
This includes unmanaged top-level files and directories, but also
includes deeper items. For example, if dir/subdir/proj1 and
dir/subdir/proj2 are repo projects, dir/subdir/proj3 will be shown
if it is not known to repo.
Status Display
--------------
The status display is organized into three columns of information,
for example if the file 'subcmds/status.py' is modified in the
project 'repo' on branch 'devwork':
project repo/ branch devwork
-m subcmds/status.py
The first column explains how the staging area (index) differs from
the last commit (HEAD). Its values are always displayed in upper
case and have the following meanings:
-: no difference
A: added (not in HEAD, in index )
M: modified ( in HEAD, in index, different content )
D: deleted ( in HEAD, not in index )
R: renamed (not in HEAD, in index, path changed )
C: copied (not in HEAD, in index, copied from another)
T: mode changed ( in HEAD, in index, same content )
U: unmerged; conflict resolution required
The second column explains how the working directory differs from
the index. Its values are always displayed in lower case and have
the following meanings:
-: new / unknown (not in index, in work tree )
m: modified ( in index, in work tree, modified )
d: deleted ( in index, not in work tree )
"""
def _Options(self, p):
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int', default=2,
help="number of projects to check simultaneously")
p.add_option('-o', '--orphans',
dest='orphans', action='store_true',
help="include objects in working directory outside of repo projects")
def _StatusHelper(self, project, clean_counter, sem, output):
"""Obtains the status for a specific project.
Obtains the status for a project, redirecting the output to
the specified object. It will release the semaphore
when done.
Args:
project: Project to get status of.
clean_counter: Counter for clean projects.
sem: Semaphore, will call release() when complete.
output: Where to output the status.
"""
try:
state = project.PrintWorkTreeStatus(output)
if state == 'CLEAN':
clean_counter.__next__()
finally:
sem.release()
def _FindOrphans(self, dirs, proj_dirs, proj_dirs_parents, outstring):
"""find 'dirs' that are present in 'proj_dirs_parents' but not in 'proj_dirs'"""
status_header = ' --\t'
for item in dirs:
if not os.path.isdir(item):
outstring.write(''.join([status_header, item, '\n']))
continue
if item in proj_dirs:
continue
if item in proj_dirs_parents:
self._FindOrphans(glob.glob('%s/.*' % item) + \
glob.glob('%s/*' % item), \
proj_dirs, proj_dirs_parents, outstring)
continue
outstring.write(''.join([status_header, item, '/', '\n']))
def Execute(self, opt, args):
all_projects = self.GetProjects(args)
counter = itertools.count()
if opt.jobs == 1:
for project in all_projects:
state = project.PrintWorkTreeStatus()
if state == 'CLEAN':
counter.__next__()
else:
sem = _threading.Semaphore(opt.jobs)
threads_and_output = []
for project in all_projects:
sem.acquire()
class BufList(io.StringIO):
def dump(self, ostream):
ostream.write(self.getvalue())
output = BufList()
t = _threading.Thread(target=self._StatusHelper,
args=(project, counter, sem, output))
threads_and_output.append((t, output))
t.daemon = True
t.start()
for (t, output) in threads_and_output:
t.join()
output.dump(sys.stdout)
output.close()
if len(all_projects) == counter.__next__():
print('nothing to commit (working directory clean)')
if opt.orphans:
proj_dirs = set()
proj_dirs_parents = set()
for project in self.GetProjects(None, missing_ok=True):
proj_dirs.add(project.relpath)
(head, _tail) = os.path.split(project.relpath)
while head != "":
proj_dirs_parents.add(head)
(head, _tail) = os.path.split(head)
proj_dirs.add('.repo')
class StatusColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'status')
self.project = self.printer('header', attr='bold')
self.untracked = self.printer('untracked', fg='red')
orig_path = os.getcwd()
try:
os.chdir(self.manifest.topdir)
outstring = io.StringIO()
self._FindOrphans(glob.glob('.*') + \
glob.glob('*'), \
proj_dirs, proj_dirs_parents, outstring)
if outstring.tell() > 0:
output = StatusColoring(self.manifest.globalConfig)
output.project('Objects not within a project (orphans)')
output.nl()
outstring.seek(0)
lines = outstring.readlines()
for line in lines[0:len(lines)-1]:
output.untracked(line)
output.untracked(str.rstrip(lines[len(lines)-1]))
else:
print('No orphan files or directories')
outstring.close()
finally:
# Restore CWD.
os.chdir(orig_path)
| apache-2.0 |
Kubuxu/cjdns | node_build/dependencies/libuv/build/gyp/test/mac/gyptest-rpath.py | 88 | 1303 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'rpath'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def GetRpaths(p):
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'cmd LC_RPATH.*?path (.*?) \(offset \d+\)', re.DOTALL)
proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
assert not proc.returncode
return r.findall(o)
if GetRpaths('libdefault_rpath.dylib') != []:
test.fail_test()
if GetRpaths('libexplicit_rpath.dylib') != ['@executable_path/.']:
test.fail_test()
if (GetRpaths('libexplicit_rpaths_escaped.dylib') !=
['First rpath', 'Second rpath']):
test.fail_test()
if GetRpaths('My Framework.framework/My Framework') != ['@loader_path/.']:
test.fail_test()
if GetRpaths('executable') != ['@executable_path/.']:
test.fail_test()
test.pass_test()
| gpl-3.0 |
mindnervestech/mnrp | addons/website_mail/controllers/email_designer.py | 37 | 3093 | # -*- coding: utf-8 -*-
from urllib import urlencode
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.mail import html_sanitize
class WebsiteEmailDesigner(http.Controller):
@http.route('/website_mail/email_designer', type='http', auth="user", website=True)
def index(self, model, res_id, template_model=None, **kw):
if not model or not model in request.registry or not res_id:
return request.redirect('/')
model_fields = request.registry[model]._fields
if 'body' not in model_fields and 'body_html' not in model_fields or \
'email' not in model_fields and 'email_from' not in model_fields or \
'name' not in model_fields and 'subject' not in model_fields:
return request.redirect('/')
res_id = int(res_id)
obj_ids = request.registry[model].exists(request.cr, request.uid, [res_id], context=request.context)
if not obj_ids:
return request.redirect('/')
# try to find fields to display / edit -> as t-field is static, we have to limit
# the available fields to a given subset
email_from_field = 'email'
if 'email_from' in model_fields:
email_from_field = 'email_from'
subject_field = 'name'
if 'subject' in model_fields:
subject_field = 'subject'
body_field = 'body'
if 'body_html' in model_fields:
body_field = 'body_html'
cr, uid, context = request.cr, request.uid, request.context
record = request.registry[model].browse(cr, uid, res_id, context=context)
values = {
'record': record,
'templates': None,
'model': model,
'res_id': res_id,
'email_from_field': email_from_field,
'subject_field': subject_field,
'body_field': body_field,
}
if getattr(record, body_field):
values['mode'] = 'email_designer'
else:
if kw.get('enable_editor'):
kw.pop('enable_editor')
fragments = dict(model=model, res_id=res_id, **kw)
if template_model:
fragments['template_model'] = template_model
return request.redirect('/website_mail/email_designer?%s' % urlencode(fragments))
values['mode'] = 'email_template'
tmpl_obj = request.registry['email.template']
if template_model:
tids = tmpl_obj.search(cr, uid, [('model', '=', template_model)], context=context)
else:
tids = tmpl_obj.search(cr, uid, [], context=context)
templates = tmpl_obj.browse(cr, uid, tids, context=context)
values['templates'] = templates
values['html_sanitize'] = html_sanitize
return request.website.render("website_mail.email_designer", values)
@http.route(['/website_mail/snippets'], type='json', auth="user", website=True)
def snippets(self):
return request.website._render('website_mail.email_designer_snippets')
| agpl-3.0 |
Nick-Hall/gramps | gramps/gen/filters/rules/repository/_regexpidof.py | 5 | 1743 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._regexpidbase import RegExpIdBase
#-------------------------------------------------------------------------
#
# HasIdOf
#
#-------------------------------------------------------------------------
class RegExpIdOf(RegExpIdBase):
"""
Rule that checks for a repository whose Gramps ID
matches a regular expression.
"""
name = _('Repositories with Id containing <text>')
description = _("Matches repositories whose Gramps ID matches "
"the regular expression")
| gpl-2.0 |
alyakhtar/Katastrophe | katastrophe/latest.py | 1 | 22503 | import requests
from bs4 import BeautifulSoup
from tabulate import tabulate
from sys import platform
import subprocess
import os,time
from run import download
import getpass
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
try:
xrange_ = xrange
except NameError:
xrange_ = range
def download_torrent(link, name, ssl):
"""Open the torrent's magnet link in a local client, falling back to downloading its .torrent file."""
file_name = "".join(name.split())
source_code = requests.get(link, verify = ssl)
plain_text = source_code.text.encode('utf-8')
soup = BeautifulSoup(plain_text, "lxml")
magnet = soup.find('a', {'title': 'Magnet link'})
magnet_link = magnet.get('href')
torr = soup.find('a', {'title': 'Download verified torrent file'})
torr_file = torr.get('href')
user = getpass.getuser()
directory = 'Torrents'
if platform == "linux" or platform == "linux2" or platform == "darwin":
directory = '/home/'+ user +'/Torrents'
try:
subprocess.Popen(['xdg-open', magnet_link],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
if not os.path.exists(directory):
os.makedirs(directory)
os.system('wget -O %s/%s.gz %s' % (directory, file_name, torr_file))
os.system('gunzip %s/%s.gz' %(directory,file_name))
download(file_name)
print '\n\nDownload Complete\n'
elif platform == "win32":
directory = 'C:\\Users\\' + user + '\\Torrents'
procs = []
flag = 0
client = ''
cmd = 'WMIC PROCESS get Caption'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in proc.stdout:
procs.append(line.strip())
clients = ['BitTorrent.exe',
'uTorrent.exe',
'deluge.exe']
for c in clients:
if c in procs:
client = c
break
if client:
cmd = 'wmic process where "name=\'{}\'" get ExecutablePath'.format(client)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
loc = proc.stdout.readlines()
exe = loc[1].strip()
subprocess.Popen([exe.decode(), magnet_link])
else:
pwrshell = subprocess.Popen([r'C:\WINDOWS\system32\WindowsPowerShell\v1.0\powershell.exe',
'-ExecutionPolicy',
'Unrestricted',
'wget %s -Outfile %s/%s.torrent' % (torr_file, directory, file_name)], cwd=os.getcwd())
result = pwrshell.wait()
print '\n'
download(file_name+'.torrent')
print '\n\nDownload Complete\n'
def fetch(ssl):
"""Scrape the Kickass Torrents front page and return (sno, name, size, seeds, leechers) rows."""
link = 'https://kickasstorrents.to/full/'
source_code = requests.get(link,verify = ssl)
plain_text = source_code.text.encode('utf-8')
soup = BeautifulSoup(plain_text, "lxml")
global torrent_href
torrent_name = []
torrent_seeds = []
torrent_href = []
torrent_size = []
torrent_leechers = []
sno = []
for i in soup.findAll('table', {'class': 'data frontPageWidget'}):
for j in i('a', {'class': 'cellMainLink'}):
torrent_name.append(
''.join([k if ord(k) < 128 else '' for k in j.get_text()]))
torrent_href.append(j.get('href'))
for j in i('td', {'class': 'nobr center'}):
torrent_size.append(j.get_text())
for j in i('td', {'class': 'green center'}):
torrent_seeds.append(j.get_text())
for j in i('td', {'class': 'red lasttd center'}):
torrent_leechers.append(j.get_text())
for i in xrange_(8):
for j in xrange_(15):
sno.append(j+1)
combine = zip(sno,torrent_name, torrent_size, torrent_seeds, torrent_leechers)
return combine
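# Each row returned by fetch() is a (serial, name, size, seeds, leechers)
# tuple, e.g. (1, 'Some.Torrent.Name', '1.4 GB', '321', '45') -- the values
# shown here are placeholders, not real listings. The front page yields 15
# rows for each of its 8 categories, 120 rows in total.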
def movies_torrent(ssl):
torrents = fetch(ssl)
movies = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(15):
movies.append(torrents[i])
print '\nLATEST MOVIE TORRENTS\n'
print(tabulate(movies, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[i - 1],movies[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[i - 1],movies[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[i - 1],movies[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[i - 1],movies[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[int(serial) - 1],movies[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def tv_torrent(ssl):
torrents = fetch(ssl)
tv = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(15,30):
tv.append(torrents[i])
print '\nLATEST TV TORRENTS\n'
print(tabulate(tv, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+15) - 1],tv[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+15) - 1],tv[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+15) - 1],tv[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+15) - 1],tv[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 15) - 1],tv[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def music_torrent(ssl):
torrents = fetch(ssl)
music = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(30,45):
music.append(torrents[i])
print '\nLATEST MUSIC TORRENTS\n'
print(tabulate(music, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+30) - 1],music[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+30) - 1],music[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+30) - 1],music[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+30) - 1],music[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 30) - 1],music[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def games_torrent(ssl):
torrents = fetch(ssl)
games = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(45,60):
games.append(torrents[i])
print '\nLATEST GAME TORRENTS\n'
print(tabulate(games, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+45) - 1],games[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+45) - 1],games[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+45) - 1],games[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+45) - 1],games[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 45) - 1],games[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def applications_torrent(ssl):
torrents = fetch(ssl)
applications = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(60,75):
applications.append(torrents[i])
print '\nLATEST APPLICATION TORRENTS\n'
print(tabulate(applications, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+60) - 1],applications[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+60) - 1],applications[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+60) - 1],applications[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+60) - 1],applications[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 60) - 1],applications[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def anime_torrent(ssl):
torrents = fetch(ssl)
anime = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(75,90):
anime.append(torrents[i])
print '\nLATEST ANIME TORRENTS\n'
print(tabulate(anime, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+75) - 1],anime[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+75) - 1],anime[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+75) - 1],anime[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+75) - 1],anime[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 75) - 1],anime[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def books_torrent(ssl):
torrents = fetch(ssl)
books = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(90,105):
books.append(torrents[i])
print '\nLATEST BOOK TORRENTS\n'
print(tabulate(books, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+90) - 1],books[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+90) - 1],books[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+90) - 1],books[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+90) - 1],books[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 90) - 1],books[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n"
def losslessmusic_torrent(ssl):
torrents = fetch(ssl)
losslessmusic = []
headers = ['SNO.', 'NAME', 'SIZE', 'SEEDS', 'LEECHERS']
for i in xrange_(105,120):
losslessmusic.append(torrents[i])
print '\nLATEST LOSSLESS MUSIC TORRENTS\n'
print(tabulate(losslessmusic, headers, tablefmt='psql', numalign="center"))
print('Enter torrent No.(s) to download or e to exit : '),
serial = raw_input_()
if serial == 'e' or serial == 'E':
exit()
else:
if ',' in serial:
numbs = serial.split(',')
if len(numbs) < 3:
if numbs[0] != '' and numbs[1] != '' :
start = int(numbs[0])
end = int(numbs[1])
if start < end:
if end < 16 and start > 0:
for i in xrange_(start,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+105) - 1],losslessmusic[i - 1][1], ssl)
elif numbs[0] != '' and numbs[1] == '' :
start = int(numbs[0])
if start > 0 and start < 16:
for i in xrange_(start,16):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+105) - 1],losslessmusic[i - 1][1], ssl)
else:
end = int(numbs[1])
if end > 0 and end < 16:
for i in xrange_(1,end+1):
download_torrent('https://kickasstorrents.to' + torrent_href[(i+105) - 1],losslessmusic[i - 1][1], ssl)
else:
for sn in numbs:
i = int(sn)
if i > 0 and i < 16:
download_torrent('https://kickasstorrents.to' + torrent_href[(i+105) - 1],losslessmusic[i - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL NUMBERS....TRY AGAIN!!\n\n"
else:
if int(serial) <= 15 and int(serial) >= 1:
download_torrent('https://kickasstorrents.to' + torrent_href[(int(serial) + 105) - 1],losslessmusic[int(serial) - 1][1], ssl)
else:
print "\n\n\tINCORRECT SERIAL, TORRRENT DOES NOT EXIST...TRY AGAIN!!\n\n" | mit |
mars-knowsnothing/amos-bot | src/Lib/encodings/charmap.py | 860 | 2084 | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
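# Illustrative direct use: each input byte indexes into the mapping string,
# so codecs.charmap_decode(b'\x00\x01', 'strict', u'ab') returns (u'ab', 2).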
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalEncoder.__init__(self, errors)
self.mapping = mapping
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, self.mapping)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalDecoder.__init__(self, errors)
self.mapping = mapping
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, self.mapping)[0]
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='charmap',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| gpl-3.0 |
samsu/neutron | openstack/common/lockutils.py | 11 | 9939 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from neutron.openstack.common import fileutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import local
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
default=os.environ.get("NEUTRON_LOCK_PATH"),
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
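Illustrative usage of the platform-specific subclass exposed below as
``InterProcessLock`` (the lock path is a placeholder)::

    with InterProcessLock('/var/lock/myservice.lock'):
        ...  # at most one process runs this block at a time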
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
This function yields a `threading.Semaphore` instance (if we don't use
eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
lock files on disk with a meaningful prefix.
:param external: The external keyword argument denotes whether this lock
should work across multiple processes. This means that if two different
workers both run a method decorated with @synchronized('mylock',
external=True), only one of them will execute at a time.
:param lock_path: The lock_path keyword argument is used to specify a
special location for external lock files to live. If nothing is set, then
CONF.lock_path is used as a default.
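Illustrative use (the lock name, prefix and body are placeholders)::

    with lock('state-update', lock_file_prefix='myapp-', external=True):
        ...  # mutually exclusive across processes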
"""
with _semaphores_lock:
try:
sem = _semaphores[name]
except KeyError:
sem = threading.Semaphore()
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
{'lock': name})
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
LOG.info(_('Created lock path: %s'), local_lock_path)
def add_prefix(name, prefix):
if not prefix:
return name
sep = '' if prefix.endswith('-') else '-'
return '%s%s%s' % (prefix, sep, name)
# NOTE(mikal): the lock name cannot contain directory
# separators
lock_file_name = add_prefix(name.replace(os.sep, '_'),
lock_file_prefix)
lock_file_path = os.path.join(local_lock_path, lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock as lock:
LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
yield lock
finally:
LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
{'lock': name, 'path': lock_file_path})
else:
yield sem
finally:
local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
try:
with lock(name, lock_file_prefix, external, lock_path):
LOG.debug(_('Got semaphore / lock "%(function)s"'),
{'function': f.__name__})
return f(*args, **kwargs)
finally:
LOG.debug(_('Semaphore / lock released "%(function)s"'),
{'function': f.__name__})
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
"""Create a dir for locks and pass it to command from arguments
If you run this:
python -m openstack.common.lockutils python setup.py testr <etc>
a temporary directory will be created for all your locks and passed to all
your tests in an environment variable. The temporary dir will be deleted
afterwards and the return value will be preserved.
"""
lock_dir = tempfile.mkdtemp()
os.environ["NEUTRON_LOCK_PATH"] = lock_dir
try:
ret_val = subprocess.call(argv[1:])
finally:
shutil.rmtree(lock_dir, ignore_errors=True)
return ret_val
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 |
neonsoftware/yard | old/apply/yard/static/bower_components/web-animations-js/test/update-testcases.py | 170 | 1754 | #!/usr/bin/python
import cStringIO as StringIO
from fnmatch import fnmatch
import difflib
import os
import sys
def get_name(filename):
return os.path.splitext(filename)[0]
def list_dir(dir_path, filter_func):
return sorted(filter(filter_func, os.listdir(dir_path)), key=get_name)
def main():
test_dir = os.path.dirname(os.path.realpath(__file__))
testcase_dir = os.path.join(test_dir, 'testcases')
testcase_file = os.path.join(test_dir, 'testcases.js')
def is_testcase_file(filename):
return (
fnmatch(filename, "*.html") and
not fnmatch(filename, "manual-test*") and
not fnmatch(filename, "disabled-*"))
new_testcases = StringIO.StringIO()
new_testcases.write("""\
// This file is automatically generated by test/update-testcases.py.
// Disable tests by adding them to test/disabled-testcases
""")
new_testcases.write('var tests = [\n \'')
new_testcases.write(
'\',\n \''.join(list_dir(testcase_dir, is_testcase_file)))
new_testcases.write('\',\n];\n')
new_testcases.seek(0)
new_testcases_lines = new_testcases.readlines()
current_testcases_lines = file(testcase_file).readlines()
lines = list(difflib.unified_diff(
current_testcases_lines, new_testcases_lines,
fromfile=testcase_file, tofile=testcase_file))
if len(lines) == 0:
sys.stdout.write('Nothing to do\n')
sys.exit(0)
if not "--dry-run" in sys.argv:
file(testcase_file, "w").write("".join(new_testcases_lines))
sys.stdout.write(
'Updating %s with the following diff.\n' % testcase_file)
for line in lines:
sys.stdout.write(line)
sys.exit(1)
if __name__ == '__main__':
main()
| mit |
dancingdan/tensorflow | tensorflow/python/eager/function.py | 2 | 82892 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import sys
import threading
import weakref
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import autograph
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2_impl
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# This is to avoid a circular dependency with cond_v2_impl
# (function -> gradients_impl -> control_flow_ops -> cond_v2_impl).
cond_v2_impl._function = sys.modules[__name__] # pylint: disable=protected-access
# This is to avoid a circular dependency with gradients_impl
gradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access
FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name"
BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name"
# TODO(scottzhu): Update this to allow arbitrary attribute names in future.
WHITELIST_FUNCTION_ATTRIBUTE_REGEX = [
"experimental_.*",
FORWARD_FUNCTION_ATTRIBUTE_NAME,
BACKWARD_FUNCTION_ATTRIBUTE_NAME
]
def _create_substitute_placeholder(value, name=None, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=value.shape, name=name)
_copy_handle_data(value, placeholder)
return placeholder
def _copy_handle_data(source_t, target_t):
"""Copies HandleData for variant and resource type tensors if available.
The CppShapeInferenceResult::HandleData proto contains information about the
shapes and types of the element tensors of resource/variant type tensors.
We need to copy this across function boundaries, i.e., when capturing a
placeholder or when returning a function tensor as output. If we don't do this
the element tensors will have unknown shapes, e.g., if a TensorList variant
tensor is captured as a placeholder, elements popped from that list would have
unknown shape.
Args:
source_t: The tensor to copy HandleData from.
target_t: The tensor to copy HandleData to.
"""
if (target_t.dtype == dtypes_module.resource or
target_t.dtype == dtypes_module.variant):
if isinstance(source_t, ops.EagerTensor):
handle_data = source_t._handle_data # pylint: disable=protected-access
else:
handle_data = resource_variable_ops.get_resource_handle_data(source_t)
if handle_data is not None and handle_data.is_set:
# pylint: disable=protected-access
pywrap_tensorflow.SetHandleShapeAndType(target_t.graph._c_graph,
target_t._as_tf_output(),
handle_data.SerializeToString())
# pylint: enable=protected-access
# Ensure that shapes and dtypes are propagated.
shapes, types = zip(*[(pair.shape, pair.dtype)
for pair in handle_data.shape_and_type])
ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
shapes = [[d.size for d in s.dim]
if not s.unknown_rank else None for s in shapes]
pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
target_t._op._graph._c_graph, # pylint: disable=protected-access
target_t._as_tf_output(), # pylint: disable=protected-access
shapes, ranks, types)
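# For example (a hedged illustration): if a TensorList variant tensor whose
# elements are float32 vectors of shape [2] is captured as a placeholder,
# copying its HandleData lets ops that pop elements from the placeholder
# infer shape [2] and dtype float32 instead of <unknown>.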
def _get_device_functions(ctx, graph):
"""Returns a tuple of device functions representing the device stack."""
if ctx.executing_eagerly():
return (pydev.merge_device(ctx.device_name),)
else:
return tuple(graph._device_functions_outer_to_inner) # pylint: disable=protected-access
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
  Currently only primitive types are supported: bool, int, float and string.
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
    ValueError: If the kwargs contain an unwhitelisted name or unsupported
      value types.
"""
attrs = {}
for key, value in attributes.items():
    if not any(re.match(reg, key)
               for reg in WHITELIST_FUNCTION_ATTRIBUTE_REGEX):
raise ValueError("Attribute name is not whitelisted. "
"Whitelisted: prefix %s, got: %s" %
(WHITELIST_FUNCTION_ATTRIBUTE_REGEX, key))
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, (str, bytes)):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(key, type(value)))
return attrs
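# A minimal usage sketch for `_parse_func_attrs`; the attribute names below
# are hypothetical but chosen to match WHITELIST_FUNCTION_ATTRIBUTE_REGEX:
#
#   attrs = _parse_func_attrs({
#       "experimental_tag": "my_tag",   # -> AttrValue(s=b"my_tag")
#       "experimental_opt_in": True,    # -> AttrValue(b=True)
#       "experimental_version": 2,      # -> AttrValue(i=2)
#   })
#   # _parse_func_attrs({"not_whitelisted": 1}) would raise ValueError.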
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
seed: The graph-level random seed.
"""
def __init__(self, name):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, device stack, and
distribution strategy stack from the current context or graph.
Args:
name: the name of the function.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.structured_outputs = None
self._weak_variables = []
self.outer_graph = ops.get_default_graph()
self.captures = collections.OrderedDict()
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
self._xla_compile = (context.context().device_spec.device_type == "TPU")
self._add_device_to_stack(context.context().device_name)
else:
self.seed = graph.seed
self._xla_compile = getattr(graph, "_xla_compile", False)
self._device_function_stack = graph._device_function_stack.copy() # pylint: disable=protected-access
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
# TODO(b/112165328, b/112906995): summaries depend on inheriting collections
# from the default graph even in eager mode. It'd be nice to not have a
# default graph with eager execution, so hopefully this will go away when we
# remove collections.
# pylint: disable=protected-access
self._collections = graph._collections
# TODO(b/112906995): distribution strategy depends on inheriting this stack
# from the default graph even in eager mode. Maybe it should be part of the
# eager context?
self._distribution_strategy_stack = graph._distribution_strategy_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
self._graph_key = graph._graph_key
# pylint: enable=protected-access
@property
def variables(self):
"""A list of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Yields:
Strong references to variables accessed by this FuncGraph.
"""
for weak_v in self._weak_variables:
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
yield v
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def create_op(
self,
op_type,
inputs,
dtypes,
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
for i, inp in enumerate(inputs):
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
inputs[i] = inp
return super(FuncGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
Returns:
Tensor from this FuncGraph.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
return self._capture_helper(tensor, name)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name):
captured_tensor = self.captures.get(tensor, None)
if captured_tensor is None:
captured_tensor = _create_substitute_placeholder(tensor, name=name,
dtype=tensor.dtype)
self.captures[tensor] = captured_tensor
self.inputs.append(captured_tensor)
tape.record_operation("captured_value", [captured_tensor], [tensor],
lambda x: [x])
return captured_tensor
@property
def external_captures(self):
"""External tensors captured by this function."""
return list(self.captures.keys())
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return list(self.captures.values())
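# A capture bookkeeping sketch (assuming `t` is a tensor from the outer graph
# and `g` is a FuncGraph): the first `g.capture(t)` creates a placeholder in
# `g` and records the (external, internal) pair; repeated captures of the same
# tensor return the cached placeholder.
#
#   with g.as_default():
#     p1 = g.capture(t)
#     p2 = g.capture(t)
#   assert p1 is p2
#   assert g.external_captures == [t]
#   assert g.internal_captures == [p1]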
def _forward_name(n):
"""The name of a generated forward defun named n."""
return "__forward_%s_%s" % (n, ops.uid())
def _backward_name(n):
"""The name of a generated backward defun named n."""
return "__backward_%s_%s" % (n, ops.uid())
def _inference_name(n):
"""The name of a forward-but-no-gradient defun named n."""
return "__inference_%s_%s" % (n, ops.uid())
def _register(fn):
"""Registers the function `fn`."""
context.context().add_function(fn)
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
"""Callable with the interface of `framework.function._DefinedFunction.`
`_EagerDefinedFunction` encapsulates a function definition and its properties,
and it provides a method for calling the encapsulated function. Some Ops
take functions as attributes, which have type `func`; an instance of this
class may be provided as the value of these `func` attributes.
"""
def __init__(self, name, graph, inputs, outputs, attrs):
"""Initializes an eager defined function.
Args:
name: str, the name for the created function.
graph: Graph, the graph containing the operations in the function
inputs: the tensors in the graph to be used as inputs to the function
outputs: the tensors in the graph which will be outputs to the function
attrs: dict mapping names of attributes to their AttrValue values
"""
operations = [
op for op in graph.get_operations()
if op not in set(arg.op for arg in inputs)
]
fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(
graph._c_graph, # pylint: disable=protected-access
compat.as_str(name),
False,
[o._c_op for o in operations], # pylint: disable=protected-access
[t._as_tf_output() for t in inputs], # pylint: disable=protected-access
[t._as_tf_output() for t in outputs], # pylint: disable=protected-access
[],
None,
compat.as_str(""))
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(iga): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use status.
pywrap_tensorflow.TF_FunctionSetAttrValueProto(
fn, compat.as_str(name), serialized)
    # TODO(apassos) avoid creating a FunctionDef (especially to grab the
    # signature), but also in general it's nice not to depend on it.
with c_api_util.tf_buffer() as buffer_:
pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)
proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
function_def = function_pb2.FunctionDef()
function_def.ParseFromString(compat.as_bytes(proto_data))
if context.executing_eagerly():
_register(fn)
self.definition = function_def
self.name = compat.as_bytes(function_def.signature.name)
self.signature = function_def.signature
self._num_outputs = len(self.signature.output_arg)
self._output_types = [o.type for o in self.signature.output_arg]
self._output_shapes = [o.shape for o in outputs]
self._func_graph_outputs = outputs
self.grad_func_name = None
self.python_grad_func = None
self._c_func = c_api_util.ScopedTFFunction(fn)
self._grad_func = None
self._graph = graph
self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)
def add_to_graph(self, g):
# pylint: disable=protected-access
if self.name not in g._functions:
g._add_function(self)
for f in self._graph._functions.values():
if f.name not in g._functions:
g._add_function(f)
# pylint: enable=protected-access
@property
def stateful_ops(self):
return self._stateful_ops
def call(self, ctx, args):
"""Calls this function with `args` as inputs.
Function execution respects device annotations only if the function won't
be compiled with xla.
Args:
ctx: a Context object
args: a list of arguments to supply this function with.
Returns:
The outputs of the function call.
Raises:
ValueError: if the number of arguments is incorrect.
"""
executing_eagerly = ctx.executing_eagerly()
if self._graph._xla_compile: # pylint: disable=protected-access
# XLA compilation relies upon a custom kernel creator to run functions.
signature = self.signature
if executing_eagerly:
outputs = execute.execute(
str(signature.name),
num_outputs=self._num_outputs,
inputs=args,
attrs=None,
ctx=ctx)
else:
g = ops.get_default_graph()
self.add_to_graph(g)
op = g.create_op(
signature.name,
[ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
op_def=signature,
name="FunctionCall",
compute_shapes=False)
outputs = op.outputs
if not outputs:
return op
outputs = [outputs] if isinstance(
outputs, (ops.Tensor, type(None))) else list(outputs)
else:
# TODO(akshayka): Either remove this if the FunctionLibraryRuntime
# creates `PartitionedCallOp` kernels by default, or remove the previous
# branch if a TPU kernel is registered for `PartitionedCall`.
if len(args) != len(self.signature.input_arg):
raise ValueError(
"Arguments and signature arguments do not match: %s %s " %
(len(args), len(list(self.signature.input_arg))))
outputs = functional_ops.partitioned_call(
args=args,
f=self,
tout=self._output_types,
executing_eagerly=executing_eagerly)
if executing_eagerly:
return outputs
else:
for i, shape in enumerate(self._output_shapes):
outputs[i].set_shape(shape)
for i, func_graph_output in enumerate(self._func_graph_outputs):
_copy_handle_data(func_graph_output, outputs[i])
return outputs
def _flatten(sequence):
"""A wrapper around `nest.flatten` that also unpacks `IndexedSlices`."""
# TODO(akshayka): Support `SparseTensor` in a similar fashion.
flat_sequence = nest.flatten(sequence)
outputs = []
for item in flat_sequence:
if isinstance(item, ops.IndexedSlices):
if item.dense_shape is not None:
outputs.extend([item.values, item.indices, item.dense_shape])
else:
outputs.extend([item.values, item.indices])
else:
outputs.append(item)
return outputs
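# A flattening sketch: an `IndexedSlices` with a known `dense_shape` expands
# to three tensors, so a structure like [t, slices] becomes
# [t, slices.values, slices.indices, slices.dense_shape], whereas plain
# `nest.flatten` would keep `slices` as a single element.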
class Function(object):
"""Callable object encapsulating a function definition and its gradient.
`Function` is a callable that encapsulates a function definition and
is differentiable under `tf.GradientTape` objects.
"""
def __init__(self, func_graph, attrs=None):
"""Initialize a Function.
Args:
func_graph: An instance of FuncGraph: the function body to wrap.
attrs: (optional) dict mapping names of attributes to their AttrValue
values. Attributes in `attrs` will be included in this function's
definition.
Raises:
ValueError: If number of input_placeholders is not equal to the number
of function inputs.
"""
self._func_graph = func_graph
self._captured_inputs = list(self._func_graph.captures.keys())
self._num_outputs = len(self._func_graph.outputs)
self._output_shapes = tuple(
output.shape for output in self._func_graph.outputs)
self._attrs = _parse_func_attrs(attrs or {})
self._device_functions = tuple(
self._func_graph._device_functions_outer_to_inner) # pylint: disable=protected-access
self._inference_function = _EagerDefinedFunction(
_inference_name(self._func_graph.name), self._func_graph,
self._func_graph.inputs, self._func_graph.outputs, self._attrs)
self._backward_graph_function = None
# Map holding distributed variables, keyed by resource handle tensors.
self._distributed_variables = {}
strategy = distribution_strategy_context.get_distribution_strategy()
for variable in self._func_graph.variables:
# If variable is not distributed, unwrap returns [variable].
component_variables = strategy.unwrap(variable)
# Only update the dictionary when the variable is actually distributed.
if (len(component_variables) > 1 or component_variables[0] != variable):
for component_variable in component_variables:
self._distributed_variables[component_variable.handle] = variable
def __call__(self, *args):
"""Executes the wrapped function.
Args:
*args: a list of Tensors or Variables.
Returns:
The result of applying the TF function to `args`.
Raises:
ValueError: If the current device stack does not match the device stack
under which the function was created, or if `args` contains anything
other than Tensors or Variables.
"""
ctx = context.context()
device_functions = _get_device_functions(ctx, ops.get_default_graph())
if device_functions != self._device_functions:
raise ValueError(
"The current device stack does not match the device stack under "
"which the TensorFlow function '%s' was created.\n"
"Current device stack: %s\n%s device stack: %s" %
(self._inference_function.name, device_functions,
self._inference_function.name, self._device_functions))
for v in self._func_graph.variables:
if v.trainable:
tape.variable_accessed(v)
captures = self._resolve_captured_inputs()
tensor_inputs = []
for i, arg in enumerate(nest.flatten(args)):
if isinstance(arg, resource_variable_ops.ResourceVariable):
if arg.trainable:
tape.variable_accessed(arg)
tensor_inputs.append(arg.handle)
elif isinstance(arg, ops.Tensor):
tensor_inputs.append(arg)
else:
raise ValueError("All inputs to `Function`s must be Tensors; "
"on invocation of %s, the %d-th input (%s) was not a "
"Tensor." % (self._func_graph.name, i, str(arg)))
args = tensor_inputs + captures
if tape.should_record(tensor_inputs) or tape.should_record(captures):
return self._backprop_call(args)
outputs = self._inference_function.call(ctx, args)
return self._build_call_outputs(outputs)
@property
def graph(self):
"""Returns the graph from which this function was constructed."""
return self._func_graph
@property
def inputs(self):
"""Returns tensors in `self.graph` corresponding to arguments."""
return self._func_graph.inputs
@property
def outputs(self):
"""Returns tensors in `self.graph` corresponding to return values."""
return self._func_graph.outputs
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return self._captured_inputs
@property
def function_def(self):
"""Returns a `FunctionDef` object representing this function."""
return self._inference_function.definition
@property
def output_shapes(self):
"""The function's output shapes."""
# TODO(ebrevdo): Should we only keep the output shapes associated
# with len(self._python_returns) outputs?
# TODO(akshayka): Consider removing this.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Extract the shape of the `IndexedSlices` object's `values` field.
outputs_list[i] = self._output_shapes[j] # the `values` shape
if o.dense_shape is not None:
j += 3 # skip over shapes for `values`, `indices`, `dense_shape`
else:
j += 2 # skip over shapes for `values`, `indices`
else:
outputs_list[i] = self._output_shapes[j]
j += 1
return nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
@property
def output_dtypes(self):
# TODO(akshayka): Consider removing this.
return nest.map_structure(lambda x: x.dtype if x is not None else None,
self._func_graph.structured_outputs)
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
backwards_graph = FuncGraph(_backward_name(self._func_graph.name))
forward_function_name = _forward_name(self._func_graph.name)
with backwards_graph.as_default():
gradients_wrt_outputs = [
graph_placeholder(x.dtype, x.shape) for x in self._func_graph.outputs
]
gradients_wrt_inputs = gradients_impl._GradientsHelper( # pylint: disable=protected-access
self._func_graph.outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
backwards_graph_captures = list(backwards_graph.captures.keys())
backward_function_attr = _parse_func_attrs(
{FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name})
backward_function_attr.update(self._attrs)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `self._backward_graph_function` correspond to outputs of
# `self._forward_function`.
backwards_graph.inputs = gradients_wrt_outputs + list(
backwards_graph.captures.values())
# Clear captures, since we pass them in as inputs.
backwards_graph.captures = {}
backwards_graph.outputs.extend(
grad for grad in _flatten(gradients_wrt_inputs) if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
self._backward_graph_function = Function(
backwards_graph, attrs=backward_function_attr)
forward_function_attr = _parse_func_attrs({
BACKWARD_FUNCTION_ATTRIBUTE_NAME:
self._backward_graph_function._inference_function.name}) # pylint: disable=protected-access
forward_function_attr.update(self._attrs)
self._forward_function = _EagerDefinedFunction(
forward_function_name, self._func_graph, self._func_graph.inputs,
self._func_graph.outputs + backwards_graph_captures,
forward_function_attr)
def _backprop_call(self, args):
"""Calls the forward function and records the result on a tape.
(Only records results on a tape if the function has outputs)
Args:
args: All inputs to the function, including resolved captured inputs
Returns:
The call output.
"""
if self._backward_graph_function is None:
self._construct_backprop_function()
ctx = context.context()
outputs = self._forward_function.call(ctx, args)
if isinstance(outputs, ops.Operation) or outputs is None:
return outputs
# `real_outputs` are the actual outputs of the inference graph function;
# `side_outputs` are the intermediate Tensors that were added as outputs to
# the forward graph function so that we can compute its gradient.
real_outputs = outputs[:self._num_outputs]
side_outputs = outputs[self._num_outputs:]
def backward_function(*args):
return self._backward_graph_function(*(list(args) + side_outputs)) # pylint: disable=not-callable
tape.record_operation(self._forward_function.signature.name, real_outputs,
args, backward_function)
return self._build_call_outputs(real_outputs)
def _resolve_captured_inputs(self):
"""Resolve captured distributed variables to their current values.
    Some inputs can be distributed variables. Such variables yield different
    component variables (i.e. actual tf.Variables) depending on the context of
    execution.
Returns:
a list of resolved captured input tensors.
"""
if self._distributed_variables:
# Loop over each captured input and check if it corresponds to something
# distributed. If so, get its _distributed_container and fetch the
# component appropriate for the current execution context.
resolved_captured_inputs = self._captured_inputs[:]
for i, captured_input in enumerate(self._captured_inputs):
distributed_var = self._distributed_variables.get(captured_input, None)
if distributed_var is not None:
# distributed variables override __getattr__ and substitute the
# right component variable. In here, `distributed_var.handle`
# actually does the equivalent of
# distributed_var.get_current_component_var().handle.
resolved_captured_inputs[i] = distributed_var.handle
return resolved_captured_inputs
return self._captured_inputs
def _build_call_outputs(self, result):
"""Maps the fdef output list to actual output structure.
Args:
result: Output lists defined by FunctionDef.
Returns:
The actual call output.
"""
if self._func_graph.structured_outputs is None:
return result
# Use `nest.flatten` instead of `_flatten` in order to preserve any
# IndexedSlices in `self._func_graph.structured_outputs`.
outputs_list = nest.flatten(self._func_graph.structured_outputs)
j = 0
for i, o in enumerate(outputs_list):
if o is not None:
if isinstance(o, ops.IndexedSlices):
# Repack Tensors for IndexedSlices.
if o.dense_shape is not None:
outputs_list[i] = ops.IndexedSlices(
values=result[j],
indices=result[j + 1],
dense_shape=result[j + 2])
j += 3
else:
outputs_list[i] = ops.IndexedSlices(
values=result[j], indices=result[j + 1])
j += 2
else:
outputs_list[i] = result[j]
j += 1
ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
outputs_list)
return ret
def _get_defun_inputs_from_signature(signature):
"""Maps a signature to graph-construction inputs."""
function_inputs = [
graph_placeholder(spec.dtype, spec.shape)
for spec in nest.flatten(signature)
]
return nest.pack_sequence_as(signature, function_inputs)
def _get_defun_inputs_from_args(args):
"""Maps python function args to graph-construction inputs."""
function_inputs = [
graph_placeholder(arg.dtype, arg.shape)
if isinstance(arg, ops.Tensor) else arg for arg in nest.flatten(args)
]
return nest.pack_sequence_as(args, function_inputs)
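# Sketch of the input mapping above: only Tensors are replaced, so for
# args = (some_tensor, 3, "train") the graph-construction inputs become
# (<placeholder with some_tensor's dtype/shape>, 3, "train").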
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
experimental_autograph=False):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
experimental_autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None` nor a
`Tensor`.
"""
if func_graph is None:
func_graph = FuncGraph(name)
assert isinstance(func_graph, FuncGraph)
with func_graph.as_default(), AutomaticControlDependencies() as a:
variable_scope.get_variable_scope().set_use_resource(True)
if signature is None:
func_args = _get_defun_inputs_from_args(args)
func_kwargs = _get_defun_inputs_from_args(kwargs)
else:
func_args = _get_defun_inputs_from_signature(signature)
func_kwargs = {}
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, nest.flatten(func_kwargs))
def convert(x):
"""Converts an argument to a Tensor."""
if x is None:
return None
try:
x = ops.convert_to_tensor_or_indexed_slices(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.contrib.eager.defun, Python functions "
"must return zero or more Tensors; in compilation of %s, found "
"return value of type %s, which is not a Tensor." %
(str(python_func), type(x)))
x = a.mark_as_return(x)
return x
this_tape = tape.push_new_tape()
try:
if experimental_autograph:
func_outputs = autograph.converted_call(
python_func,
autograph.ConversionOptions(
verbose=True,
recursive=True,
force_conversion=False,
strip_decorators=(defun,),
arg_types={}), *func_args, **func_kwargs)
else:
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors and `None`s.
func_outputs = nest.map_structure(convert, func_outputs)
def check_mutation(n1, n2):
"""Check if two list of arguments are exactly the same."""
errmsg = ("Function to be traced should not modify structure of input "
"arguments. Check if your function has list and dictionary "
"operations that alter input arguments, "
"such as `list.pop`, `list.append`")
try:
nest.assert_same_structure(n1, n2)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):
if arg1 is not arg2:
raise ValueError(errmsg)
check_mutation(func_args_before, func_args)
check_mutation(func_kwargs_before, func_kwargs)
finally:
tape.pop_tape(this_tape)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
tape_variables = this_tape.watched_variables()
arg_variables = set()
inputs = []
for arg in nest.flatten(func_args) + nest.flatten(func_kwargs):
if isinstance(arg, resource_variable_ops.ResourceVariable):
try:
resource_placeholder = func_graph.captures.pop(arg.handle)
arg_variables.add(arg)
except KeyError:
# This case occurs if a Variable among the inputs is not actually
# used by the function; we still add an explicit input for it
# because the user should presumably pass the Variable as an input
# to the corresponding graph function.
resource_placeholder = _create_substitute_placeholder(arg.handle)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in tape_variables if v not in arg_variables]
func_graph.inputs = inputs + list(func_graph.captures.values())
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in _flatten(func_graph.structured_outputs)
if x is not None)
# Some captured variables might be components of DistributedValues.
# Instead of storing non-distributed component variables, we
# store their distributed containers so we can retrieve the correct
# component variables at call-time.
strategy = distribution_strategy_context.get_distribution_strategy()
for i, variable in enumerate(variables):
    # If the variable is not distributed, value_container returns it as-is.
variables[i] = strategy.value_container(variable)
func_graph.variables = variables
# Register any other functions defined in the graph.
if context.executing_eagerly():
for f in func_graph._functions.values(): # pylint: disable=protected-access
# TODO(ashankar): What about the gradient registry?
_register(f._c_func.func) # pylint: disable=protected-access
return func_graph
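# A hedged usage sketch for `func_graph_from_py_func` (the traced body and
# constant input are illustrative only):
#
#   def body(x):
#     return x * 2.0
#   g = func_graph_from_py_func(
#       "body", body, args=(constant_op.constant(1.0),), kwargs={})
#   # g.inputs holds the placeholder created for `x`, and g.outputs holds
#   # the traced `x * 2.0` tensor.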
pywrap_tensorflow.RegisterType("Tensor", ops.Tensor)
pywrap_tensorflow.RegisterType("IndexedSlices", ops.IndexedSlices)
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
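# e.g. _deterministic_dict_values({"b": 2, "a": 1}) == (1, 2); sorting by key
# keeps the canonicalized keyword inputs deterministic across calls.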
class PolymorphicFunction(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `defun` for more information on the semantics of
defined functions.
  The PolymorphicFunction class is thread-compatible, meaning that minimal
usage of defuns (defining and calling) is thread-safe, but if users call other
methods or invoke the base `python_function` themselves, external
synchronization is necessary.
"""
def __init__(self,
python_function,
name,
input_signature=None,
attributes=None,
experimental_autograph=False):
"""Initializes a polymorphic function.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
attributes: dict, extra keyword arguments that will be added as attribute
of the function.
experimental_autograph: whether to use autograph to compile
`python_function`. See https://www.tensorflow.org/guide/autograph for
more information.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
if isinstance(python_function, functools.partial):
self._python_function = python_function.func
self._args_to_prepend = python_function.args or tuple()
self._kwargs_to_include = python_function.keywords or {}
else:
self._python_function = python_function
self._args_to_prepend = tuple()
self._kwargs_to_include = {}
self._name = name
self._experimental_autograph = experimental_autograph
self._function_cache = collections.OrderedDict()
self._function_attributes = attributes or {}
self._lock = threading.Lock()
fullargspec = tf_inspect.getfullargspec(self._python_function)
if tf_inspect.ismethod(self._python_function):
# Remove `self`: default arguments shouldn't be matched to it.
args = fullargspec.args[1:]
else:
args = fullargspec.args
# A cache mapping from argument name to index, for canonicalizing
# arguments that are called in a keyword-like fashion.
self._args_to_indices = {arg: i for i, arg in enumerate(args)}
# A cache mapping from arg index to default value, for canonicalization.
offset = len(args) - len(fullargspec.defaults or [])
self._arg_indices_to_default_values = {
offset + index: default
for index, default in enumerate(fullargspec.defaults or [])
}
self._default_values = fullargspec.defaults
self._default_values_start_index = offset
if input_signature is None:
self._input_signature = None
else:
if fullargspec.varkw is not None or fullargspec.kwonlyargs:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
if not isinstance(input_signature, (tuple, list)):
raise TypeError("input_signature must be either a tuple or a "
"list, received " + str(type(input_signature)))
self._input_signature = tuple(input_signature)
self._flat_input_signature = tuple(nest.flatten(input_signature))
def __call__(self, *args, **kwargs):
"""Calls a graph function specialized to the inputs."""
graph_function, inputs = self._maybe_define_function(args, kwargs)
return graph_function(*inputs)
@property
def python_function(self):
"""Returns the wrapped Python function."""
return self._python_function
def get_concrete_function(self, *args, **kwargs):
"""Returns a `Function` object specialized to inputs and execution context.
`args` and `kwargs` are ignored if this `PolymorphicFunction` was created
with an `input_signature`.
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
"""
graph_function, _ = self._maybe_define_function(args, kwargs)
return graph_function
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `PolymorphicFunction` was
# accessed through; e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `PolymorphicFunction` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`).
return functools.partial(self.__call__, instance)
def _cache_key(self, args, kwargs):
"""Computes the cache key given inputs and execution context."""
if self._input_signature is None:
inputs = (args, kwargs) if kwargs else args
cache_key = pywrap_tensorflow.TFE_Py_EncodeArg(inputs)
else:
del args, kwargs
cache_key = self._flat_input_signature
ctx = context.context()
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
execution_context = executing_eagerly or ops.get_default_graph()
if executing_eagerly:
device_functions = (pydev.merge_device(ctx.device_name),)
colocation_stack = ()
else:
default_graph = ops.get_default_graph()
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner) # pylint: disable=protected-access
colocation_stack = tuple(default_graph._colocation_stack.peek_objs()) # pylint: disable=protected-access
return (cache_key, execution_context, device_functions, colocation_stack)
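  # Cache-key sketch: in eager mode, F(tf.random_uniform([2])) and
  # F(tf.random_uniform([3])) encode different input shapes and therefore map
  # to different cache keys (and separate graph functions), while a second
  # call with shape [2] reuses the first entry.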
def _canonicalize_function_inputs(self, *args, **kwargs):
"""Canonicalizes `args` and `kwargs`.
Canonicalize the inputs to the Python function using its fullargspec. In
    particular, we parse the varargs and kwargs that this
`PolymorphicFunction` was called with into a tuple corresponding to the
Python function's positional (named) arguments and a dictionary
corresponding to its kwargs.
Args:
*args: The varargs this object was called with.
**kwargs: The keyword args this function was called with.
Returns:
A canonicalized ordering of the inputs.
Raises:
ValueError: If a keyword in `kwargs` cannot be matched with a positional
argument when an input signature is specified, or when the inputs
do not conform to the input signature.
"""
args = self._args_to_prepend + args
kwargs = dict(kwargs, **self._kwargs_to_include)
if not kwargs:
if self._default_values:
inputs = args + self._default_values[len(args) -
self._default_values_start_index:]
else:
inputs = args
else:
# Maps from index of arg to its corresponding value, according to `args`
# and `kwargs`; seeded with the default values for the named args that
# aren't in `args`.
arg_indices_to_values = {
index: default for index, default in six.iteritems(
self._arg_indices_to_default_values) if index >= len(args)
}
consumed_args = []
for arg, value in six.iteritems(kwargs):
index = self._args_to_indices.get(arg, None)
if index is not None:
arg_indices_to_values[index] = value
consumed_args.append(arg)
elif self._input_signature is not None:
raise ValueError("Cannot define a TensorFlow function from a Python "
"function with keyword arguments when "
"input_signature is provided.")
for arg in consumed_args:
# After this loop, `kwargs` will only contain true keyword arguments, as
# opposed to named arguments called in a keyword-like fashion.
kwargs.pop(arg)
inputs = args + _deterministic_dict_values(arg_indices_to_values)
flat_inputs = nest.flatten(inputs)
# Check for NumPy arrays in arguments and convert them to Tensors.
# TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps
# finding a way to store them directly in the cache key (currently not
# possible since ndarrays are not hashable).
need_packing = False
for index, value in enumerate(flat_inputs):
if type(value) == np.ndarray:
flat_inputs[index] = constant_op.constant(value)
need_packing = True
if need_packing:
inputs = nest.pack_sequence_as(structure=inputs,
flat_sequence=flat_inputs)
if self._input_signature is None:
return inputs, kwargs
else:
assert not kwargs
try:
nest.assert_same_structure(self._input_signature, inputs)
except (ValueError, TypeError):
raise ValueError("Structure of Python function inputs does not match "
"input_signature.")
if any(not isinstance(arg, ops.Tensor) for arg in flat_inputs):
raise ValueError("When input_signature is provided, all inputs to "
"the Python function must be Tensors.")
tensor_specs = [
tensor_spec.TensorSpec.from_tensor(tensor) for tensor in flat_inputs
]
if any(not spec.is_compatible_with(other)
for spec, other in zip(self._flat_input_signature, tensor_specs)):
raise ValueError("Python inputs incompatible with input_signature: "
"inputs (%s), input_signature (%s)" %
(str(inputs), str(self._input_signature)))
return inputs, {}
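  # Canonicalization sketch for a hypothetical wrapped function
  # `def f(a, b=1, c=2)`: a call like F(5, c=3) is canonicalized to
  # inputs == (5, 1, 3) with empty kwargs, so F(5, c=3) and F(5, 1, 3)
  # resolve to the same cache entry.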
def _maybe_define_function(self, args, kwargs):
"""Gets a function for these inputs, defining it if necessary.
`args` and `kwargs` can be None if this `PolymorphicFunction` was created
with an `input_signature`.
Args:
args: The varargs for the Python function.
kwargs: The keyword args for the Python function.
Returns:
A graph function corresponding to the input signature implied by args and
kwargs, as well as the inputs that the object should be called with.
Raises:
ValueError: If inputs are incompatible with the input signature.
TypeError: If the function inputs include non-hashable objects
"""
if self._input_signature is None or args is not None or kwargs is not None:
args, kwargs = self._canonicalize_function_inputs(*args, **kwargs)
cache_key = self._cache_key(args, kwargs)
with self._lock:
try:
graph_function = self._function_cache.get(cache_key, None)
except TypeError:
raise TypeError("Arguments supplied to `defun`-generated functions "
"must be hashable.")
if graph_function is None:
graph_function = Function(
func_graph_from_py_func(
self._name,
self._python_function,
args,
kwargs,
self._input_signature,
experimental_autograph=self._experimental_autograph),
self._function_attributes)
self._function_cache[cache_key] = graph_function
return graph_function, [
t for t in nest.flatten((args, kwargs))
if isinstance(t, (ops.Tensor, resource_variable_ops.ResourceVariable))
]
def register(func, *args, **kwargs):
"""Register the defun function into the graph.
  This won't actually call the function with the inputs; it only puts the
  function definition into the graph. Registering the function with different
  input parameters will result in multiple versions of the function being
  registered in the graph.
Args:
    func: the PolymorphicFunction instance generated by a @defun decorator.
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `Function` object specialized to inputs and execution context.
Raises:
    ValueError: When the input function is not a defun-wrapped Python function.
"""
if not isinstance(func, PolymorphicFunction):
raise ValueError("Only defun function is allowed to be registered. "
"Got type: %s" % type(func))
concrete_func = func.get_concrete_function(*args, **kwargs)
graph = ops.get_default_graph()
# There are two situations for the actual call of a defun:
# 1. If none of the input args are resource variables or watched by any tape,
# it will run the _inference_function of concrete_func for the forward pass,
# and the gradient will be generated by the standard mechanism.
# 2. Otherwise, defun will create two functions, one for the forward pass, and
# the backward pass will be created via tape.
# When registering the function, we put both cases into graph.
# pylint: disable=protected-access
concrete_func._inference_function.add_to_graph(graph)
if concrete_func._backward_graph_function is None:
concrete_func._construct_backprop_function()
forward_function = concrete_func._forward_function
backward_function = concrete_func._backward_graph_function._inference_function
forward_function.add_to_graph(graph)
backward_function.add_to_graph(graph)
# pylint: enable=protected-access
return concrete_func
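# A hedged usage sketch for `register` (the function body and shapes are
# illustrative; `tf` refers to the public TensorFlow API):
#
#   @defun
#   def add_fn(x, y):
#     return x + y
#
#   with tf.Graph().as_default():
#     x = tf.placeholder(tf.float32, [2])
#     register(add_fn, x, x)  # adds forward and backward defs to the graph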
def _validate_signature(signature):
if any(not isinstance(arg, tensor_spec.TensorSpec)
for arg in nest.flatten(signature)):
raise TypeError("Invalid input_signature %s; input_signature must be "
"a possibly nested sequence of TensorSpec objects.")
def defun(func=None, input_signature=None, experimental_autograph=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") trace-compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects. In particular, `defun` is _not_ a
compiler for arbitrary Python code.
When eager execution is enabled, the ability to create graphs from Python
  functions makes it possible to incrementally trade off debuggability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb` and `print` statements; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
_Example Usage_
```python
import tensorflow as tf
tf.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
  `F(tf.random_uniform([2]))` will execute a different graph than
  `F(tf.random_uniform([3]))` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
  words = tf.random_uniform([50, 300, 10])
second_input = tf.random_uniform([300, 100])
my_sequence_model(words, second_input)
  words = tf.random_uniform([50, 300, 20])
my_sequence_model(words, second_input)
# Passing an input with an incompatible shape will raise an error.
  words = tf.random_uniform([50, 100, 20])
my_sequence_model(words, second_input) # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
  will return a different output every time it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
_Python Control Flow_.
  The structure of many machine learning computations depends upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
      outputs = tf.nn.dropout(outputs, keep_prob=0.2)
return outputs
W = tf.random_normal((3, 5))
x = tf.random_normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
On the other hand, because `defun` generates graphs by tracing and not by
source code analysis, it fully unrolls Python `for` and `while` loops,
potentially creating large graphs. If your Python function has native loops
that run for many iterations, consider replacing them with `tf.while_loop`
operations.
When constructing graphs, `tf.Tensor` objects cannot be used as Python
`bool` objects. This means, for example, that you should replace code in `f`
resembling
```python
if tensor < 10:
true_fn()
else:
false_fn()
```
with `tf.cond(tensor < 10, true_fn, false_fn)`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
experimental_autograph: Whether `func` should be compiled before
constructing the graph. See https://www.tensorflow.org/guide/autograph
for more information.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(
func=func,
input_signature=input_signature,
experimental_autograph=experimental_autograph)
def defun_with_attributes(func=None,
input_signature=None,
attributes=None,
experimental_autograph=False):
"""Compiles a Python function into a callable TensorFlow graph.
This function supports adding extra function attributes. See detailed
  documentation in defun(). Currently this is not exposed in the public API
  since we don't expect users to directly use attributes, and attributes won't
  work by themselves. This assumption might change in the future.
Args:
func: function to be compiled.
input_signature: same as defun()'s input_signature.
    attributes: A dictionary of arguments which will be added to the function
      def as attributes. Currently only primitive types are supported as
      values, and only whitelisted attribute names are allowed. An
      unwhitelisted attribute name or unsupported value will result in a
      ValueError.
experimental_autograph: same as defun()'s experimental_autograph.
Returns:
Same as the return value of defun, with attributes added to the function in
graph.
"""
if input_signature is not None:
_validate_signature(input_signature)
# TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function):
try:
name = function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
function,
PolymorphicFunction(
function,
name,
input_signature=input_signature,
attributes=attributes,
experimental_autograph=experimental_autograph))
# This code path is for the `foo = tfe.defun(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tfe.defun(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tfe.defun(...)(foo)`
return decorated
class AutomaticControlDependencies(object):
"""Context manager to automatically add control dependencies.
Code under this context manager will act as if a sensible set of control
dependencies were present. More specifically:
1. All stateful ops in the scope will execute
2. Stateful ops which modify the same resource will execute in program order
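  For example (a minimal sketch, assuming graph mode and an existing
  `tf.Variable` named `v`):

  ```
  with AutomaticControlDependencies() as a:
    v.assign(1.0)        # runs even though its output is unused
    v.assign_add(2.0)    # ordered after the assign above
    out = a.mark_as_return(v.read_value())
  ```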
Note: creating variables in an automatic control dependencies context is not
supported (the value of the variables will never change as they will keep
getting reinitialized).
NOT THREAD SAFE
"""
def __init__(self):
self._returned_tensors = set()
def mark_as_return(self, tensor):
"""Acts like identity but marks the `Tensor` as a return value.
This will possibly return a copy of the `Tensor`. Usage:
```
with AutomaticControlDependencies() as a:
...
t = a.mark_as_return(t)
_ = ...(t...) # i.e. it's safe to use t here
```
Args:
tensor: the `Tensor` to be marked
Returns:
a copy of the `Tensor`.
"""
if isinstance(tensor, ops.IndexedSlices):
values = array_ops.identity(tensor.values)
indices = array_ops.identity(tensor.indices)
self._returned_tensors.add(indices)
self._returned_tensors.add(values)
return ops.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)
# We want to make the return values depend on the stateful operations, but
# we don't want to introduce a cycle, so we make the return value the result
# of a new identity operation that the stateful operations definitely don't
# depend on.
tensor = array_ops.identity(tensor)
self._returned_tensors.add(tensor)
return tensor
def __enter__(self):
if context.executing_eagerly():
return self
# This code assumes no other thread is adding ops to the graph while
# we're adding ops to the graph.
# TODO(apassos): Fix this by locking the graph or using a temporary
# graph (but that would mess up devices and collections at least,
# probably other things as well).
self._graph = ops.get_default_graph()
self._n_operations = len(self._graph.get_operations())
return self
def _process_switch(self, switch_op, ops_which_must_run,
last_op_using_resource_tensor, merge_for_resource):
"""Processes a switch node for a resource input.
When tensorflow creates a cond, it creates a control flow context for each
branch of the cond. Each external tensor accessed by that branch is routed
through a switch op, which gets created in the graph _after_ the op which
    uses that tensor gets created.
If the resource comes from another switch op we process that one first.
_process_switch creates a corresponding merge node for the switch node. This
merge node is added to the outer control flow context of the switch
node. We also ensure that:
1. The switch node executes after the previous op which used the resource
tensor
2. Any op which uses a resource output of the switch node executes before
the merge for the switch node.
3. The next op which uses the input resource to the switch node (which
might be another switch node for the other branch of the conditional)
will execute after the merge node is done.
4. The merge node is marked as must_run so it will run even if no
subsequent operation uses the resource.
Args:
switch_op: the switch op to be processed
ops_which_must_run: the set of ops which must run
last_op_using_resource_tensor: map from resource tensor to last op using
it
merge_for_resource: map from resource tensor to merge which must follow
all usages of it.
"""
inp = switch_op.inputs[0]
if inp.dtype == dtypes_module.resource and inp.op.type == "Switch":
self._process_switch(inp.op, ops_which_must_run,
last_op_using_resource_tensor, merge_for_resource)
if switch_op.outputs[0] in merge_for_resource:
return
new_merge = control_flow_ops.merge(switch_op.outputs,
name="artificial_merge")
new_merge[0].op._control_flow_context = ( # pylint: disable=protected-access
switch_op._control_flow_context.outer_context) # pylint: disable=protected-access
# Ensures the merge always runs
ops_which_must_run.add(new_merge[0].op)
if inp in last_op_using_resource_tensor:
# Ensures the switch executes after the previous op using the resource.
switch_op._add_control_input(last_op_using_resource_tensor[inp]) # pylint: disable=protected-access
# Ensure the next op outside the cond happens after the merge.
last_op_using_resource_tensor[inp] = new_merge[0].op
if inp in merge_for_resource:
merge_for_resource[inp]._add_control_input(new_merge[0].op) # pylint: disable=protected-access
for o in switch_op.outputs:
# Ensures the merge will execute after all ops inside the cond
merge_for_resource[o] = new_merge[0].op
def __exit__(self, unused_type, unused_value, unused_traceback):
if context.executing_eagerly():
return
if self._graph is not ops.get_default_graph():
raise RuntimeError(
"Graph changed while trying to add control dependencies.")
# map from resource tensor to the last op which used it
last_op_using_resource_tensor = {}
# set of conditional and loop exits
ops_which_must_run = set()
# merge which must depend on ops which use this resource
merge_for_resource = {}
new_operations = self._graph.get_operations()[self._n_operations:]
# Ensures that uses of resource tensors get serialized properly and all
# execute. This is done by keeping a map from resource tensor to the last op
# in graph-construction order which used it (last_op_using_resource_tensor).
#
# Conditionals are written in TensorFlow such that every external tensor
# accessed in the conditional goes through a switch op and every return
# tensor (it's guaranteed that there will be at least one) goes through a
# merge op.
#
# To handle conditionals, switches are handled in a special way (see
# comments for _process_switch). Merge nodes created by TF's conditional
# logic (as opposed to by _process_switch) are forced to run and also get a
# control dependency added to them to ensure all stateful ops inside their
# control flow context run.
#
# We also ensure that if an op is using a resource output by a switch node
# (that is, a resource tensor for which there's a value in
# merge_for_resource) this op will run before the merge for that resource.
#
# We try to add control inputs to nodes respecting their control flow
# contexts to avoid dead nodes propagating everywhere and leading to
# "retval[0] doesn't have value" errors. If a node gets a control dependency
    # on a dead node (i.e. a node from an untaken control flow branch) that node
# will be marked as dead unless it's a merge node.
#
# TODO(apassos): serialize non-resource-taking stateful ops as well, and
# test that it works. Support while loops. Support init_scope escaping from
# this.
for op in new_operations:
# TODO(apassos) make this code safely support while loops.
if isinstance(op._control_flow_context, control_flow_ops.WhileContext): # pylint: disable=protected-access
continue
control_inputs = set()
# Ensure stateful ops run
if (op.type not in self._graph._registered_ops # pylint: disable=protected-access
or self._graph._registered_ops[op.type].is_stateful): # pylint: disable=protected-access
ops_which_must_run.add(op)
# Ignore switches (they're handled separately)
if op.type == "Switch" and op.inputs[0].dtype == dtypes_module.resource:
continue
# Make merges trigger all other computation which must run
if op.type == "Merge":
for o in ops_which_must_run:
op._add_control_input(o) # pylint: disable=protected-access
for inp in o.inputs:
if inp in last_op_using_resource_tensor:
last_op_using_resource_tensor[inp] = op
ops_which_must_run = set([op])
continue
found_resource = False
for inp in op.inputs:
if inp.dtype == dtypes_module.resource:
found_resource = True
# Deal with switches, finally.
if inp.op.type == "Switch":
self._process_switch(inp.op, ops_which_must_run,
last_op_using_resource_tensor,
merge_for_resource)
# Ensure uses of resources are serialized
if inp in last_op_using_resource_tensor:
if (last_op_using_resource_tensor[inp]._control_flow_context # pylint: disable=protected-access
is op._control_flow_context): # pylint: disable=protected-access
control_inputs.add(last_op_using_resource_tensor[inp])
# Ensure merges happen after the closing of a cond block
if inp in merge_for_resource:
merge_for_resource[inp]._add_control_input(op) # pylint: disable=protected-access
last_op_using_resource_tensor[inp] = op
if (op.op_def.is_stateful and not found_resource
and op._control_flow_context is None): # pylint: disable=protected-access
if None in last_op_using_resource_tensor:
op._add_control_input(last_op_using_resource_tensor[None]) # pylint: disable=protected-access
last_op_using_resource_tensor[None] = op
control_inputs = [c for c in control_inputs
if c._control_flow_context is op._control_flow_context] # pylint: disable=protected-access
op._add_control_inputs(control_inputs) # pylint: disable=protected-access
# Ensure all ops which must run do run
for r in self._returned_tensors:
if ops_which_must_run:
r.op._add_control_inputs( # pylint: disable=protected-access
[o for o in ops_which_must_run
if o._control_flow_context is r.op._control_flow_context]) # pylint: disable=protected-access
def automatic_control_dependencies(f):
"""Wraps f to automatically insert control dependencies.
The inserted dependencies ensure that:
1. All stateful ops in f run when the result of f runs
2. Updates to the same resources happen in order.
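  For example (a minimal sketch, assuming graph mode and an existing
  `tf.Variable` named `v`):

  ```
  @automatic_control_dependencies
  def update_and_read():
    v.assign_add(1.0)      # forced to run when the result runs
    return v.read_value()  # ordered after the increment above
  ```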
Args:
f: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
with AutomaticControlDependencies() as a:
result = f(*args, **kwargs)
result_flat = [a.mark_as_return(t) for t in nest.flatten(result)]
return nest.pack_sequence_as(result, result_flat)
return tf_decorator.make_decorator(f, wrapper)
| apache-2.0 |
kisom/pypcapfile | pcapfile/test/fixture.py | 3 | 14109 | """
Test fixtures for the pypcapfile test suite.
"""
TESTPCAP2 = b"""
UydceGQ0XHhjM1x4YjJceGExXHgwMlx4MDBceDA0XHgwMFx4MDBceDAwXHgwMFx4MDBceDAwXHgw
MFx4MDBceDAwXHgwMFx4MDFceDAwXHgwMFx4MDFceDAwXHgwMFx4MDB4XHhiM1x4MWFQXHg5MD5c
eDAwXHgwME5ceDAwXHgwMFx4MDBOXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgxMVx4MWNceGMwXHgw
MCZceGIwXHhmNFx4ZmNceGI4XHgwOFx4MDBFXHgwMFx4MDBAXHhiY0hAXHgwMEBceDA2XHhjOVx4
YTNceGMwXHhhOFx4MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU6XHgwMFBFYVx4ZTlceGIwXHgwMFx4
MDBceDAwXHgwMFx4YjBceDAyXHhmZlx4ZmZceGM0XHhkNlx4MDBceDAwXHgwMlx4MDRceDA1XHhi
NFx4MDFceDAzXHgwM1x4MDFceDAxXHgwMVx4MDhcblx4MTZceGVlXHhhMVx4ZDNceDAwXHgwMFx4
MDBceDAwXHgwNFx4MDJceDAwXHgwMHhceGIzXHgxYVBceGZjXHhlMFx4MDBceDAwSlx4MDBceDAw
XHgwMEpceDAwXHgwMFx4MDBceDAwJlx4YjBceGY0XHhmY1x4YjhceGMwP1x4MGVceDExXHgxY1x4
YzBceDA4XHgwMEVceDAwXHgwMDxceDAwXHgwMEBceDAwMVx4MDZceDk0XHhmMDJceDEyXHhjMFx4
ZmJceGMwXHhhOFx4MDFceDE2XHgwMFBceGQ1Olx4YTNEXHhlNFx4ZGVFYVx4ZTlceGIxXHhhMFx4
MTJceDE2XHhhMFx4OGFceDk5XHgwMFx4MDBceDAyXHgwNFx4MDVceGI0XHgwNFx4MDJceDA4XG5c
eDA2XHhkMVx4YTVceDk2XHgxNlx4ZWVceGExXHhkM1x4MDFceDAzXHgwM1x4MDd4XHhiM1x4MWFQ
PFx4ZTFceDAwXHgwMEJceDAwXHgwMFx4MDBCXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgxMVx4MWNc
eGMwXHgwMCZceGIwXHhmNFx4ZmNceGI4XHgwOFx4MDBFXHgwMFx4MDA0XHg5OT5AXHgwMEBceDA2
XHhlY1x4YjlceGMwXHhhOFx4MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU6XHgwMFBFYVx4ZTlceGIx
XHhhM0RceGU0XHhkZlx4ODBceDEwXHg4Mlx4MThNXHhjNFx4MDBceDAwXHgwMVx4MDFceDA4XG5c
eDE2XHhlZVx4YTFceGZjXHgwNlx4ZDFceGE1XHg5NnhceGIzXHgxYVBceDk4XHhlMlx4MDBceDAw
XHgwMFx4MDFceDAwXHgwMDNceDAxXHgwMFx4MDBceGMwP1x4MGVceDExXHgxY1x4YzBceDAwJlx4
YjBceGY0XHhmY1x4YjhceDA4XHgwMEVceDAwXHgwMSVPcEBceDAwQFx4MDY1XHg5N1x4YzBceGE4
XHgwMVx4MTYyXHgxMlx4YzBceGZiXHhkNTpceDAwUEVhXHhlOVx4YjFceGEzRFx4ZTRceGRmXHg4
MFx4MThceDgyXHgxOFx4OTZceGU2XHgwMFx4MDBceDAxXHgwMVx4MDhcblx4MTZceGVlXHhhMVx4
ZmNceDA2XHhkMVx4YTVceDk2R0VUIC8gSFRUUC8xLjBcclxuSG9zdDogd3d3LmR1Y2tkdWNrZ28u
Y29tXHJcbkFjY2VwdDogdGV4dC9odG1sLCB0ZXh0L3BsYWluLCB0ZXh0L2NzcywgdGV4dC9zZ21s
LCAqLyo7cT0wLjAxXHJcbkFjY2VwdC1FbmNvZGluZzogZ3ppcCwgY29tcHJlc3MsIGJ6aXAyXHJc
bkFjY2VwdC1MYW5ndWFnZTogZW5cclxuVXNlci1BZ2VudDogTHlueC8yLjguN3J4XHhiM1x4MWFQ
Y1x4ODJceDAxXHgwMEJceDAwXHgwMFx4MDBCXHgwMFx4MDBceDAwXHgwMCZceGIwXHhmNFx4ZmNc
eGI4XHhjMD9ceDBlXHgxMVx4MWNceGMwXHgwOFx4MDBFXHgwMFx4MDA0XHhhN1x4MTNAXHgwMDFc
eDA2XHhlZFx4ZTQyXHgxMlx4YzBceGZiXHhjMFx4YThceDAxXHgxNlx4MDBQXHhkNTpceGEzRFx4
ZTRceGRmRWFceGVhXHhhMlx4ODBceDEwXHgwMDZceGNlXHhiMVx4MDBceDAwXHgwMVx4MDFceDA4
XG5ceDA2XHhkMVx4YTVceDlhXHgxNlx4ZWVceGExXHhmY3hceGIzXHgxYVBceGUwXHg5Zlx4MDFc
eDAwXHgwMFx4MDFceDAwXHgwMFx4ZWVceDAxXHgwMFx4MDBceDAwJlx4YjBceGY0XHhmY1x4Yjhc
eGMwP1x4MGVceDExXHgxY1x4YzBceDA4XHgwMEVceDAwXHgwMVx4ZTBceGE3XHgxNEBceDAwMVx4
MDZceGVjNzJceDEyXHhjMFx4ZmJceGMwXHhhOFx4MDFceDE2XHgwMFBceGQ1Olx4YTNEXHhlNFx4
ZGZFYVx4ZWFceGEyXHg4MFx4MThceDAwNkNceGE2XHgwMFx4MDBceDAxXHgwMVx4MDhcblx4MDZc
eGQxXHhhNVx4OWFceDE2XHhlZVx4YTFceGZjSFRUUC8xLjEgMzAxIE1vdmVkIFBlcm1hbmVudGx5
XHJcblNlcnZlcjogbmdpbnhcclxuRGF0ZTogVGh1LCAwMiBBdWcgMjAxMiAxNzowNjowMCBHTVRc
clxuQ29udGVudC1UeXBlOiB0ZXh0L2h0bWxcclxuQ29udGVudC1MZW5ndGg6IDE3OFxyXG5Db25u
ZWN0aW9uOiBjbG9zZVxyXG5Mb2NhdGlvbjogaHR0cDovL2R1Y2tkdWNrZ28uY29tL1xyXG5FeHBp
cmVzeFx4YjNceDFhUFx4ZTNceDlmXHgwMVx4MDBCXHgwMFx4MDBceDAwQlx4MDBceDAwXHgwMFx4
MDAmXHhiMFx4ZjRceGZjXHhiOFx4YzA/XHgwZVx4MTFceDFjXHhjMFx4MDhceDAwRVx4MDBceDAw
NFx4YTdceDE1QFx4MDAxXHgwNlx4ZWRceGUyMlx4MTJceGMwXHhmYlx4YzBceGE4XHgwMVx4MTZc
eDAwUFx4ZDU6XHhhM0RceGU2XHg4YkVhXHhlYVx4YTJceDgwXHgxMVx4MDA2XHhjZFx4MDRceDAw
XHgwMFx4MDFceDAxXHgwOFxuXHgwNlx4ZDFceGE1XHg5YVx4MTZceGVlXHhhMVx4ZmN4XHhiM1x4
MWFQXHgwYlx4YTBceDAxXHgwMEJceDAwXHgwMFx4MDBCXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgx
MVx4MWNceGMwXHgwMCZceGIwXHhmNFx4ZmNceGI4XHgwOFx4MDBFXHgwMFx4MDA0XHhmZThAXHgw
MEBceDA2XHg4N1x4YmZceGMwXHhhOFx4MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU6XHgwMFBFYVx4
ZWFceGEyXHhhM0RceGU2XHg4Ylx4ODBceDEwXHg4MUJLXHhjOVx4MDBceDAwXHgwMVx4MDFceDA4
XG5ceDE2XHhlZVx4YTIsXHgwNlx4ZDFceGE1XHg5YXhceGIzXHgxYVA1XHhhMFx4MDFceDAwQlx4
MDBceDAwXHgwMEJceDAwXHgwMFx4MDBceGMwP1x4MGVceDExXHgxY1x4YzBceDAwJlx4YjBceGY0
XHhmY1x4YjhceDA4XHgwMEVceDAwXHgwMDRceDAzXG5AXHgwMEBceDA2XHg4Mlx4ZWVceGMwXHhh
OFx4MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU6XHgwMFBFYVx4ZWFceGEyXHhhM0RceGU2XHg4Y1x4
ODBceDEwXHg4MUJLXHhjOFx4MDBceDAwXHgwMVx4MDFceDA4XG5ceDE2XHhlZVx4YTIsXHgwNlx4
ZDFceGE1XHg5YXhceGIzXHgxYVBgXHhhMlx4MDFceDAwQlx4MDBceDAwXHgwMEJceDAwXHgwMFx4
MDBceGMwP1x4MGVceDExXHgxY1x4YzBceDAwJlx4YjBceGY0XHhmY1x4YjhceDA4XHgwMEVceDAw
XHgwMDQyXHgwNEBceDAwQFx4MDZTXHhmNFx4YzBceGE4XHgwMVx4MTYyXHgxMlx4YzBceGZiXHhk
NTpceDAwUEVhXHhlYVx4YTJceGEzRFx4ZTZceDhjXHg4MFx4MTFceDgyXHgxOEpceGYxXHgwMFx4
MDBceDAxXHgwMVx4MDhcblx4MTZceGVlXHhhMixceDA2XHhkMVx4YTVceDlheFx4YjNceDFhUFx4
OTA4XHgwNFx4MDBCXHgwMFx4MDBceDAwQlx4MDBceDAwXHgwMFx4MDAmXHhiMFx4ZjRceGZjXHhi
OFx4YzA/XHgwZVx4MTFceDFjXHhjMFx4MDhceDAwRVx4MDBceDAwNFx4YTdceDE2QFx4MDAxXHgw
Nlx4ZWRceGUxMlx4MTJceGMwXHhmYlx4YzBceGE4XHgwMVx4MTZceDAwUFx4ZDU6XHhhM0RceGU2
XHg4Y0VhXHhlYVx4YTNceDgwXHgxMFx4MDA2XHhjY1x4Y2NceDAwXHgwMFx4MDFceDAxXHgwOFxu
XHgwNlx4ZDFceGE1XHhhMVx4MTZceGVlXHhhMix6XHhiM1x4MWFQXHhhYVx4YWNceDAxXHgwME5c
eDAwXHgwMFx4MDBOXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgxMVx4MWNceGMwXHgwMCZceGIwXHhm
NFx4ZmNceGI4XHgwOFx4MDBFXHgwMFx4MDBAXHhkZFJAXHgwMEBceDA2XHhhOFx4OTlceGMwXHhh
OFx4MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU7XHgwMFAgUFx4YjdceGI1XHgwMFx4MDBceDAwXHgw
MFx4YjBceDAyXHhmZlx4ZmZceDEzXHhiZlx4MDBceDAwXHgwMlx4MDRceDA1XHhiNFx4MDFceDAz
XHgwM1x4MDFceDAxXHgwMVx4MDhcblx4MTZceGVlXHhhOVx4ZjZceDAwXHgwMFx4MDBceDAwXHgw
NFx4MDJceDAwXHgwMHpceGIzXHgxYVBBSlx4MDJceDAwSlx4MDBceDAwXHgwMEpceDAwXHgwMFx4
MDBceDAwJlx4YjBceGY0XHhmY1x4YjhceGMwP1x4MGVceDExXHgxY1x4YzBceDA4XHgwMEVceDAw
XHgwMDxceDAwXHgwMEBceDAwMVx4MDZceDk0XHhmMDJceDEyXHhjMFx4ZmJceGMwXHhhOFx4MDFc
eDE2XHgwMFBceGQ1O1x4YjhceGY2XHg5MFcgUFx4YjdceGI2XHhhMFx4MTJceDE2XHhhMFx4MTdc
eDg2XHgwMFx4MDBceDAyXHgwNFx4MDVceGI0XHgwNFx4MDJceDA4XG5ceDA2XHhkMVx4YTZnXHgx
Nlx4ZWVceGE5XHhmNlx4MDFceDAzXHgwM1x4MDd6XHhiM1x4MWFQe0pceDAyXHgwMEJceDAwXHgw
MFx4MDBCXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgxMVx4MWNceGMwXHgwMCZceGIwXHhmNFx4ZmNc
eGI4XHgwOFx4MDBFXHgwMFx4MDA0VVJAXHgwMEBceDA2MFx4YTZceGMwXHhhOFx4MDFceDE2Mlx4
MTJceGMwXHhmYlx4ZDU7XHgwMFAgUFx4YjdceGI2XHhiOFx4ZjZceDkwWFx4ODBceDEwXHg4Mlx4
MThceGRhXHhiMFx4MDBceDAwXHgwMVx4MDFceDA4XG5ceDE2XHhlZVx4YWFceDFmXHgwNlx4ZDFc
eGE2Z3pceGIzXHgxYVBceGRmTlx4MDJceDAwXHgwMFx4MDFceDAwXHgwMC9ceDAxXHgwMFx4MDBc
eGMwP1x4MGVceDExXHgxY1x4YzBceDAwJlx4YjBceGY0XHhmY1x4YjhceDA4XHgwMEVceDAwXHgw
MSFPW0BceDAwQFx4MDY1XHhiMFx4YzBceGE4XHgwMVx4MTYyXHgxMlx4YzBceGZiXHhkNTtceDAw
UCBQXHhiN1x4YjZceGI4XHhmNlx4OTBYXHg4MFx4MThceDgyXHgxOFx4MTJ8XHgwMFx4MDBceDAx
XHgwMVx4MDhcblx4MTZceGVlXHhhYSBceDA2XHhkMVx4YTZnR0VUIC8gSFRUUC8xLjBcclxuSG9z
dDogZHVja2R1Y2tnby5jb21cclxuQWNjZXB0OiB0ZXh0L2h0bWwsIHRleHQvcGxhaW4sIHRleHQv
Y3NzLCB0ZXh0L3NnbWwsICovKjtxPTAuMDFcclxuQWNjZXB0LUVuY29kaW5nOiBnemlwLCBjb21w
cmVzcywgYnppcDJcclxuQWNjZXB0LUxhbmd1YWdlOiBlblxyXG5Vc2VyLUFnZW50OiBMeW54LzIu
OC43cmVsLjJ6XHhiM1x4MWFQXHhlYVx4ZTBceDAyXHgwMEJceDAwXHgwMFx4MDBCXHgwMFx4MDBc
eDAwXHgwMCZceGIwXHhmNFx4ZmNceGI4XHhjMD9ceDBlXHgxMVx4MWNceGMwXHgwOFx4MDBFXHgw
MFx4MDA0XHg4OENAXHgwMDFceDA2XHgwY1x4YjUyXHgxMlx4YzBceGZiXHhjMFx4YThceDAxXHgx
Nlx4MDBQXHhkNTtceGI4XHhmNlx4OTBYIFBceGI4XHhhM1x4ODBceDEwXHgwMDZbXHhhMVx4MDBc
eDAwXHgwMVx4MDFceDA4XG5ceDA2XHhkMVx4YTZrXHgxNlx4ZWVceGFhIHpceGIzXHgxYVBOXHhl
ZFx4MDJceDAwXHgwMFx4MDFceDAwXHgwMFx4ZWFceDA1XHgwMFx4MDBceDAwJlx4YjBceGY0XHhm
Y1x4YjhceGMwP1x4MGVceDExXHgxY1x4YzBceDA4XHgwMEVceDAwXHgwNVx4ZGNceDg4REBceDAw
MVx4MDZceDA3XHgwYzJceDEyXHhjMFx4ZmJceGMwXHhhOFx4MDFceDE2XHgwMFBceGQ1O1x4Yjhc
eGY2XHg5MFggUFx4YjhceGEzXHg4MFx4MTBceDAwNlx4YWFceDgwXHgwMFx4MDBceDAxXHgwMVx4
MDhcblx4MDZceGQxXHhhNmxceDE2XHhlZVx4YWEgSFRUUC8xLjEgMjAwIE9LXHJcblNlcnZlcjog
bmdpbnhcclxuRGF0ZTogVGh1LCAwMiBBdWcgMjAxMiAxNzowNjowMiBHTVRcclxuQ29udGVudC1U
eXBlOiB0ZXh0L2h0bWw7IGNoYXJzZXQ9VVRGLThcclxuQ29ubmVjdGlvbjogY2xvc2VcclxuRXhw
aXJlczogVGh1LCAwMiBBdWcgMjAxMiAxNzowNjowMyBHTVRcclxuQ2FjaGUtQ29udHJvbDogbWF4
LWFnZXpceGIzXHgxYVBceGYxXHhmN1x4MDJceDAwXHgwMFx4MDFceDAwXHgwMFx4ODJceDA0XHgw
MFx4MDBceDAwJlx4YjBceGY0XHhmY1x4YjhceGMwP1x4MGVceDExXHgxY1x4YzBceDA4XHgwMEVc
eDAwXHgwNHRceDg4RUBceDAwMVx4MDZceDA4czJceDEyXHhjMFx4ZmJceGMwXHhhOFx4MDFceDE2
XHgwMFBceGQ1O1x4YjhceGY2XHg5Nlx4MDAgUFx4YjhceGEzXHg4MFx4MThceDAwNlx4MTJceGI1
XHgwMFx4MDBceDAxXHgwMVx4MDhcblx4MDZceGQxXHhhNmxceDE2XHhlZVx4YWEgLmh0bWwtb25s
eSB7XHJcbiAgICAgIGZvbnQtc2l6ZTogMTJweDtcclxuICAgIH1cclxuICAgIGEge1xyXG4gICAg
ICB0ZXh0LWRlY29yYXRpb246IG5vbmU7XHJcbiAgICAgIGNvbG9yOiAjMTE2OENDO1xyXG4gICAg
fVxyXG4gICAgYTpob3ZlciB7XHJcbiAgICAgIHRleHQtZGVjb3JhdGlvbjogdW5kZXJsaW5lO1xy
XG4gICAgfVxyXG4gICAgYTp2aXNpdGVkIHtcclxuICB6XHhiM1x4MWFQXHhmN1x4ZjdceDAyXHgw
MEJceDAwXHgwMFx4MDBCXHgwMFx4MDBceDAwXHgwMCZceGIwXHhmNFx4ZmNceGI4XHhjMD9ceDBl
XHgxMVx4MWNceGMwXHgwOFx4MDBFXHgwMFx4MDA0XHg4OEZAXHgwMDFceDA2XHgwY1x4YjIyXHgx
Mlx4YzBceGZiXHhjMFx4YThceDAxXHgxNlx4MDBQXHhkNTtceGI4XHhmNlx4OWFAIFBceGI4XHhh
M1x4ODBceDExXHgwMDZRXHhiN1x4MDBceDAwXHgwMVx4MDFceDA4XG5ceDA2XHhkMVx4YTZsXHgx
Nlx4ZWVceGFhIHpceGIzXHgxYVA0XHhmOFx4MDJceDAwQlx4MDBceDAwXHgwMEJceDAwXHgwMFx4
MDBceGMwP1x4MGVceDExXHgxY1x4YzBceDAwJlx4YjBceGY0XHhmY1x4YjhceDA4XHgwMEVceDAw
XHgwMDRceGI2WUBceDAwQFx4MDZceGNmXHg5ZVx4YzBceGE4XHgwMVx4MTYyXHgxMlx4YzBceGZi
XHhkNTtceDAwUCBQXHhiOFx4YTNceGI4XHhmNlx4OWFAXHg4MFx4MTBceDdmXHhmOFx4ZDFceGNi
XHgwMFx4MDBceDAxXHgwMVx4MDhcblx4MTZceGVlXHhhYUpceDA2XHhkMVx4YTZselx4YjNceDFh
UG1ceGY4XHgwMlx4MDBCXHgwMFx4MDBceDAwQlx4MDBceDAwXHgwMFx4YzA/XHgwZVx4MTFceDFj
XHhjMFx4MDAmXHhiMFx4ZjRceGZjXHhiOFx4MDhceDAwRVx4MDBceDAwNFx4ODVceDAwQFx4MDBA
XHgwNlx4MDBceGY4XHhjMFx4YThceDAxXHgxNjJceDEyXHhjMFx4ZmJceGQ1O1x4MDBQIFBceGI4
XHhhM1x4YjhceGY2XHg5YUFceDgwXHgxMFx4N2ZceGY4XHhkMVx4Y2FceDAwXHgwMFx4MDFceDAx
XHgwOFxuXHgxNlx4ZWVceGFhSlx4MDZceGQxXHhhNmx6XHhiM1x4MWFQXHhiMFx4ZmNceDAyXHgw
MEJceDAwXHgwMFx4MDBCXHgwMFx4MDBceDAwXHhjMD9ceDBlXHgxMVx4MWNceGMwXHgwMCZceGIw
XHhmNFx4ZmNceGI4XHgwOFx4MDBFXHgwMFx4MDA0TiZAXHgwMEBceDA2N1x4ZDJceGMwXHhhOFx4
MDFceDE2Mlx4MTJceGMwXHhmYlx4ZDU7XHgwMFAgUFx4YjhceGEzXHhiOFx4ZjZceDlhQVx4ODBc
eDExXHg4Mlx4MThceGNmXHhhOFx4MDBceDAwXHgwMVx4MDFceDA4XG5ceDE2XHhlZVx4YWFLXHgw
Nlx4ZDFceGE2bHpceGIzXHgxYVBceGM2XHg4Y1x4MDNceDAwQlx4MDBceDAwXHgwMEJceDAwXHgw
MFx4MDBceDAwJlx4YjBceGY0XHhmY1x4YjhceGMwP1x4MGVceDExXHgxY1x4YzBceDA4XHgwMEVc
eDAwXHgwMDRceDg4R0BceDAwMVx4MDZceDBjXHhiMTJceDEyXHhjMFx4ZmJceGMwXHhhOFx4MDFc
eDE2XHgwMFBceGQ1O1x4YjhceGY2XHg5YUEgUFx4YjhceGE0XHg4MFx4MTBceDAwNlFceDg3XHgw
MFx4MDBceDAxXHgwMVx4MDhcblx4MDZceGQxXHhhNnBceDE2XHhlZVx4YWFLJwpwMAou
"""
TESTPCAP3 = b"""
gANCVAsAANTDsqECAAQAAAAAAAAAAAAAAQAAAQAAAHizGlCQPgAATgAAAE4AAADAPw4RHMAAJrD0
/LgIAEUAAEC8SEAAQAbJo8CoARYyEsD71ToAUEVh6bAAAAAAsAL//8TWAAACBAW0AQMDAQEBCAoW
7qHTAAAAAAQCAAB4sxpQ/OAAAEoAAABKAAAAACaw9Py4wD8OERzACABFAAA8AABAADEGlPAyEsD7
wKgBFgBQ1TqjROTeRWHpsaASFqCKmQAAAgQFtAQCCAoG0aWWFu6h0wEDAwd4sxpQPOEAAEIAAABC
AAAAwD8OERzAACaw9Py4CABFAAA0mT5AAEAG7LnAqAEWMhLA+9U6AFBFYemxo0Tk34AQghhNxAAA
AQEIChbuofwG0aWWeLMaUJjiAAAAAQAAMwEAAMA/DhEcwAAmsPT8uAgARQABJU9wQABABjWXwKgB
FjISwPvVOgBQRWHpsaNE5N+AGIIYluYAAAEBCAoW7qH8BtGllkdFVCAvIEhUVFAvMS4wDQpIb3N0
OiB3d3cuZHVja2R1Y2tnby5jb20NCkFjY2VwdDogdGV4dC9odG1sLCB0ZXh0L3BsYWluLCB0ZXh0
L2NzcywgdGV4dC9zZ21sLCAqLyo7cT0wLjAxDQpBY2NlcHQtRW5jb2Rpbmc6IGd6aXAsIGNvbXBy
ZXNzLCBiemlwMg0KQWNjZXB0LUxhbmd1YWdlOiBlbg0KVXNlci1BZ2VudDogTHlueC8yLjguN3J4
sxpQY4IBAEIAAABCAAAAACaw9Py4wD8OERzACABFAAA0pxNAADEG7eQyEsD7wKgBFgBQ1TqjROTf
RWHqooAQADbOsQAAAQEICgbRpZoW7qH8eLMaUOCfAQAAAQAA7gEAAAAmsPT8uMA/DhEcwAgARQAB
4KcUQAAxBuw3MhLA+8CoARYAUNU6o0Tk30Vh6qKAGAA2Q6YAAAEBCAoG0aWaFu6h/EhUVFAvMS4x
IDMwMSBNb3ZlZCBQZXJtYW5lbnRseQ0KU2VydmVyOiBuZ2lueA0KRGF0ZTogVGh1LCAwMiBBdWcg
MjAxMiAxNzowNjowMCBHTVQNCkNvbnRlbnQtVHlwZTogdGV4dC9odG1sDQpDb250ZW50LUxlbmd0
aDogMTc4DQpDb25uZWN0aW9uOiBjbG9zZQ0KTG9jYXRpb246IGh0dHA6Ly9kdWNrZHVja2dvLmNv
bS8NCkV4cGlyZXN4sxpQ458BAEIAAABCAAAAACaw9Py4wD8OERzACABFAAA0pxVAADEG7eIyEsD7
wKgBFgBQ1TqjROaLRWHqooARADbNBAAAAQEICgbRpZoW7qH8eLMaUAugAQBCAAAAQgAAAMA/DhEc
wAAmsPT8uAgARQAANP44QABABoe/wKgBFjISwPvVOgBQRWHqoqNE5ouAEIFCS8kAAAEBCAoW7qIs
BtGlmnizGlA1oAEAQgAAAEIAAADAPw4RHMAAJrD0/LgIAEUAADQDCkAAQAaC7sCoARYyEsD71ToA
UEVh6qKjROaMgBCBQkvIAAABAQgKFu6iLAbRpZp4sxpQYKIBAEIAAABCAAAAwD8OERzAACaw9Py4
CABFAAA0MgRAAEAGU/TAqAEWMhLA+9U6AFBFYeqio0TmjIARghhK8QAAAQEIChbuoiwG0aWaeLMa
UJA4BABCAAAAQgAAAAAmsPT8uMA/DhEcwAgARQAANKcWQAAxBu3hMhLA+8CoARYAUNU6o0TmjEVh
6qOAEAA2zMwAAAEBCAoG0aWhFu6iLHqzGlCqrAEATgAAAE4AAADAPw4RHMAAJrD0/LgIAEUAAEDd
UkAAQAaomcCoARYyEsD71TsAUCBQt7UAAAAAsAL//xO/AAACBAW0AQMDAQEBCAoW7qn2AAAAAAQC
AAB6sxpQQUoCAEoAAABKAAAAACaw9Py4wD8OERzACABFAAA8AABAADEGlPAyEsD7wKgBFgBQ1Tu4
9pBXIFC3tqASFqAXhgAAAgQFtAQCCAoG0aZnFu6p9gEDAwd6sxpQe0oCAEIAAABCAAAAwD8OERzA
ACaw9Py4CABFAAA0VVJAAEAGMKbAqAEWMhLA+9U7AFAgULe2uPaQWIAQghjasAAAAQEIChbuqh8G
0aZnerMaUN9OAgAAAQAALwEAAMA/DhEcwAAmsPT8uAgARQABIU9bQABABjWwwKgBFjISwPvVOwBQ
IFC3trj2kFiAGIIYEnwAAAEBCAoW7qogBtGmZ0dFVCAvIEhUVFAvMS4wDQpIb3N0OiBkdWNrZHVj
a2dvLmNvbQ0KQWNjZXB0OiB0ZXh0L2h0bWwsIHRleHQvcGxhaW4sIHRleHQvY3NzLCB0ZXh0L3Nn
bWwsICovKjtxPTAuMDENCkFjY2VwdC1FbmNvZGluZzogZ3ppcCwgY29tcHJlc3MsIGJ6aXAyDQpB
Y2NlcHQtTGFuZ3VhZ2U6IGVuDQpVc2VyLUFnZW50OiBMeW54LzIuOC43cmVsLjJ6sxpQ6uACAEIA
AABCAAAAACaw9Py4wD8OERzACABFAAA0iENAADEGDLUyEsD7wKgBFgBQ1Tu49pBYIFC4o4AQADZb
oQAAAQEICgbRpmsW7qogerMaUE7tAgAAAQAA6gUAAAAmsPT8uMA/DhEcwAgARQAF3IhEQAAxBgcM
MhLA+8CoARYAUNU7uPaQWCBQuKOAEAA2qoAAAAEBCAoG0aZsFu6qIEhUVFAvMS4xIDIwMCBPSw0K
U2VydmVyOiBuZ2lueA0KRGF0ZTogVGh1LCAwMiBBdWcgMjAxMiAxNzowNjowMiBHTVQNCkNvbnRl
bnQtVHlwZTogdGV4dC9odG1sOyBjaGFyc2V0PVVURi04DQpDb25uZWN0aW9uOiBjbG9zZQ0KRXhw
aXJlczogVGh1LCAwMiBBdWcgMjAxMiAxNzowNjowMyBHTVQNCkNhY2hlLUNvbnRyb2w6IG1heC1h
Z2V6sxpQ8fcCAAABAACCBAAAACaw9Py4wD8OERzACABFAAR0iEVAADEGCHMyEsD7wKgBFgBQ1Tu4
9pYAIFC4o4AYADYStQAAAQEICgbRpmwW7qogLmh0bWwtb25seSB7DQogICAgICBmb250LXNpemU6
IDEycHg7DQogICAgfQ0KICAgIGEgew0KICAgICAgdGV4dC1kZWNvcmF0aW9uOiBub25lOw0KICAg
ICAgY29sb3I6ICMxMTY4Q0M7DQogICAgfQ0KICAgIGE6aG92ZXIgew0KICAgICAgdGV4dC1kZWNv
cmF0aW9uOiB1bmRlcmxpbmU7DQogICAgfQ0KICAgIGE6dmlzaXRlZCB7DQogIHqzGlD39wIAQgAA
AEIAAAAAJrD0/LjAPw4RHMAIAEUAADSIRkAAMQYMsjISwPvAqAEWAFDVO7j2mkAgULijgBEANlG3
AAABAQgKBtGmbBbuqiB6sxpQNPgCAEIAAABCAAAAwD8OERzAACaw9Py4CABFAAA0tllAAEAGz57A
qAEWMhLA+9U7AFAgULijuPaaQIAQf/jRywAAAQEIChbuqkoG0aZserMaUG34AgBCAAAAQgAAAMA/
DhEcwAAmsPT8uAgARQAANIUAQABABgD4wKgBFjISwPvVOwBQIFC4o7j2mkGAEH/40coAAAEBCAoW
7qpKBtGmbHqzGlCw/AIAQgAAAEIAAADAPw4RHMAAJrD0/LgIAEUAADROJkAAQAY30sCoARYyEsD7
1TsAUCBQuKO49ppBgBGCGM+oAAABAQgKFu6qSwbRpmx6sxpQxowDAEIAAABCAAAAACaw9Py4wD8O
ERzACABFAAA0iEdAADEGDLEyEsD7wKgBFgBQ1Tu49ppBIFC4pIAQADZRhwAAAQEICgbRpnAW7qpL
cQAu
"""
| isc |
markslwong/tensorflow | tensorflow/contrib/tensor_forest/python/tensor_forest.py | 46 | 40039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import sys
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0,
split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
self.num_features, 1000)
self.max_fertile_nodes = (self.max_fertile_nodes or
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
    num_split_initializations_per_input = max(1, int(math.floor(
        self.num_splits_to_consider / self.split_after_samples)))
    self.split_initializations_per_input = getattr(
        self, 'split_initializations_per_input',
        num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = variable_scope.get_variable(
name=self.get_tree_name('start_epoch', tree_num),
dtype=dtypes.int32, shape=[params.max_nodes],
initializer=init_ops.constant_initializer(0))
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.accumulator_to_node_map = variable_scope.get_variable(
name=self.get_tree_name('accumulator_to_node_map', tree_num),
shape=[params.max_fertile_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
  __getitem__ and __setitem__ functions so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
# Set up some scalar variables to run through the device assigner, then
# we can use those to colocate everything related to a tree.
self.device_dummies = []
with ops.device(device_assigner):
for i in range(params.num_trees):
self.device_dummies.append(variable_scope.get_variable(
name='device_dummy_%d' % i, shape=0))
for i in range(params.num_trees):
with ops.device(self.device_dummies[i].device):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self,
params,
device_assigner=None,
variables=None,
tree_variables_class=TreeTrainingVariables,
tree_graphs=None,
training=True):
self.params = params
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(self.variables[i], self.params, i)
for i in range(self.params.num_trees)
]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(
value=input_data, num_or_size_splits=self.params.num_features, axis=1)
return array_ops.concat(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def training_graph(self,
input_data,
input_labels,
num_trainers=1,
trainer_id=0,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
num_trainers: Number of parallel trainers to split trees among.
trainer_id: Which trainer this instance is.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
Raises:
NotImplementedError: If trying to use bagging with sparse features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
if input_labels is not None:
labels = data_ops.ParseLabelTensorOrDict(input_labels)
data_spec = data_spec or self.get_default_data_spec(input_data)
tree_graphs = []
trees_per_trainer = self.params.num_trees / num_trainers
tree_start = int(trainer_id * trees_per_trainer)
tree_end = int((trainer_id + 1) * trees_per_trainer)
for i in range(tree_start, tree_end):
logging.info('training graph for tree: %d' % i)
with ops.device(self.variables.device_dummies[i].device):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = processed_dense_features
tree_labels = labels
if self.params.bagging_fraction < 1.0:
# TODO(gilberth): Support bagging for sparse features.
if processed_sparse_features is not None:
raise NotImplementedError(
'Bagging not supported with sparse features.')
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(processed_dense_features), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(processed_dense_features, gather_indices)
tree_labels = array_ops.gather(labels, gather_indices)
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(self.trees[i].training_graph(
tree_data,
tree_labels,
seed,
data_spec=data_spec,
sparse_features=processed_sparse_features,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or dict of string->Tensor for input data.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
The last op in the random forest inference graph.
Raises:
NotImplementedError: If trying to use feature bagging with sparse
features.
"""
processed_dense_features, processed_sparse_features, data_spec = (
data_ops.ParseDataTensorOrDict(input_data))
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_data = processed_dense_features
if self.params.bagged_features:
if processed_sparse_features is not None:
raise NotImplementedError(
'Feature bagging not supported with sparse features.')
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(
tree_data,
data_spec,
sparse_features=processed_sparse_features,
**inference_args))
with ops.device(self.variables.device_dummies[0].device):
all_predict = array_ops.stack(probabilities)
return math_ops.div(
math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
name='probabilities')
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
return math_ops.negative(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.negative(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.stack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
def feature_importances(self):
tree_counts = [self.trees[i].feature_usage_counts()
for i in range(self.params.num_trees)]
total_counts = math_ops.reduce_sum(array_ops.stack(tree_counts, 0), 0)
return total_counts / math_ops.reduce_sum(total_counts)
def one_hot_wrapper(num_classes, loss_fn):
"""Some loss functions take one-hot labels."""
def _loss(probs, targets):
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, squeeze_dims=[1])
one_hot_labels = array_ops.one_hot(
math_ops.to_int32(targets),
num_classes,
on_value=1.,
off_value=0.,
dtype=dtypes.float32)
return loss_fn(probs, one_hot_labels)
return _loss
class TrainingLossForest(RandomForestGraphs):
"""Random Forest that uses training loss as the termination criteria."""
def __init__(self, params, loss_fn=None, **kwargs):
"""Initialize.
Args:
params: Like RandomForestGraphs, a ForestHParams object.
loss_fn: A function that takes probabilities and targets and returns
a loss for each example.
**kwargs: Keyword args to pass to superclass (RandomForestGraphs).
"""
self.loss_fn = loss_fn or one_hot_wrapper(params.num_classes,
loss_ops.log_loss)
self._loss = None
super(TrainingLossForest, self).__init__(params, **kwargs)
def _get_loss(self, features, labels):
"""Constructs, caches, and returns the inference-based loss."""
if self._loss is not None:
return self._loss
def _average_loss():
probs = self.inference_graph(features)
return math_ops.reduce_sum(self.loss_fn(
probs, labels)) / math_ops.to_float(array_ops.shape(labels)[0])
self._loss = control_flow_ops.cond(
self.average_size() > 0, _average_loss,
lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
return self._loss
def training_graph(self, input_data, input_labels, **kwargs):
loss = self._get_loss(input_data, input_labels)
with ops.control_dependencies([loss.op]):
return super(TrainingLossForest, self).training_graph(
input_data, input_labels, **kwargs)
def training_loss(self, features, labels, name='training_loss'):
return array_ops.identity(self._get_loss(features, labels), name=name)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, tree_num):
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(
array_ops.squeeze(
array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
-2), _init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
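    For example, with two classes and counts c = (2, 2):
    score = 1 - ((2/4)^2 + (2/4)^2) = 0.5, the maximum impurity for two
    classes.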
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini scores for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
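    For example, for the values {1, 2, 3}: E[x] = 2, E[x^2] = 14/3, and so
    V = 14/3 - 4 = 2/3.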
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
sparse_features=None,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A data_ops.TensorForestDataSpec object specifying the
original feature/columns of the data.
sparse_features: A tf.SparseTensor for sparse input data.
input_weights: A float tensor or placeholder holding per-input weights,
or None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
epoch = math_ops.to_int32(get_epoch_variable())
serialized_input_spec = data_spec.SerializeToString()
if input_weights is None:
input_weights = []
if input_data is None:
input_data = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums, splits_squares,
totals_indices, totals_sums, totals_squares,
input_leaves) = (tensor_forest_ops.count_extremely_random_stats(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_labels,
input_weights,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch,
epoch,
input_spec=serialized_input_spec,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.accumulator_sums,
totals_indices, totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(
self.variables.candidate_split_squares, splits_indices,
splits_squares))
splits_update_ops.append(
tensor_forest_ops.scatter_add_ndim(self.variables.accumulator_squares,
totals_indices, totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
tensor_forest_ops.sample_inputs(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_weights,
self.variables.node_to_accumulator_map,
input_leaves,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
input_spec=serialized_input_spec,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
# Passing input_leaves to finished nodes here means that nodes that
# have become stale won't be deallocated until an input reaches them,
# because we're trying to avoid considering every fertile node for
# performance reasons.
finished, stale = tensor_forest_ops.finished_nodes(
input_leaves,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch,
epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples,
dominate_method=self.params.dominate_method,
dominate_fraction=self.params.dominate_fraction)
# Update leaf scores.
# TODO(thomaswc): Store the leaf scores in a TopN and only update the
# scores of the leaves that were touched by this batch of input.
children = array_ops.squeeze(
array_ops.slice(self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(
array_ops.squeeze(
array_ops.where(is_leaf), squeeze_dims=[1]))
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = tensor_forest_ops.best_splits(
finished,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op,
non_fertile_leaves.op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates,
new_eot) = (tensor_forest_ops.grow_tree(
self.variables.end_of_tree, self.variables.node_to_accumulator_map,
finished, split_indices, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_threshold_updates,
dtype=dtypes.int32)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([tree_update_op]):
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = (tensor_forest_ops.update_fertile_slots(
finished,
non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
self.variables.node_sums,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
with ops.control_dependencies([n2a_map_updates.op]):
eot_update_op = state_ops.assign(self.variables.end_of_tree, new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(
state_ops.scatter_update(self.variables.node_to_accumulator_map,
n2a_map_updates[0], n2a_map_updates[1]))
updates.append(
state_ops.scatter_update(self.variables.accumulator_to_node_map,
a2n_map_updates[0], a2n_map_updates[1]))
cleared_and_allocated_accumulators = array_ops.concat(
[accumulators_cleared, accumulators_allocated], 0)
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.negative(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat([total_cleared, total_reset], 0)
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.negative(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec, sparse_features=None):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or placeholder for input data.
data_spec: A TensorForestDataSpec proto specifying the original
input columns.
sparse_features: A tf.SparseTensor for sparse input data.
Returns:
The last op in the random tree inference graph.
"""
if input_data is None:
input_data = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if sparse_features is not None:
sparse_indices = sparse_features.indices
sparse_values = sparse_features.values
sparse_shape = sparse_features.dense_shape
return tensor_forest_ops.tree_predictions(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
input_spec=data_spec.SerializeToString(),
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
    this is the Gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
def feature_usage_counts(self):
features = array_ops.slice(self.variables.tree, [0, 1], [-1, 1])
# One hot ignores negative values, which is the default for unused nodes.
one_hots = array_ops.one_hot(
array_ops.squeeze(features), self.params.num_features)
return math_ops.reduce_sum(one_hots, 0)
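# A minimal standalone sketch of the scatter-update primitive that the training
# graph above is built from (assumes a stock TensorFlow 1.x install; the shapes
# and values below are illustrative only, not part of the original module):
if __name__ == '__main__':
  import tensorflow as tf

  var = tf.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
  # Replace rows 0 and 2 in place; row 1 is left untouched.
  update = tf.scatter_update(var, [0, 2], [[1.0, 2.0], [3.0, 4.0]])
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update)  # var now holds [[1, 2], [0, 0], [3, 4]]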
| apache-2.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/sunburst/_domain.py | 2 | 5773 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sunburst"
_path_str = "sunburst.domain"
_valid_props = {"column", "row", "x", "y"}
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
        the grid for this sunburst trace.
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
        grid for this sunburst trace.
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this sunburst trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this sunburst trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
            column in the grid for this sunburst trace.
        row
            If there is a layout grid, use the domain for this row
            in the grid for this sunburst trace.
x
Sets the horizontal domain of this sunburst trace (in
plot fraction).
y
Sets the vertical domain of this sunburst trace (in
plot fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sunburst.Domain`
column
If there is a layout grid, use the domain for this
            column in the grid for this sunburst trace.
        row
            If there is a layout grid, use the domain for this row
            in the grid for this sunburst trace.
x
Sets the horizontal domain of this sunburst trace (in
plot fraction).
y
Sets the vertical domain of this sunburst trace (in
plot fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sunburst.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sunburst.Domain`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
_v = column if column is not None else _v
if _v is not None:
self["column"] = _v
_v = arg.pop("row", None)
_v = row if row is not None else _v
if _v is not None:
self["row"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
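# A minimal usage sketch (illustrative; assumes only plotly itself). A sunburst
# trace receives a Domain exactly like the one constructed here:
if __name__ == "__main__":
    d = Domain(x=[0, 0.45], y=[0, 1], row=0, column=0)
    print(d.to_plotly_json())  # {'column': 0, 'row': 0, 'x': [0, 0.45], 'y': [0, 1]}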
| mit |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/perf/benchmarks/thread_times.py | 3 | 2912 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import silk_flags
from measurements import thread_times
import page_sets
from telemetry import benchmark
class _ThreadTimes(benchmark.Benchmark):
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--report-silk-details', action='store_true',
help='Report details relevant to silk.')
@classmethod
def Name(cls):
return 'thread_times'
def CreatePageTest(self, options):
return thread_times.ThreadTimes(options.report_silk_details)
@benchmark.Enabled('android')
class ThreadTimesKeySilkCases(_ThreadTimes):
"""Measures timeline metrics while performing smoothness action on key silk
cases."""
page_set = page_sets.KeySilkCasesPageSet
@classmethod
def Name(cls):
return 'thread_times.key_silk_cases'
@benchmark.Enabled('android', 'linux')
class ThreadTimesKeyHitTestCases(_ThreadTimes):
"""Measure timeline metrics while performing smoothness action on key hit
testing cases."""
page_set = page_sets.KeyHitTestCasesPageSet
@classmethod
def Name(cls):
return 'thread_times.key_hit_test_cases'
@benchmark.Enabled('android')
class ThreadTimesFastPathMobileSites(_ThreadTimes):
"""Measures timeline metrics while performing smoothness action on
key mobile sites labeled with fast-path tag.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
page_set = page_sets.KeyMobileSitesSmoothPageSet
options = {'page_label_filter' : 'fastpath'}
@classmethod
def Name(cls):
return 'thread_times.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class ThreadTimesSimpleMobileSites(_ThreadTimes):
"""Measures timeline metric using smoothness action on simple mobile sites
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
page_set = page_sets.SimpleMobileSitesPageSet
@classmethod
def Name(cls):
return 'thread_times.simple_mobile_sites'
@benchmark.Disabled('win') # crbug.com/443781
class ThreadTimesCompositorCases(_ThreadTimes):
"""Measures timeline metrics while performing smoothness action on
tough compositor cases, using software rasterization.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
page_set = page_sets.ToughCompositorCasesPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSoftwareRasterization(options)
@classmethod
def Name(cls):
return 'thread_times.tough_compositor_cases'
@benchmark.Enabled('android')
class ThreadTimesPolymer(_ThreadTimes):
"""Measures timeline metrics while performing smoothness action on
Polymer cases."""
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'thread_times.polymer'
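# Sketch of the pattern every class above follows (the benchmark below is
# hypothetical, not one that exists in Chromium): pick a page set, optionally
# customize browser options, and expose a unique Name().
#
#   @benchmark.Enabled('android')
#   class ThreadTimesExampleCases(_ThreadTimes):
#     """Measures timeline metrics on an example page set."""
#     page_set = page_sets.KeySilkCasesPageSet
#
#     @classmethod
#     def Name(cls):
#       return 'thread_times.example_cases'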
| bsd-3-clause |
tlatzko/spmcluster | .tox/2.7-cover/lib/python2.7/site-packages/nose/util.py | 48 | 20310 | """Utility functions and classes used by nose internally.
"""
import inspect
import itertools
import logging
import stat
import os
import re
import sys
import types
import unittest
from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
log = logging.getLogger('nose')
ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
class_types = (ClassType, TypeType)
skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
try:
set()
set = set # make from nose.util import set happy
except NameError:
try:
from sets import Set as set
except ImportError:
pass
def ls_tree(dir_path="",
skip_pattern=skip_pattern,
indent="|-- ", branch_indent="| ",
last_indent="`-- ", last_branch_indent=" "):
# TODO: empty directories look like non-directory files
return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
indent, branch_indent,
last_indent, last_branch_indent))
def _ls_tree_lines(dir_path, skip_pattern,
indent, branch_indent, last_indent, last_branch_indent):
if dir_path == "":
dir_path = os.getcwd()
lines = []
names = os.listdir(dir_path)
names.sort()
dirs, nondirs = [], []
for name in names:
if re.match(skip_pattern, name):
continue
if os.path.isdir(os.path.join(dir_path, name)):
dirs.append(name)
else:
nondirs.append(name)
# list non-directories first
entries = list(itertools.chain([(name, False) for name in nondirs],
[(name, True) for name in dirs]))
def ls_entry(name, is_dir, ind, branch_ind):
if not is_dir:
yield ind + name
else:
path = os.path.join(dir_path, name)
if not os.path.islink(path):
yield ind + name
subtree = _ls_tree_lines(path, skip_pattern,
indent, branch_indent,
last_indent, last_branch_indent)
for x in subtree:
yield branch_ind + x
for name, is_dir in entries[:-1]:
for line in ls_entry(name, is_dir, indent, branch_indent):
yield line
if entries:
name, is_dir = entries[-1]
for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
yield line
def absdir(path):
"""Return absolute, normalized path to directory, if it exists; None
otherwise.
"""
if not os.path.isabs(path):
path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
path)))
if path is None or not os.path.isdir(path):
return None
return path
def absfile(path, where=None):
"""Return absolute, normalized path to file (optionally in directory
where), or None if the file can't be found either in where or the current
working directory.
"""
orig = path
if where is None:
where = os.getcwd()
if isinstance(where, list) or isinstance(where, tuple):
for maybe_path in where:
maybe_abs = absfile(path, maybe_path)
if maybe_abs is not None:
return maybe_abs
return None
if not os.path.isabs(path):
path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
if path is None or not os.path.exists(path):
if where != os.getcwd():
# try the cwd instead
path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
orig)))
if path is None or not os.path.exists(path):
return None
if os.path.isdir(path):
        # might want an __init__.py from the package
        init = os.path.join(path, '__init__.py')
if os.path.isfile(init):
return init
elif os.path.isfile(path):
return path
return None
def anyp(predicate, iterable):
for item in iterable:
if predicate(item):
return True
return False
def file_like(name):
"""A name is file-like if it is a path that exists, or it has a
directory part, or it ends in .py, or it isn't a legal python
identifier.
"""
return (os.path.exists(name)
or os.path.dirname(name)
or name.endswith('.py')
or not ident_re.match(os.path.splitext(name)[0]))
def func_lineno(func):
"""Get the line number of a function. First looks for
compat_co_firstlineno, then func_code.co_first_lineno.
"""
try:
return func.compat_co_firstlineno
except AttributeError:
try:
return func.func_code.co_firstlineno
except AttributeError:
return -1
def isclass(obj):
"""Is obj a class? Inspect's isclass is too liberal and returns True
for objects that can't be subclasses of anything.
"""
obj_type = type(obj)
return obj_type in class_types or issubclass(obj_type, type)
# backwards compat (issue #64)
is_generator = isgenerator
def ispackage(path):
"""
Is this path a package directory?
>>> ispackage('nose')
True
>>> ispackage('unit_tests')
False
>>> ispackage('nose/plugins')
True
>>> ispackage('nose/loader.py')
False
"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if ident_re.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
def isproperty(obj):
"""
Is this a property?
>>> class Foo:
... def got(self):
... return 2
... def get(self):
... return 1
... get = property(get)
>>> isproperty(Foo.got)
False
>>> isproperty(Foo.get)
True
"""
return type(obj) == property
def getfilename(package, relativeTo=None):
"""Find the python source file for a package, relative to a
particular directory (defaults to current working directory if not
given).
"""
if relativeTo is None:
relativeTo = os.getcwd()
path = os.path.join(relativeTo, os.sep.join(package.split('.')))
if os.path.exists(path + '/__init__.py'):
return path
filename = path + '.py'
if os.path.exists(filename):
return filename
return None
def getpackage(filename):
"""
Find the full dotted package name for a given python source file
name. Returns None if the file is not a python source file.
>>> getpackage('foo.py')
'foo'
>>> getpackage('biff/baf.py')
'baf'
>>> getpackage('nose/util.py')
'nose.util'
Works for directories too.
>>> getpackage('nose')
'nose'
>>> getpackage('nose/plugins')
'nose.plugins'
And __init__ files stuck onto directories
>>> getpackage('nose/plugins/__init__.py')
'nose.plugins'
Absolute paths also work.
>>> path = os.path.abspath(os.path.join('nose', 'plugins'))
>>> getpackage(path)
'nose.plugins'
"""
src_file = src(filename)
if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
return None
base, ext = os.path.splitext(os.path.basename(src_file))
if base == '__init__':
mod_parts = []
else:
mod_parts = [base]
path, part = os.path.split(os.path.split(src_file)[0])
while part:
if ispackage(os.path.join(path, part)):
mod_parts.append(part)
else:
break
path, part = os.path.split(path)
mod_parts.reverse()
return '.'.join(mod_parts)
def ln(label):
"""Draw a 70-char-wide divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
"""
label_len = len(label) + 2
chunk = (70 - label_len) // 2
out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
pad = 70 - len(out)
if pad > 0:
out = out + ('-' * pad)
return out
def resolve_name(name, module=None):
"""Resolve a dotted name to a module and its parts. This is stolen
wholesale from unittest.TestLoader.loadTestByName.
>>> resolve_name('nose.util') #doctest: +ELLIPSIS
<module 'nose.util' from...>
>>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
<function resolve_name at...>
"""
parts = name.split('.')
parts_copy = parts[:]
if module is None:
while parts_copy:
try:
log.debug("__import__ %s", name)
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
for part in parts:
obj = getattr(obj, part)
return obj
def split_test_name(test):
"""Split a test name into a 3-tuple containing file, module, and callable
names, any of which (but not all) may be blank.
Test names are in the form:
file_or_module:callable
Either side of the : may be dotted. To change the splitting behavior, you
can alter nose.util.split_test_re.
"""
norm = os.path.normpath
file_or_mod = test
fn = None
if not ':' in test:
# only a file or mod part
if file_like(test):
return (norm(test), None, None)
else:
return (None, test, None)
# could be path|mod:callable, or a : in the file path someplace
head, tail = os.path.split(test)
if not head:
# this is a case like 'foo:bar' -- generally a module
# name followed by a callable, but also may be a windows
# drive letter followed by a path
try:
file_or_mod, fn = test.split(':')
if file_like(fn):
# must be a funny path
file_or_mod, fn = test, None
except ValueError:
# more than one : in the test
# this is a case like c:\some\path.py:a_test
parts = test.split(':')
if len(parts[0]) == 1:
file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
else:
# nonsense like foo:bar:baz
raise ValueError("Test name '%s' could not be parsed. Please "
"format test names as path:callable or "
"module:callable." % (test,))
elif not tail:
# this is a case like 'foo:bar/'
# : must be part of the file path, so ignore it
file_or_mod = test
else:
if ':' in tail:
file_part, fn = tail.split(':')
else:
file_part = tail
file_or_mod = os.sep.join([head, file_part])
if file_or_mod:
if file_like(file_or_mod):
return (norm(file_or_mod), None, fn)
else:
return (None, file_or_mod, fn)
else:
return (None, None, fn)
split_test_name.__test__ = False # do not collect
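# Worked examples of the split described above (illustrative, on POSIX paths;
# these are not doctests in the original):
#
#   split_test_name('tests/test_foo.py:TestBar.test_baz')
#       -> ('tests/test_foo.py', None, 'TestBar.test_baz')
#   split_test_name('package.module:func')
#       -> (None, 'package.module', 'func')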
def test_address(test):
"""Find the test address for a test, which may be a module, filename,
class, method or function.
"""
if hasattr(test, "address"):
return test.address()
# type-based polymorphism sucks in general, but I believe is
# appropriate here
t = type(test)
file = module = call = None
if t == types.ModuleType:
file = getattr(test, '__file__', None)
module = getattr(test, '__name__', None)
return (src(file), module, call)
if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
module = getattr(test, '__module__', None)
if module is not None:
m = sys.modules[module]
file = getattr(m, '__file__', None)
if file is not None:
file = os.path.abspath(file)
call = getattr(test, '__name__', None)
return (src(file), module, call)
if t == types.MethodType:
cls_adr = test_address(test.im_class)
return (src(cls_adr[0]), cls_adr[1],
"%s.%s" % (cls_adr[2], test.__name__))
# handle unittest.TestCase instances
if isinstance(test, unittest.TestCase):
if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
or hasattr(test, '_testFunc')): # 2.7
# unittest FunctionTestCase
try:
return test_address(test._FunctionTestCase__testFunc)
except AttributeError:
return test_address(test._testFunc)
# regular unittest.TestCase
cls_adr = test_address(test.__class__)
# 2.5 compat: __testMethodName changed to _testMethodName
try:
method_name = test._TestCase__testMethodName
except AttributeError:
method_name = test._testMethodName
return (src(cls_adr[0]), cls_adr[1],
"%s.%s" % (cls_adr[2], method_name))
if (hasattr(test, '__class__') and
test.__class__.__module__ not in ('__builtin__', 'builtins')):
return test_address(test.__class__)
raise TypeError("I don't know what %s is (%s)" % (test, t))
test_address.__test__ = False # do not collect
def try_run(obj, names):
"""Given a list of possible method names, try to run them with the
provided object. Keep going until something works. Used to run
setup/teardown methods for module, package, and function tests.
"""
for name in names:
func = getattr(obj, name, None)
if func is not None:
if type(obj) == types.ModuleType:
# py.test compatibility
if isinstance(func, types.FunctionType):
args, varargs, varkw, defaults = \
inspect.getargspec(func)
else:
# Not a function. If it's callable, call it anyway
if hasattr(func, '__call__') and not inspect.ismethod(func):
func = func.__call__
try:
args, varargs, varkw, defaults = \
inspect.getargspec(func)
args.pop(0) # pop the self off
except TypeError:
raise TypeError("Attribute %s of %r is not a python "
"function. Only functions or callables"
" may be used as fixtures." %
(name, obj))
if len(args):
log.debug("call fixture %s.%s(%s)", obj, name, obj)
return func(obj)
log.debug("call fixture %s.%s", obj, name)
return func()
def src(filename):
"""Find the python source file for a .pyc, .pyo or $py.class file on
jython. Returns the filename provided if it is not a python source
file.
"""
if filename is None:
return filename
if sys.platform.startswith('java') and filename.endswith('$py.class'):
return '.'.join((filename[:-9], 'py'))
base, ext = os.path.splitext(filename)
if ext in ('.pyc', '.pyo', '.py'):
return '.'.join((base, 'py'))
return filename
def regex_last_key(regex):
"""Sort key function factory that puts items that match a
regular expression last.
>>> from nose.config import Config
>>> from nose.pyversion import sort_list
>>> c = Config()
>>> regex = c.testMatch
>>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
>>> sort_list(entries, regex_last_key(regex))
>>> entries
['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
"""
def k(obj):
if regex.search(obj):
return (1, obj)
return (0, obj)
return k
def tolist(val):
"""Convert a value that may be a list or a (possibly comma-separated)
string into a list. The exception: None is returned as None, not [None].
>>> tolist(["one", "two"])
['one', 'two']
>>> tolist("hello")
['hello']
>>> tolist("separate,values, with, commas, spaces , are ,ok")
['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
"""
if val is None:
return None
try:
# might already be a list
val.extend([])
return val
except AttributeError:
pass
# might be a string
try:
return re.split(r'\s*,\s*', val)
except TypeError:
# who knows...
return list(val)
class odict(dict):
"""Simple ordered dict implementation, based on:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
"""
def __init__(self, *arg, **kw):
self._keys = []
super(odict, self).__init__(*arg, **kw)
def __delitem__(self, key):
super(odict, self).__delitem__(key)
self._keys.remove(key)
def __setitem__(self, key, item):
super(odict, self).__setitem__(key, item)
if key not in self._keys:
self._keys.append(key)
def __str__(self):
return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
def clear(self):
super(odict, self).clear()
self._keys = []
def copy(self):
d = super(odict, self).copy()
d._keys = self._keys[:]
return d
def items(self):
return zip(self._keys, self.values())
def keys(self):
return self._keys[:]
def setdefault(self, key, failobj=None):
item = super(odict, self).setdefault(key, failobj)
if key not in self._keys:
self._keys.append(key)
return item
def update(self, dict):
super(odict, self).update(dict)
for key in dict.keys():
if key not in self._keys:
self._keys.append(key)
def values(self):
return map(self.get, self._keys)
def transplant_func(func, module):
"""
Make a function imported from module A appear as if it is located
in module B.
>>> from pprint import pprint
>>> pprint.__module__
'pprint'
>>> pp = transplant_func(pprint, __name__)
>>> pp.__module__
'nose.util'
The original function is not modified.
>>> pprint.__module__
'pprint'
Calling the transplanted function calls the original.
>>> pp([1, 2])
[1, 2]
>>> pprint([1,2])
[1, 2]
"""
from nose.tools import make_decorator
if isgenerator(func):
def newfunc(*arg, **kw):
for v in func(*arg, **kw):
yield v
else:
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
newfunc.__module__ = module
return newfunc
def transplant_class(cls, module):
"""
Make a class appear to reside in `module`, rather than the module in which
it is actually defined.
>>> from nose.failure import Failure
>>> Failure.__module__
'nose.failure'
>>> Nf = transplant_class(Failure, __name__)
>>> Nf.__module__
'nose.util'
>>> Nf.__name__
'Failure'
"""
class C(cls):
pass
C.__module__ = module
C.__name__ = cls.__name__
return C
def safe_str(val, encoding='utf-8'):
try:
return str(val)
except UnicodeEncodeError:
if isinstance(val, Exception):
return ' '.join([safe_str(arg, encoding)
for arg in val])
return unicode(val).encode(encoding)
def is_executable(file):
if not os.path.exists(file):
return False
st = os.stat(file)
return bool(st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-2-clause |
eevee/cocos2d-mirror | cocos/text.py | 1 | 9324 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Text support
CocosNode subclasses supporting text.
They use a suitable pyglet text object to do the work.
Functionality other than that common to all CocosNodes, except 'opacity', is
provided by the member 'element', which is the underlying pyglet object.
'''
__docformat__ = 'restructuredtext'
from director import director
import cocosnode
from batch import *
import pyglet
from pyglet.graphics import OrderedGroup
from pyglet import image
from pyglet.gl import *
from batch import *
class TextElement(cocosnode.CocosNode):
"""
Base class for all cocos text
    Provides the CocosNode interface and a pyglet Batch to store parts.
    Functionality other than that common to all CocosNodes, except 'opacity', is
    provided by the member 'element', which is the underlying pyglet object.
"""
def __init__(self, text='', position=(0,0), **kwargs):
super(TextElement, self).__init__()
self.position = position
self.args = []
self.kwargs = kwargs
kwargs['text']=text
self.group = None
self.batch = None
self.batch = pyglet.graphics.Batch()
self.create_element()
def create_element(self):
self.element = self.klass(group=self.group, batch=self.batch, **self.kwargs)
def draw(self):
glPushMatrix()
self.transform()
self.element.draw()
glPopMatrix()
def _get_opacity(self):
return self.element.color[3]
def _set_opacity(self, value):
self.element.color = tuple(self.element.color[:3]) + (int(value),)
opacity = property(_get_opacity, _set_opacity)
class Label(TextElement):
'''Plain text support
    Functionality other than that common to all CocosNodes, except 'opacity', is
    provided by the member 'element', which is the underlying pyglet object.
    The underlying pyglet object is pyglet.text.Label.
For pyglet 1.1.4 the available init keyword arguments are
- font_name: Font family name(s); the first matching name is used
- font_size: Font size, in points
- bold: bool
- italic: bool
- color: (int, int, int, int) Font colour, as RGBA
- width: Width of the label in pixels, or None
- height: Height of the label in pixels, or None
- anchor_x: one of "left", "center" or "right"
- anchor_y : one of "bottom", "baseline", "center" or "top"
- halign : applies when width is supplied. One of "left", "center" or "right".
- multiline: bool
- dpi: Resolution of the fonts in this layout. Defaults to 96.
'''
klass = pyglet.text.Label
class HTMLLabel(TextElement):
'''HTML formatted text label (supports a subset of HTML 4.01)
    Functionality other than that common to all CocosNodes, except 'opacity', is
    provided by the member 'element', which is the underlying pyglet object.
    The underlying pyglet object is pyglet.text.HTMLLabel.
For pyglet 1.1.4 the available init keyword arguments are
- location: Location object for loading images referred to in the document. By default, the working directory is used.
- width: Width of the label in pixels, or None
- height: Height of the label in pixels, or None
- anchor_x: "left", "center" or "right".
- anchor_y: one of "bottom", "baseline", "center" or "top".
- multiline : bool
- dpi : float, defaults to 96
'''
klass = pyglet.text.HTMLLabel
class PygletRichLabel(pyglet.text.DocumentLabel):
'''This is not a CocosNode - let instantiation be handled by RichLabel
Helper class for RichLabel
'''
def __init__(self, text='',
font_name=None, font_size=None, bold=False, italic=False,
color=None,
x=0, y=0, width=None, height=None,
anchor_x='left', anchor_y='baseline',
halign='left',
multiline=False, dpi=None, batch=None, group=None):
'''Create a rich text label.
:Parameters:
`text` : str
Pyglet attributed (rich) text to display.
`font_name` : str or list
Font family name(s). If more than one name is given, the
first matching name is used.
`font_size` : float
Font size, in points.
`bold` : bool
Bold font style.
`italic` : bool
Italic font style.
`color` : (int, int, int, int) or None
Font colour, as RGBA components in range [0, 255].
None to use font colors defined by text attributes.
`x` : int
X coordinate of the label.
`y` : int
Y coordinate of the label.
`width` : int
Width of the label in pixels, or None
`height` : int
Height of the label in pixels, or None
`anchor_x` : str
Anchor point of the X coordinate: one of ``"left"``,
``"center"`` or ``"right"``.
`anchor_y` : str
Anchor point of the Y coordinate: one of ``"bottom"``,
``"baseline"``, ``"center"`` or ``"top"``.
`halign` : str
Horizontal alignment of text on a line, only applies if
a width is supplied. One of ``"left"``, ``"center"``
or ``"right"``.
`multiline` : bool
If True, the label will be word-wrapped and accept newline
characters. You must also set the width of the label.
`dpi` : float
Resolution of the fonts in this layout. Defaults to 96.
`batch` : `Batch`
Optional graphics batch to add the label to.
`group` : `Group`
Optional graphics group to use.
'''
text = '{color (255, 255, 255, 255)}' + text
document = pyglet.text.decode_attributed(text)
super(PygletRichLabel, self).__init__(document, x, y, width, height,
anchor_x, anchor_y,
multiline, dpi, batch, group)
style = dict(halign=halign)
if font_name:
style['font_name'] = font_name
if font_size:
style['font_size'] = font_size
if bold:
style['bold'] = bold
if italic:
style['italic'] = italic
if color:
style['color'] = color
self.document.set_style(0, len(self.document.text), style)
class RichLabel(TextElement):
    '''Displays pyglet attributed (rich) text
    The underlying pyglet object is a custom, cocos-provided PygletRichLabel
    element, a subclass of pyglet.text.DocumentLabel.
For pyglet 1.1.4 the available init keyword arguments are
- font_name: Font family name(s); first matching is used
- font_size: Font size, in points.
- bold: bool
- italic: bool
- color : (int, int, int, int) or None
- width: Width of the label in pixels, or None
- height: Height of the label in pixels, or None
- anchor_x: "left", "center" or "right"
- anchor_y: one of "bottom", "baseline", "center" or "top"
- halign : only when a width is supplied. One of "left", "center", "right".
- multiline : bool
- dpi : Resolution of the fonts in this layout. Defaults to 96.
'''
klass = PygletRichLabel
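# A minimal usage sketch (illustrative; 'some_layer' is an assumed cocos Layer
# and the director window must already be initialized):
#
#   label = Label('Hello, cocos', position=(320, 240),
#                 font_size=24, anchor_x='center', anchor_y='center')
#   label.opacity = 128      # forwarded to the pyglet element's RGBA color
#   some_layer.add(label)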
| bsd-3-clause |
syci/domsense-agilebg-addons | account_followup_choose_payment/account_followup.py | 1 | 1243 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class followup_line(osv.osv):
_inherit = 'account_followup.followup.line'
_columns = {
'description2': fields.text('Additional Printed Message', translate=True),
}
followup_line()
| gpl-2.0 |
mrbox/django | django/contrib/sessions/backends/file.py | 40 | 7975 | import datetime
import errno
import logging
import os
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS, CreateError, SessionBase,
)
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = type(self)._get_storage_path()
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
@classmethod
def _get_storage_path(cls):
try:
return cls._storage_path
except AttributeError:
storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not storage_path:
storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % storage_path)
cls._storage_path = storage_path
return storage_path
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(set(VALID_KEY_CHARS)):
raise InvalidSessionKey(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def _last_modification(self):
"""
Return the modification time of the file storing the session's content.
"""
modification = os.stat(self._key_to_file()).st_mtime
if settings.USE_TZ:
modification = datetime.datetime.utcfromtimestamp(modification)
modification = modification.replace(tzinfo=timezone.utc)
else:
modification = datetime.datetime.fromtimestamp(modification)
return modification
def _expiry_date(self, session_data):
"""
Return the expiry time of the file storing the session's content.
"""
expiry = session_data.get('_session_expiry')
if not expiry:
expiry = self._last_modification() + datetime.timedelta(seconds=settings.SESSION_COOKIE_AGE)
return expiry
def load(self):
session_data = {}
try:
with open(self._key_to_file(), "rb") as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
# Remove expired sessions.
expiry_age = self.get_expiry_age(expiry=self._expiry_date(session_data))
if expiry_age <= 0:
session_data = {}
self.delete()
self.create()
except (IOError, SuspiciousOperation):
self._session_key = None
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
def save(self, must_create=False):
if self.session_key is None:
return self.create()
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError as e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
# This will atomically rename the file (os.rename) if the OS
# supports it. Otherwise this will result in a shutil.copy2
# and os.unlink (for example on Windows). See #9084.
shutil.move(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
@classmethod
def clear_expired(cls):
storage_path = cls._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
for session_file in os.listdir(storage_path):
if not session_file.startswith(file_prefix):
continue
session_key = session_file[len(file_prefix):]
session = cls(session_key)
# When an expired session is loaded, its file is removed, and a
# new file is immediately created. Prevent this by disabling
# the create() method.
session.create = lambda: None
session.load()
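# An illustrative round trip through this backend (assumes a configured Django
# settings module with SESSION_ENGINE pointing at it):
#
#   from django.contrib.sessions.backends.file import SessionStore
#   s = SessionStore()
#   s['cart'] = [1, 2, 3]
#   s.save()        # writes <storage_path>/<SESSION_COOKIE_NAME><session_key>
#   again = SessionStore(session_key=s.session_key)
#   assert again.load()['cart'] == [1, 2, 3]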
| bsd-3-clause |
pk-sam/crosswalk-test-suite | webapi/tct-animations-css3-tests/inst.xpk.py | 357 | 6759 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
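# Illustrative invocations (device serial, host and user name are assumptions):
#
#   python inst.xpk.py -i -s 0123456789ABCDEF            # install over sdb
#   python inst.xpk.py -u -m SSH -s 192.168.0.10 -a app  # uninstall over ssh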
| bsd-3-clause |
erdincay/pyload | module/plugins/accounts/FilerNet.py | 6 | 2020 | # -*- coding: utf-8 -*-
import re
import time
from module.plugins.internal.Account import Account
class FilerNet(Account):
__name__ = "FilerNet"
__type__ = "account"
__version__ = "0.07"
__status__ = "testing"
__description__ = """Filer.net account plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it")]
TOKEN_PATTERN = r'name="_csrf_token" value="(.+?)"'
    VALID_UNTIL_PATTERN = r'Der Premium-Zugang ist gültig bis (.+)\.\s*</td>'
TRAFFIC_PATTERN = r'Traffic</th>\s*<td>([^<]+)</td>'
FREE_PATTERN = r'Account Status</th>\s*<td>\s*Free'
def parse_info(self, user, password, data, req):
html = self.load("https://filer.net/profile")
#: Free user
if re.search(self.FREE_PATTERN, html):
return {'premium': False, 'validuntil': None, 'trafficleft': None}
        until = re.search(self.VALID_UNTIL_PATTERN, html)
traffic = re.search(self.TRAFFIC_PATTERN, html)
if until and traffic:
validuntil = time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S"))
trafficleft = self.parse_traffic(traffic.group(1))
return {'premium': True, 'validuntil': validuntil, 'trafficleft': trafficleft}
else:
self.log_error(_("Unable to retrieve account information"))
return {'premium': False, 'validuntil': None, 'trafficleft': None}
def login(self, user, password, data, req):
html = self.load("https://filer.net/login")
token = re.search(self.TOKEN_PATTERN, html).group(1)
html = self.load("https://filer.net/login_check",
post={'_username' : user,
'_password' : password,
'_remember_me': "on",
'_csrf_token' : token,
'_target_path': "https://filer.net/"})
if 'Logout' not in html:
self.login_fail()
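# Illustrative shape of the parse_info() result for a premium account whose
# profile page matches the patterns above (all values are placeholders):
#
#   {'premium': True,
#    'validuntil': <epoch seconds parsed from the "gültig bis" timestamp>,
#    'trafficleft': <whatever parse_traffic() returns for e.g. "5 GB">}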
| gpl-3.0 |
maelnor/nova | nova/tests/api/ec2/test_api.py | 10 | 24106 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API endpoint."""
import random
import re
import StringIO
import boto
import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
if hasattr(boto.connection, 'HTTPResponse'):
httplib = boto.connection
else:
import httplib
import fixtures
import webob
from nova.api import auth
from nova.api import ec2
from nova.api.ec2 import ec2utils
from nova import block_device
from nova import context
from nova import exception
from nova.openstack.common import versionutils
from nova import test
from nova.tests import matchers
class FakeHttplibSocket(object):
"""a fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
"""Returns the socket's internal buffer."""
return self._buffer
class FakeHttplibConnection(object):
"""A fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our WSGI app, we then wait for the response and turn it back into
the HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
def request(self, method, path, data, headers):
req = webob.Request.blank(path)
req.method = method
req.body = data
req.headers = headers
req.headers['Accept'] = 'text/html'
req.host = self.host
# Call the WSGI app, get the HTTP response
resp = str(req.get_response(self.app))
# For some reason, the response doesn't have "HTTP/1.0 " prepended; I
# guess that's a function the web server usually provides.
resp = "HTTP/1.0 %s" % resp
self.sock = FakeHttplibSocket(resp)
self.http_response = httplib.HTTPResponse(self.sock)
# NOTE(vish): boto is accessing private variables for some reason
self._HTTPConnection__response = self.http_response
self.http_response.begin()
def getresponse(self):
return self.http_response
def getresponsebody(self):
return self.sock.response_string
def close(self):
"""Required for compatibility with boto/tornado."""
pass
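# Sketch of how the fake socket drives httplib directly (illustrative and
# standalone; not part of the test flow below):
#
#   sock = FakeHttplibSocket("HTTP/1.0 200 OK\r\nContent-Length: 2\r\n\r\nok")
#   resp = httplib.HTTPResponse(sock)
#   resp.begin()
#   assert resp.status == 200 and resp.read() == "ok"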
class XmlConversionTestCase(test.NoDBTestCase):
"""Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertIsNone(conv('None'))
self.assertEqual(conv('True'), True)
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
self.assertEqual(conv('-57.12'), -57.12)
self.assertEqual(conv('0x57'), 0x57)
self.assertEqual(conv('-0x57'), -0x57)
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
self.assertEqual(conv('0.0'), 0.0)
self.assertEqual(conv('1e-8'), 0.0)
self.assertEqual(conv('-1e-8'), 0.0)
self.assertEqual(conv('0xDD8G'), '0xDD8G')
self.assertEqual(conv('0XDD8G'), '0XDD8G')
self.assertEqual(conv('-stringy'), '-stringy')
self.assertEqual(conv('stringy'), 'stringy')
self.assertEqual(conv('add'), 'add')
self.assertEqual(conv('remove'), 'remove')
self.assertEqual(conv(''), '')
class Ec2utilsTestCase(test.NoDBTestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
def test_bad_ec2_id(self):
self.assertRaises(exception.InvalidEc2Id,
ec2utils.ec2_id_to_id,
'badone')
def test_id_to_ec2_id(self):
self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
def test_dict_from_dotted_str(self):
in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
expected_dict = {
'block_device_mapping': {
'1': {'device_name': '/dev/sda1',
'ebs': {'snapshot_id': 'snap-0000001c',
'volume_size': 80,
'delete_on_termination': False}},
'2': {'device_name': '/dev/sdc',
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
self.assertThat(out_dict, matchers.DictMatches(expected_dict))
    def test_properties_root_device_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
properties0 = {'mappings': mappings}
properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
root_device_name = block_device.properties_root_device_name(
properties0)
self.assertEqual(root_device_name, '/dev/sda1')
root_device_name = block_device.properties_root_device_name(
properties1)
self.assertEqual(root_device_name, '/dev/sdb')
def test_regex_from_ec2_regex(self):
def _test_re(ec2_regex, expected, literal, match=True):
regex = ec2utils.regex_from_ec2_regex(ec2_regex)
self.assertEqual(regex, expected)
if match:
self.assertIsNotNone(re.match(regex, literal))
else:
self.assertIsNone(re.match(regex, literal))
# wildcards
_test_re('foo', '\Afoo\Z(?s)', 'foo')
_test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
_test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
        _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo  bar', match=False)
_test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
# backslashes and escaped wildcards
_test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
_test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
_test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
_test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
_test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
_test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
# analog to the example given in the EC2 API docs
ec2_regex = '\*nova\?\\end'
expected = r'\A[*]nova[?]\\end\Z(?s)'
literal = r'*nova?\end'
_test_re(ec2_regex, expected, literal)
def test_mapping_prepend_dev(self):
mappings = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
expected_result = [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': '/dev/sdb1'},
{'virtual': 'swap',
'device': '/dev/sdb2'},
{'virtual': 'ephemeral0',
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
self.assertThat(block_device.mappings_prepend_dev(mappings),
matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API."""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
# NOTE(vish): skipping the Authorizer
roles = ['sysadmin', 'netadmin']
ctxt = context.RequestContext('fake', 'fake', roles=roles)
self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
), 'nova.api.ec2.cloud.CloudController'))))
self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=8773,
path='/services/Cloud')
if api_version:
self.ec2.APIVersion = api_version
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
if versionutils.is_compatible('2.14', boto.Version, same_major=False):
self.ec2.new_http_connection(host or self.host, 8773,
is_secure).AndReturn(self.http)
elif versionutils.is_compatible('2', boto.Version, same_major=False):
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly.
"""
self.expect_http()
self.mox.ReplayAll()
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
# Attempt to terminate an invalid instance.
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.terminate_instances, "i-00000005")
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly.
"""
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.expect_http()
self.mox.ReplayAll()
self.ec2.create_key_pair(keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEqual(len(results), 1)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
requesting a second keypair with the same name fails sanely.
"""
self.expect_http()
self.mox.ReplayAll()
self.ec2.create_key_pair('test')
try:
self.ec2.create_key_pair('test')
except boto_exc.EC2ResponseError as e:
if e.code == 'InvalidKeyPair.Duplicate':
pass
else:
self.assertEqual('InvalidKeyPair.Duplicate', e.code)
else:
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
# Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_create_delete_security_group(self):
# Test that we can create a security group.
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
self.ec2.create_security_group(security_group_name, 'test group')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 2)
self.assertIn(security_group_name, [group.name for group in rv])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
def test_group_name_valid_chars_security_group(self):
"""Test that we sanely handle invalid security group names.
EC2 API Spec states we should only accept alphanumeric characters,
spaces, dashes, and underscores. Amazon implementation
accepts more characters - so, [:print:] is ok.
"""
bad_strict_ec2 = "aa \t\x01\x02\x7f"
bad_amazon_ec2 = "aa #^% -=99"
test_raise = [
(True, bad_amazon_ec2, "test desc"),
(True, "test name", bad_amazon_ec2),
(False, bad_strict_ec2, "test desc"),
]
for t in test_raise:
self.expect_http()
self.mox.ReplayAll()
self.flags(ec2_strict_validation=t[0])
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
t[1],
t[2])
test_accept = [
(False, bad_amazon_ec2, "test desc"),
(False, "test name", bad_amazon_ec2),
]
for t in test_accept:
self.expect_http()
self.mox.ReplayAll()
self.flags(ec2_strict_validation=t[0])
self.ec2.create_security_group(t[1], t[2])
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(t[1])
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
        API Spec states that the length should not exceed 255 characters.
"""
self.expect_http()
self.mox.ReplayAll()
# Test block group_name > 255 chars
security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
for x in range(random.randint(256, 266)))
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
security_group_name,
'test group')
def test_authorize_revoke_security_group_cidr(self):
"""Test that we can add and remove CIDR based rules
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '0.0.0.0/0')
group.authorize('icmp', -1, -1, '0.0.0.0/0')
group.authorize('udp', 80, 81, '0.0.0.0/0')
group.authorize('tcp', 1, 65535, '0.0.0.0/0')
group.authorize('udp', 1, 65535, '0.0.0.0/0')
group.authorize('icmp', 1, 0, '0.0.0.0/0')
group.authorize('icmp', 0, 1, '0.0.0.0/0')
group.authorize('icmp', 0, 0, '0.0.0.0/0')
def _assert(message, *args):
try:
group.authorize(*args)
except boto_exc.EC2ResponseError as e:
self.assertEqual(e.status, 400, 'Expected status to be 400')
self.assertIn(message, e.error_message)
else:
raise self.failureException, 'EC2ResponseError not raised'
# Invalid CIDR address
_assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
# Missing ports
_assert('Not enough parameters', 'tcp', '0.0.0.0/0')
# from port cannot be greater than to port
_assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
# For tcp, negative values are not allowed
_assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
# For tcp, valid port range 1-65535
_assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
# Invalid Cidr for ICMP type
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
# Invalid protocol
_assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
# Invalid port
_assert('Invalid input received: To and From ports must be integers',
'tcp', " ", "81", '0.0.0.0/0')
# Invalid icmp port
_assert('Invalid input received: '
'Type and Code must be integers for ICMP protocol type',
'icmp', " ", "81", '0.0.0.0/0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
# Invalid CIDR Address
_assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
# Invalid Cidr ports
_assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEqual(len(group.rules), 8)
self.assertEqual(int(group.rules[0].from_port), 80)
self.assertEqual(int(group.rules[0].to_port), 81)
self.assertEqual(len(group.rules[0].grants), 1)
self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '0.0.0.0/0')
group.revoke('icmp', -1, -1, '0.0.0.0/0')
group.revoke('udp', 80, 81, '0.0.0.0/0')
group.revoke('tcp', 1, 65535, '0.0.0.0/0')
group.revoke('udp', 1, 65535, '0.0.0.0/0')
group.revoke('icmp', 1, 0, '0.0.0.0/0')
group.revoke('icmp', 0, 1, '0.0.0.0/0')
group.revoke('icmp', 0, 0, '0.0.0.0/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_cidr_v6(self):
"""Test that we can add and remove CIDR based rules
to a security group for IPv6
"""
self.expect_http()
self.mox.ReplayAll()
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
group = [grp for grp in rv if grp.name == security_group_name][0]
self.assertEqual(len(group.rules), 1)
self.assertEqual(int(group.rules[0].from_port), 80)
self.assertEqual(int(group.rules[0].to_port), 81)
self.assertEqual(len(group.rules[0].grants), 1)
self.assertEqual(str(group.rules[0].grants[0]), '::/0')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke('tcp', 80, 81, '::/0')
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
rv = self.ec2.get_all_security_groups()
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
def test_authorize_revoke_security_group_foreign_group(self):
"""Test that we can grant and revoke another security group access
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice(rand_string)
for x in range(random.randint(4, 8)))
group = self.ec2.create_security_group(security_group_name,
'test group')
self.expect_http()
self.mox.ReplayAll()
other_group = self.ec2.create_security_group(other_security_group_name,
'some other group')
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.authorize(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
        # I don't bother checking that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEqual(len(group.rules), 3)
self.assertEqual(len(group.rules[0].grants), 1)
self.assertEqual(str(group.rules[0].grants[0]),
'%s-%s' % (other_security_group_name, 'fake'))
self.expect_http()
self.mox.ReplayAll()
rv = self.ec2.get_all_security_groups()
for group in rv:
if group.name == security_group_name:
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke(src_group=other_group)
self.expect_http()
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
self.ec2.delete_security_group(other_security_group_name)
| apache-2.0 |
backupManager/pyflag | src/pyflag/parser.py | 7 | 5299 | #!/usr/bin/env python
""" This is a parser for the table search widget. The parser
implements a simple language for structured queries depending on the
type of the columns presented.
"""
# Michael Cohen <scudette@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
def eval_expression(elements, name, operator, arg, result_ui):
# print "Evaluating %s.%s(%r)" % (name,operator,arg)
## Try and find the element with the specified name:
element = None
for e in elements:
if e.name == name:
element = e
break
if not element:
return "1"
#raise RuntimeError("Column %s not known" % name)
## Use the element to parse:
return element.parse(name, operator, arg, ui=result_ui, elements = elements)
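# For example, eval_expression(elements, 'Timestamp', '<', '2006-10-01', ui)
# finds the element whose .name is 'Timestamp' and delegates to its parse()
# method, which renders the matching SQL fragment for that column type.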
# Begin -- grammar generated by Yapps
import sys, re
from yapps import runtime
class SearchParserScanner(runtime.Scanner):
patterns = [
("'\\\\)'", re.compile('\\)')),
("'\\\\('", re.compile('\\(')),
('[ \r\t\n]+', re.compile('[ \r\t\n]+')),
('END', re.compile('$')),
('STR', re.compile('"([^\\\\"]+|\\\\.)*"')),
('STR2', re.compile("'([^\\\\']+|\\\\.)*'")),
('WORD', re.compile('[-:+*/!@$%^&=\\<\\>.a-zA-Z0-9_]+')),
('LOGICAL_OPERATOR', re.compile('(and|or|AND|OR)')),
]
def __init__(self, str,*args,**kw):
runtime.Scanner.__init__(self,None,{'[ \r\t\n]+':None,},str,*args,**kw)
class SearchParser(runtime.Parser):
Context = runtime.Context
def goal(self, types, ui, _parent=None):
_context = self.Context(_parent, self._scanner, 'goal', [types, ui])
clause = self.clause(types, ui, _context)
END = self._scan('END', context=_context)
return clause
def clause(self, types, ui, _parent=None):
_context = self.Context(_parent, self._scanner, 'clause', [types, ui])
expr = self.expr(types, ui, _context)
result = expr
while self._peek('LOGICAL_OPERATOR', 'END', "'\\\\)'", context=_context) == 'LOGICAL_OPERATOR':
LOGICAL_OPERATOR = self._scan('LOGICAL_OPERATOR', context=_context)
logical_operator = LOGICAL_OPERATOR
expr = self.expr(types, ui, _context)
result = "%s %s %s" % (result, logical_operator, expr)
return result
def term(self, _parent=None):
_context = self.Context(_parent, self._scanner, 'term', [])
_token = self._peek('STR', 'STR2', 'WORD', context=_context)
if _token == 'STR':
STR = self._scan('STR', context=_context)
return eval(STR)
elif _token == 'STR2':
STR2 = self._scan('STR2', context=_context)
return eval(STR2)
else: # == 'WORD'
WORD = self._scan('WORD', context=_context)
return WORD
def expr(self, types, ui, _parent=None):
_context = self.Context(_parent, self._scanner, 'expr', [types, ui])
_token = self._peek('STR', 'STR2', 'WORD', "'\\\\('", context=_context)
if _token != "'\\\\('":
term = self.term(_context)
column = term
WORD = self._scan('WORD', context=_context)
operator = WORD
term = self.term(_context)
return eval_expression(types, column,operator,term, ui)
else: # == "'\\\\('"
self._scan("'\\\\('", context=_context)
clause = self.clause(types, ui, _context)
self._scan("'\\\\)'", context=_context)
return "( %s )" % clause
def parse(rule, text):
P = SearchParser(SearchParserScanner(text))
return runtime.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
def parse_to_sql(text, types, ui):
P = SearchParser(SearchParserScanner(text))
try:
return P.goal(types, ui)
except runtime.SyntaxError, e:
raise RuntimeError("\n%s\n%s^\n%s" % (text, '-' * e.pos[2], e.msg))
if __name__=='__main__':
import pyflag.TableObj as TableObj
types = [ TableObj.TimestampType(name='Timestamp'),
TableObj.IPType(name='IP Address')]
test = 'Timestamp < "2006-10-01 \\\"10:10:00\\\"" or (Timestamp before \'2006-11-01 "10:10:00"\' and "IP Address" netmask "10.10.10.0/24") or "IP Address" = 192.168.1.1'
print "Will test %s" % test
print parse_to_sql(test,types)
| gpl-2.0 |
dhongu/l10n-romania | currency_rate_update/services/update_service_MX_BdM.py | 1 | 2790 | # -*- coding: utf-8 -*-
# © 2009 Camptocamp
# © 2013-2014 Agustin Cruz openpyme.mx
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .currency_getter_interface import CurrencyGetterInterface
import logging
_logger = logging.getLogger(__name__)
class MX_BdMGetter(CurrencyGetterInterface):
"""Implementation of Currency_getter_factory interface
for Banco de México service
"""
code = 'MX_BdM'
name = 'Bank of Mexico'
supported_currency_array = [
"ARS", "AUD", "BBD", "BMD", "BOB", "BRL", "BSD", "BZD", "CAD", "CHF",
"CLP", "CNH", "CNY", "COP", "CRC", "CUP", "CZK", "DKK", "DOP", "DZD",
"EGP", "ESD", "EUR", "FJD", "GBP", "GTQ", "GYD", "HKD", "HNL", "HUF",
"IDR", "ILS", "INR", "IQD", "JMD", "JPY", "KES", "KRW", "KWD", "MAD",
"MYR", "NGN", "NIC", "NOK", "NZD", "PAB", "PEN", "PHP", "PLN", "PYG",
"RON", "RUB", "SAR", "SEK", "SGD", "SVC", "THB", "TRY", "TTD", "TWD",
"UAH", "USD", "USD", "UYP", "VEF", "VND", "ZAR"]
def rate_retrieve(self):
""" Get currency exchange from Banxico.xml and proccess it
TODO: Get correct data from xml instead of process string
"""
url = ('http://www.banxico.org.mx/rsscb/rss?'
'BMXC_canal=pagos&BMXC_idioma=es')
from xml.dom.minidom import parse
from io import BytesIO
logger = logging.getLogger(__name__)
logger.debug("Banxico currency rate service : connecting...")
rawfile = self.get_url(url)
dom = parse(BytesIO(rawfile))
logger.debug("Banxico sent a valid XML file")
value = dom.getElementsByTagName('cb:value')[0]
rate = value.firstChild.nodeValue
return float(rate)
def get_updated_currency(self, currency_array, main_currency,
max_delta_days=1):
"""implementation of abstract method of Curreny_getter_interface"""
logger = logging.getLogger(__name__)
# we do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
        # Supported currencies
        supported = ['MXN', 'USD']
        for curr in currency_array:
            if curr in supported:
# Get currency data
main_rate = self.rate_retrieve()
if main_currency == 'MXN':
rate = 1 / main_rate
else:
rate = main_rate
else:
# No other currency supported
continue
self.updated_currency[curr] = rate
logger.debug("Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
return self.updated_currency, self.log_info
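# Illustrative use (a minimal sketch, not part of the addon: in practice the
# getter is resolved through the currency_rate_update factory, and get_url()
# needs network access to Banxico):
#
#     getter = MX_BdMGetter()
#     rates, log_info = getter.get_updated_currency(['USD'], 'MXN')
#     # With MXN as the main currency, the USD rate is inverted:
#     # rates['USD'] == 1.0 / (MXN per USD)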
| agpl-3.0 |
russellb/nova | nova/tests/test_instance_types_extra_specs.py | 9 | 7223 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 University of Southern California
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for instance types extra specs code
"""
from nova import context
from nova import db
from nova import test
class InstanceTypeExtraSpecsTestCase(test.TestCase):
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
self.context = context.get_admin_context()
values = dict(name="cg1.4xlarge",
memory_mb=22000,
vcpus=8,
root_gb=1690,
ephemeral_gb=2000,
flavorid=105)
specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus=2,
xpu_model="Tesla 2050")
values['extra_specs'] = specs
ref = db.instance_type_create(self.context,
values)
self.instance_type_id = ref["id"]
def tearDown(self):
# Remove the instance type from the database
db.instance_type_destroy(self.context, "cg1.4xlarge")
super(InstanceTypeExtraSpecsTestCase, self).tearDown()
def test_instance_type_specs_get(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050")
actual_specs = db.instance_type_extra_specs_get(
self.context,
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_delete(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2")
db.instance_type_extra_specs_delete(self.context,
self.instance_type_id,
"xpu_model")
actual_specs = db.instance_type_extra_specs_get(
self.context,
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_update(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Sandy Bridge",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050")
db.instance_type_extra_specs_update_or_create(
self.context,
self.instance_type_id,
dict(cpu_model="Sandy Bridge"))
actual_specs = db.instance_type_extra_specs_get(
self.context,
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_extra_specs_create(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050",
net_arch="ethernet",
net_mbps="10000")
db.instance_type_extra_specs_update_or_create(
self.context,
self.instance_type_id,
dict(net_arch="ethernet",
net_mbps=10000))
actual_specs = db.instance_type_extra_specs_get(
self.context,
self.instance_type_id)
self.assertEquals(expected_specs, actual_specs)
def test_instance_type_get_with_extra_specs(self):
instance_type = db.instance_type_get(
self.context,
self.instance_type_id)
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.instance_type_get(
self.context,
5)
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_by_name_with_extra_specs(self):
instance_type = db.instance_type_get_by_name(
self.context,
"cg1.4xlarge")
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.instance_type_get_by_name(
self.context,
"m1.small")
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_by_flavor_id_with_extra_specs(self):
instance_type = db.instance_type_get_by_flavor_id(
self.context,
105)
self.assertEquals(instance_type['extra_specs'],
dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus="2",
xpu_model="Tesla 2050"))
instance_type = db.instance_type_get_by_flavor_id(
self.context,
2)
self.assertEquals(instance_type['extra_specs'], {})
def test_instance_type_get_all(self):
specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
xpu_arch="fermi",
xpus='2',
xpu_model="Tesla 2050")
types = db.instance_type_get_all(self.context)
name2specs = {}
for instance_type in types:
name = instance_type['name']
name2specs[name] = instance_type['extra_specs']
self.assertEquals(name2specs['cg1.4xlarge'], specs)
self.assertEquals(name2specs['m1.small'], {})
| apache-2.0 |
lihui7115/ChromiumGStreamerBackend | build/ios/PRESUBMIT.py | 49 | 1358 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
"""Chromium presubmit script for src/tools/ios.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
WHITELIST_FILE = 'build/ios/grit_whitelist.txt'
def _CheckWhitelistSorted(input_api, output_api):
for path in input_api.LocalPaths():
if WHITELIST_FILE == path:
lines = open(os.path.join('../..', WHITELIST_FILE)).readlines()
i = 0
while i < len(lines) - 1 and lines[i] <= lines[i + 1]:
i += 1
if i < len(lines) - 1:
return [output_api.PresubmitError(
'The file ' + WHITELIST_FILE + ' must be sorted. ' +
'First offending line: #' + str(i + 2))]
return []
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckWhitelistSorted(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
| bsd-3-clause |
taichatha/youtube-dl | youtube_dl/extractor/ultimedia.py | 106 | 3657 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
clean_html,
)
class UltimediaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ultimedia\.com/default/index/video[^/]+/id/(?P<id>[\d+a-z]+)'
_TESTS = [{
# news
'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
'md5': '276a0e49de58c7e85d32b057837952a2',
'info_dict': {
'id': 's8uk0r',
'ext': 'mp4',
'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées',
'description': 'md5:3e5c8fd65791487333dda5db8aed32af',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150317',
},
}, {
# music
'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
'md5': '2ea3513813cf230605c7e2ffe7eca61c',
'info_dict': {
'id': 'xvpfp8',
'ext': 'mp4',
'title': "Two - C'est la vie (Clip)",
'description': 'Two',
'thumbnail': 're:^https?://.*\.jpg',
'upload_date': '20150224',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
deliver_url = self._proto_relative_url(self._search_regex(
r'<iframe[^>]+src="((?:https?:)?//(?:www\.)?ultimedia\.com/deliver/[^"]+)"',
webpage, 'deliver URL'), compat_urllib_parse_urlparse(url).scheme + ':')
deliver_page = self._download_webpage(
deliver_url, video_id, 'Downloading iframe page')
if '>This video is currently not available' in deliver_page:
raise ExtractorError(
'Video %s is currently not available' % video_id, expected=True)
player = self._parse_json(
self._search_regex(
r"jwplayer\('player(?:_temp)?'\)\.setup\(({.+?})\)\.on",
deliver_page, 'player'),
video_id)
quality = qualities(['flash', 'html5'])
formats = []
for mode in player['modes']:
video_url = mode.get('config', {}).get('file')
if not video_url:
continue
if re.match(r'https?://www\.youtube\.com/.+?', video_url):
return self.url_result(video_url, 'Youtube')
formats.append({
'url': video_url,
'format_id': mode.get('type'),
'quality': quality(mode.get('type')),
})
self._sort_formats(formats)
thumbnail = player.get('image')
title = clean_html((
self._html_search_regex(
r'(?s)<div\s+id="catArticle">.+?</div>(.+?)</h1>',
webpage, 'title', default=None) or
self._search_regex(
r"var\s+nameVideo\s*=\s*'([^']+)'",
deliver_page, 'title')))
description = clean_html(self._html_search_regex(
r'(?s)<span>Description</span>(.+?)</p>', webpage,
'description', fatal=False))
upload_date = unified_strdate(self._search_regex(
r'Ajouté le\s*<span>([^<]+)', webpage,
'upload date', fatal=False))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
}
| unlicense |
sparkslabs/kamaelia | Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph/ParticleDragger.py | 3 | 4104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
================================
Drag handler for Topology Viewer
================================
A subclass of Kamaelia.UI.MH.DragHandler that implements "click and hold"
dragging of particles for the TopologyViewer.
Example Usage
-------------
See source for TopologyViewer.
How does it work?
-----------------
This is an implementation of Kamaelia.UI.MH.DragHandler. See that for more
details.
The detect() method uses the withinRadius method of the physics attribute of the
'app' to determine which (if any) particle the mouse is hovering over when the
drag is started. If there is no particle, then the drag does not begin.
At the start of the drag, the particle is 'frozen' to prevent motion due to the
physics model of the topology viewer. This is achieved by calling the freeze()
and unfreeze() methods of the particle. The particle is also 'selected'.
During the drag the particle's coordinates are updated and the physics model is
notified of the change.
"""
from Kamaelia.UI.MH import DragHandler
class ParticleDragger(DragHandler):
"""\
ParticleDragger(event,app) -> new ParticleDragger object.
Implements mouse dragging of particles in a topology viewer. Bind the
handle(...) class method to the MOUSEBUTTONDOWN pygame event to use it (via
a lambda function or equivalent)
Keyword Arguments:
    - event -- pygame event object causing this
- app -- PyGameApp component this is happening in
"""
def detect(self, pos, button):
"""detect( (x,y), button) -> (x,y) of particle or False if mouse (x,y) not over a particle"""
# find particles under the mouse pos
pos = int(pos[0] + self.app.left), int(pos[1] + self.app.top)
inRange = self.app.physics.withinRadius( pos, self.app.biggestRadius )
P = 0
RSQUARED = 1
inRange = list(filter(lambda x : x[P].radius*x[P].radius >= x[RSQUARED], inRange))
if len(inRange) > 0:
# of those in range, find one whose centre is nearest to the mouse pos
best = -1
for (p,rsquared) in inRange:
if best < 0 or rsquared < best:
best = rsquared
self.particle = p
self.particle.freeze() # tell the particle its not allowed to move (zero velocity)
# select this particle
self.app.selectParticle(self.particle)
# return the drag start coordinates
return self.particle.getLoc()
else:
self.app.selectParticle(None)
return False
def drag(self,newx,newy):
"""\
Handler for the duration of the dragging operation.
Updates the particle position as it is dragged.
"""
self.particle.pos = (newx,newy)
self.app.physics.updateLoc(self.particle)
def release(self,newx, newy):
"""\
Handler for the end of the dragging operation
Updates the particle position before releasing it.
"""
self.drag(newx, newy)
self.particle.unFreeze()
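# Illustrative wiring (a minimal sketch, not the authoritative TopologyViewer
# code): inside a PyGameApp-derived component such as the topology viewer, the
# handler is bound roughly like this, where "self" must provide the physics,
# biggestRadius, left/top and selectParticle(...) members that detect() uses:
#
#     import pygame
#     self.addHandler(pygame.MOUSEBUTTONDOWN,
#                     lambda event: ParticleDragger.handle(event, self))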
| apache-2.0 |
qwcode/pip | pip/_vendor/requests/packages/charade/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
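# For reference, charade's companion chardistribution module derives the index
# into this table from a two-byte Big5 sequence roughly as follows (a sketch of
# Big5DistributionAnalysis.get_order; first_char and second_char are the raw
# byte values, and only first_char >= 0xA4 is treated as a valid lead byte):
#
#     if second_char >= 0xA1:
#         order = 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
#     else:
#         order = 157 * (first_char - 0xA4) + second_char - 0x40
#
# Big5CharToFreqOrder[order] then gives that character's frequency rank.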
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| mit |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.4/django/utils/dictconfig.py | 335 | 22939 | # This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
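# For example (illustrative, not part of the original module):
# valid_ident('console') returns True, while valid_ident('2bad') raises
# ValueError because it does not match IDENTIFIER.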
#
# This function is defined in logging only in recent versions of Python
#
try:
from logging import _checkLevel
except ImportError:
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in logging._levelNames:
raise ValueError('Unknown level: %r' % level)
rv = logging._levelNames[level]
else:
raise TypeError('Level not an integer or a '
'valid string: %r' % level)
return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
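    # Illustrative behaviour (the config shown is an assumption, not from
    # this module): with self.config == {'handlers': {'console': {'level':
    # 'DEBUG'}}}, the string 'cfg://handlers.console.level' converts to
    # 'DEBUG', while 'ext://sys.stderr' resolves to the sys.stderr object
    # via resolve().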
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = root.manager.loggerDict.keys()
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError, te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError, e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError, e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError, e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError, te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(_checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError, e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
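# A minimal usage sketch (illustrative only; the keys follow the version-1
# schema this module implements):
#
#   dictConfig({
#       'version': 1,
#       'formatters': {'brief': {'format': '%(levelname)s:%(name)s:%(message)s'}},
#       'handlers': {'console': {'class': 'logging.StreamHandler',
#                                'formatter': 'brief', 'level': 'INFO'}},
#       'root': {'handlers': ['console'], 'level': 'INFO'},
#   })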
| lgpl-3.0 |
mancoast/CPythonPyc_test | fail/300_test_site.py | 3 | 9767 | """Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.support import TestSkipped, run_unittest, TESTFN
import builtins
import os
import sys
import encodings
import subprocess
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
import site
else:
raise TestSkipped("importation of site.py suppressed")
if not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
"""Tests for helper functions.
The setting of the encoding (set using sys.setdefaultencoding) used by
the Unicode implementation is not tested.
"""
def setUp(self):
"""Save a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path = self.sys_path
def test_makepath(self):
        # Test that makepath() returns an absolute path as its first return
        # value and a case-normalized version of the absolute path as its
        # second value.
path_parts = ("Beginning", "End")
original_dir = os.path.join(*path_parts)
abs_dir, norm_dir = site.makepath(*path_parts)
self.failUnlessEqual(os.path.abspath(original_dir), abs_dir)
if original_dir == os.path.normcase(original_dir):
self.failUnlessEqual(abs_dir, norm_dir)
else:
self.failUnlessEqual(os.path.normcase(abs_dir), norm_dir)
def test_init_pathinfo(self):
dir_set = site._init_pathinfo()
for entry in [site.makepath(path)[1] for path in sys.path
if path and os.path.isdir(path)]:
self.failUnless(entry in dir_set,
"%s from sys.path not found in set returned "
"by _init_pathinfo(): %s" % (entry, dir_set))
def pth_file_tests(self, pth_file):
"""Contain common code for testing results of reading a .pth file"""
self.failUnless(pth_file.imported in sys.modules,
"%s not in sys.path" % pth_file.imported)
self.failUnless(site.makepath(pth_file.good_dir_path)[0] in sys.path)
self.failUnless(not os.path.exists(pth_file.bad_dir_path))
def test_addpackage(self):
        # Make sure addpackage() executes a line if it starts with 'import',
        # and adds a directory to sys.path for any other non-comment line in
        # the .pth file that names a valid directory relative to where the
        # .pth file resides; invalid directories are not added
pth_file = PthFile()
pth_file.cleanup(prep=True) # to make sure that nothing is
# pre-existing that shouldn't be
try:
pth_file.create()
site.addpackage(pth_file.base_dir, pth_file.filename, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def test_addsitedir(self):
        # Same tests as test_addpackage since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def test_s_option(self):
usersite = site.USER_SITE
self.assert_(usersite in sys.path)
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite])
self.assertEqual(rc, 1)
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite])
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
class PthFile(object):
"""Helper class for handling testing of .pth files"""
def __init__(self, filename_base=TESTFN, imported="time",
good_dirname="__testdir__", bad_dirname="__bad"):
"""Initialize instance variables"""
self.filename = filename_base + ".pth"
self.base_dir = os.path.abspath('')
self.file_path = os.path.join(self.base_dir, self.filename)
self.imported = imported
self.good_dirname = good_dirname
self.bad_dirname = bad_dirname
self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
def create(self):
"""Create a .pth file with a comment, blank lines, an ``import
<self.imported>``, a line with self.good_dirname, and a line with
self.bad_dirname.
Creation of the directory for self.good_dir_path (based off of
self.good_dirname) is also performed.
Make sure to call self.cleanup() to undo anything done by this method.
"""
FILE = open(self.file_path, 'w')
try:
print("#import @bad module name", file=FILE)
print("\n", file=FILE)
print("import %s" % self.imported, file=FILE)
print(self.good_dirname, file=FILE)
print(self.bad_dirname, file=FILE)
finally:
FILE.close()
os.mkdir(self.good_dir_path)
def cleanup(self, prep=False):
"""Make sure that the .pth file is deleted, self.imported is not in
sys.modules, and that both self.good_dirname and self.bad_dirname are
not existing directories."""
if os.path.exists(self.file_path):
os.remove(self.file_path)
if prep:
self.imported_module = sys.modules.get(self.imported)
if self.imported_module:
del sys.modules[self.imported]
else:
if self.imported_module:
sys.modules[self.imported] = self.imported_module
if os.path.exists(self.good_dir_path):
os.rmdir(self.good_dir_path)
if os.path.exists(self.bad_dir_path):
os.rmdir(self.bad_dir_path)
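# For illustration (the filename assumes the default TESTFN base, which
# varies by platform): with the defaults above, PthFile.create() writes a
# .pth file containing roughly:
#
#   #import @bad module name
#   (a blank line)
#   import time
#   __testdir__
#   __bad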
class ImportSideEffectTests(unittest.TestCase):
"""Test side-effects from importing 'site'."""
def setUp(self):
"""Make a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path = self.sys_path
def test_abs__file__(self):
# Make sure all imported modules have their __file__ attribute
# as an absolute path.
# Handled by abs__file__()
site.abs__file__()
for module in (sys, os, builtins):
try:
self.failUnless(os.path.isabs(module.__file__), repr(module))
except AttributeError:
continue
# We could try everything in sys.modules; however, when regrtest.py
# runs something like test_frozen before test_site, then we will
# be testing things loaded *after* test_site did path normalization
def test_no_duplicate_paths(self):
# No duplicate paths should exist in sys.path
# Handled by removeduppaths()
site.removeduppaths()
seen_paths = set()
for path in sys.path:
self.failUnless(path not in seen_paths)
seen_paths.add(path)
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
# XXX: implement
pass
def test_setting_quit(self):
# 'quit' and 'exit' should be injected into builtins
self.failUnless(hasattr(builtins, "quit"))
self.failUnless(hasattr(builtins, "exit"))
def test_setting_copyright(self):
# 'copyright' and 'credits' should be in builtins
self.failUnless(hasattr(builtins, "copyright"))
self.failUnless(hasattr(builtins, "credits"))
def test_setting_help(self):
# 'help' should be set in builtins
self.failUnless(hasattr(builtins, "help"))
def test_aliasing_mbcs(self):
if sys.platform == "win32":
import locale
if locale.getdefaultlocale()[1].startswith('cp'):
for value in encodings.aliases.aliases.values():
if value == "mbcs":
break
else:
self.fail("did not alias mbcs")
def test_setdefaultencoding_removed(self):
# Make sure sys.setdefaultencoding is gone
self.failUnless(not hasattr(sys, "setdefaultencoding"))
def test_sitecustomize_executed(self):
# If sitecustomize is available, it should have been imported.
if "sitecustomize" not in sys.modules:
try:
import sitecustomize
except ImportError:
pass
else:
self.fail("sitecustomize not imported automatically")
def test_main():
run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
uclouvain/osis | cms/migrations/0004_auto_20170425_1548.py | 3 | 1236 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-25 13:48
from __future__ import unicode_literals
import ckeditor.fields
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20170317_1033'),
]
operations = [
migrations.AlterField(
model_name='textlabel',
name='entity',
field=models.CharField(choices=[('learning_unit_year', 'learning_unit_year'), ('offer_year', 'offer_year')], max_length=25),
),
migrations.AlterField(
model_name='textlabel',
name='order',
field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='translatedtext',
name='entity',
field=models.CharField(choices=[('learning_unit_year', 'learning_unit_year'), ('offer_year', 'offer_year')], db_index=True, max_length=25),
),
migrations.AlterField(
model_name='translatedtext',
name='text',
field=ckeditor.fields.RichTextField(null=True),
),
]
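# A hedged usage note: a migration like this is normally applied with
# `python manage.py migrate cms`, which runs the AlterField operations above.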
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_configuration.py | 1 | 3313 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class BatchManagementConfiguration(Configuration):
"""Configuration for BatchManagement.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(BatchManagementConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-01-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-batch/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
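    # Illustrative construction (azure.identity is an assumed companion
    # package, not imported by this module):
    #
    #   from azure.identity import DefaultAzureCredential
    #   config = BatchManagementConfiguration(
    #       DefaultAzureCredential(),
    #       "00000000-0000-0000-0000-000000000000")
    #
    # The generated BatchManagement client normally builds this configuration
    # internally from the same credential/subscription pair.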
| mit |
Vaibhav/Stock-Analysis | DEPRECATED/Scrapers/finviz.py | 1 | 3650 | import requests
from bs4 import BeautifulSoup
import csv
import pdb
import datetime
# pdb.set_trace() - Python's step-by-step debugger breakpoint command
print(datetime.datetime.now())
print("Finviz Financial Start")
url = "http://www.finviz.com/screener.ashx?v=161&f=geo_usa"
## url = "http://www.finviz.com/screener.ashx?v=121&f=fa_eps5years_pos,fa_epsqoq_pos,fa_epsyoy_pos,fa_epsyoy1_pos,fa_estltgrowth_pos,fa_sales5years_pos,fa_salesqoq_pos,ind_stocksonly,sh_avgvol_o50,sh_curvol_o0,sh_insiderown_o10,sh_instown_o10,sh_price_o10,ta_averagetruerange_o0.25,ta_beta_u1,ta_change_u,ta_changeopen_u,ta_highlow20d_nh,ta_highlow50d_nh,ta_highlow52w_nh,ta_sma20_pa,ta_sma200_pa,ta_sma50_pa"
response = requests.get(url)
html = response.content
soup = BeautifulSoup(html, 'html.parser')  # explicit parser avoids bs4's "no parser specified" warning
# print("==============")
# print(soup)
# print("==============")
firstcount = soup.find_all('select', {"id": "pageSelect"})
lastnum = len(firstcount) - 1
print("==============")
print(firstcount)
print("==============")
lastpagenum = firstcount[lastnum].attrs['value']
currentpage = int(lastpagenum)
alldata = []
templist = []
# Overview = 111, Valuation = 121, Financial = 161, Ownership = 131, Performance = 141
#pagesarray = [111,121,161,131,141]
titleslist = soup.find_all('td', {"class": "table-top"})
titleslisttickerid = soup.find_all('td', {"class": "table-top-s"})
titleticker = titleslisttickerid[0].text
titlesarray = []
for title in titleslist:
titlesarray.append(title.text)
titlesarray.insert(1, titleticker)
i = 0
while(currentpage > 0):
i += 1
print(str(i) + " page(s) done")
secondurl = "http://www.finviz.com/screener.ashx?v=" + str(161) + "&f=geo_usa" + "&r=" + str(currentpage)
secondresponse = requests.get(secondurl)
secondhtml = secondresponse.content
    secondsoup = BeautifulSoup(secondhtml, 'html.parser')
stockdata = secondsoup.find_all('a', {"class": "screener-link"})
stockticker = secondsoup.find_all('a', {"class": "screener-link-primary"})
datalength = len(stockdata)
tickerdatalength = len(stockticker)
while(datalength > 0):
        templist = [stockdata[datalength - 17].text,
                    stockticker[tickerdatalength - 1].text,
                    stockdata[datalength - 16].text,
                    stockdata[datalength - 15].text,
                    stockdata[datalength - 14].text,
                    stockdata[datalength - 13].text,
                    stockdata[datalength - 12].text,
                    stockdata[datalength - 11].text,
                    stockdata[datalength - 10].text,
                    stockdata[datalength - 9].text,
                    stockdata[datalength - 8].text,
                    stockdata[datalength - 7].text,
                    stockdata[datalength - 6].text,
                    stockdata[datalength - 5].text,
                    stockdata[datalength - 4].text,
                    stockdata[datalength - 3].text,
                    stockdata[datalength - 2].text,
                    stockdata[datalength - 1].text]
alldata.append(templist)
templist = []
datalength -= 17
tickerdatalength -= 1
currentpage -= 20
with open('stockfinancial.csv', 'wb') as csvfile:
financial = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=titlesarray)
financial.writeheader()
for stock in alldata:
        financial.writerow({
            titlesarray[0]: stock[0], titlesarray[1]: stock[1],
            titlesarray[2]: stock[2], titlesarray[3]: stock[3],
            titlesarray[4]: stock[4], titlesarray[5]: stock[5],
            titlesarray[6]: stock[6], titlesarray[7]: stock[7],
            titlesarray[8]: stock[8], titlesarray[9]: stock[9],
            titlesarray[10]: stock[10], titlesarray[11]: stock[11],
            titlesarray[12]: stock[12], titlesarray[13]: stock[13],
            titlesarray[14]: stock[14], titlesarray[15]: stock[15],
            titlesarray[16]: stock[16], titlesarray[17]: stock[17]})
print(datetime.datetime.now())
print("Finviz Financial Completed")
| mit |
splunk/splunk-ref-pas-code | spikes/googledrive_addon/bin/apiclient/discovery.py | 2 | 35848 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs.
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build',
'build_from_document',
'fix_method_name',
'key2param',
]
# Standard library imports
import copy
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
import keyword
import logging
import mimetypes
import os
import re
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Third-party imports
import httplib2
import mimeparse
import uritemplate
# Local imports
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownFileType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from oauth2client.anyjson import simplejson
from oauth2client.util import _add_query_parameter
from oauth2client.util import positional
# The client library requires a version of httplib2 that supports RETRIES.
httplib2.RETRIES = 1
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
BODY_PARAMETER_DEFAULT_VALUE = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
'description': ('The filename of the media request body, or an instance '
'of a MediaUpload object.'),
'type': 'string',
'required': False,
}
# Parameters accepted by the stack, but not visible via discovery.
# TODO(dhermes): Remove 'userip' in 'v2'.
STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
# Library-specific reserved words beyond Python keywords.
RESERVED_WORDS = frozenset(['body'])
def fix_method_name(name):
"""Fix method names to avoid reserved word conflicts.
Args:
name: string, method name.
Returns:
The name with a '_' prefixed if the name is a reserved word.
"""
if keyword.iskeyword(name) or name in RESERVED_WORDS:
return name + '_'
else:
return name
def key2param(key):
"""Converts key names into parameter names.
For example, converting "max-results" -> "max_results"
Args:
key: string, the method key name.
Returns:
A safe method name based on the key name.
"""
result = []
key = list(key)
if not key[0].isalpha():
result.append('x')
for c in key:
if c.isalnum():
result.append(c)
else:
result.append('_')
return ''.join(result)
@positional(2)
def build(serviceName,
version,
http=None,
discoveryServiceUrl=DISCOVERY_URI,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Construct a Resource for interacting with an API.
Construct a Resource object for interacting with an API. The serviceName and
version are the names from the Discovery service.
Args:
serviceName: string, name of the service.
version: string, the version of the service.
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
discoveryServiceUrl: string, a URI Template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URI to the discovery
document for that service.
developerKey: string, key obtained from
https://code.google.com/apis/console.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
request.
Returns:
A Resource object with methods for interacting with the service.
"""
params = {
'api': serviceName,
'apiVersion': version
}
if http is None:
http = httplib2.Http()
requested_url = uritemplate.expand(discoveryServiceUrl, params)
# REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
# variable that contains the network address of the client sending the
# request. If it exists then add that to the request for the discovery
# document to avoid exceeding the quota on discovery requests.
if 'REMOTE_ADDR' in os.environ:
requested_url = _add_query_parameter(requested_url, 'userIp',
os.environ['REMOTE_ADDR'])
logger.info('URL being requested: %s' % requested_url)
resp, content = http.request(requested_url)
if resp.status == 404:
raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
version))
if resp.status >= 400:
raise HttpError(resp, content, uri=requested_url)
try:
service = simplejson.loads(content)
except ValueError, e:
logger.error('Failed to parse as JSON: ' + content)
raise InvalidJsonError()
return build_from_document(content, base=discoveryServiceUrl, http=http,
developerKey=developerKey, model=model, requestBuilder=requestBuilder)
@positional(1)
def build_from_document(
service,
base=None,
future=None,
http=None,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Create a Resource for interacting with an API.
  Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.
Args:
service: string or object, the JSON discovery document describing the API.
The value passed in may either be the JSON string or the deserialized
JSON.
base: string, base URI for all HTTP requests, usually the discovery URI.
This parameter is no longer used as rootUrl and servicePath are included
within the discovery document. (deprecated)
future: string, discovery document with future capabilities (deprecated).
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
developerKey: string, Key for controlling API usage, generated
from the API Console.
model: Model class instance that serializes and de-serializes requests and
responses.
requestBuilder: Takes an http request and packages it up to be executed.
Returns:
A Resource object with methods for interacting with the service.
"""
# future is no longer used.
future = {}
if isinstance(service, basestring):
service = simplejson.loads(service)
base = urlparse.urljoin(service['rootUrl'], service['servicePath'])
schema = Schemas(service)
if model is None:
features = service.get('features', [])
model = JsonModel('dataWrapper' in features)
return Resource(http=http, baseUrl=base, model=model,
developerKey=developerKey, requestBuilder=requestBuilder,
resourceDesc=service, rootDesc=service, schema=schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
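# For example (illustrative): _cast(True, 'boolean') returns 'true',
# _cast(10, 'number') returns '10.0', and _cast(5, 'integer') returns '5'.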
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0L
units = maxSize[-2:].upper()
bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
if bit_shift is not None:
return long(maxSize[:-2]) << bit_shift
else:
return long(maxSize)
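# For example (illustrative): _media_size_to_long('10GB') returns 10L << 30,
# _media_size_to_long('100') returns 100L, and any string shorter than two
# characters (including '') returns 0L.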
def _media_path_url_from_info(root_desc, path_url):
"""Creates an absolute media path URL.
Constructed using the API root URI and service path from the discovery
document and the relative path for the API method.
Args:
root_desc: Dictionary; the entire original deserialized discovery document.
path_url: String; the relative URL for the API method. Relative to the API
root, which is specified in the discovery document.
Returns:
String; the absolute URI for media upload for the API method.
"""
return '%(root)supload/%(service_path)s%(path)s' % {
'root': root_desc['rootUrl'],
'service_path': root_desc['servicePath'],
'path': path_url,
}
def _fix_up_parameters(method_desc, root_desc, http_method):
"""Updates parameters of an API method with values specific to this library.
Specifically, adds whatever global parameters are specified by the API to the
parameters for the individual method. Also adds parameters which don't
appear in the discovery document, but are available to all discovery based
APIs (these are listed in STACK_QUERY_PARAMETERS).
SIDE EFFECTS: This updates the parameters dictionary object in the method
description.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
http_method: String; the HTTP method used to call the API method described
in method_desc.
Returns:
The updated Dictionary stored in the 'parameters' key of the method
description dictionary.
"""
parameters = method_desc.setdefault('parameters', {})
# Add in the parameters common to all methods.
for name, description in root_desc.get('parameters', {}).iteritems():
parameters[name] = description
# Add in undocumented query parameters.
for name in STACK_QUERY_PARAMETERS:
parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
# Add 'body' (our own reserved word) to parameters if the method supports
# a request payload.
if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
body = BODY_PARAMETER_DEFAULT_VALUE.copy()
body.update(method_desc['request'])
parameters['body'] = body
return parameters
def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
"""Updates parameters of API by adding 'media_body' if supported by method.
SIDE EFFECTS: If the method supports media upload and has a required body,
sets body to be optional (required=False) instead. Also, if there is a
'mediaUpload' in the method description, adds 'media_upload' key to
parameters.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
path_url: String; the relative URL for the API method. Relative to the API
root, which is specified in the discovery document.
parameters: A dictionary describing method parameters for method described
in method_desc.
Returns:
Triple (accept, max_size, media_path_url) where:
- accept is a list of strings representing what content types are
accepted for media upload. Defaults to empty list if not in the
discovery document.
- max_size is a long representing the max size in bytes allowed for a
media upload. Defaults to 0L if not in the discovery document.
- media_path_url is a String; the absolute URI for media upload for the
API method. Constructed using the API root URI and service path from
the discovery document and the relative path for the API method. If
media upload is not supported, this is None.
"""
media_upload = method_desc.get('mediaUpload', {})
accept = media_upload.get('accept', [])
max_size = _media_size_to_long(media_upload.get('maxSize', ''))
media_path_url = None
if media_upload:
media_path_url = _media_path_url_from_info(root_desc, path_url)
parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
if 'body' in parameters:
parameters['body']['required'] = False
return accept, max_size, media_path_url
def _fix_up_method_description(method_desc, root_desc):
"""Updates a method description in a discovery document.
SIDE EFFECTS: Changes the parameters dictionary in the method description with
extra parameters which are used locally.
Args:
method_desc: Dictionary with metadata describing an API method. Value comes
from the dictionary of methods stored in the 'methods' key in the
deserialized discovery document.
root_desc: Dictionary; the entire original deserialized discovery document.
Returns:
Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)
where:
- path_url is a String; the relative URL for the API method. Relative to
the API root, which is specified in the discovery document.
- http_method is a String; the HTTP method used to call the API method
described in the method description.
- method_id is a String; the name of the RPC method associated with the
API method, and is in the method description in the 'id' key.
- accept is a list of strings representing what content types are
accepted for media upload. Defaults to empty list if not in the
discovery document.
- max_size is a long representing the max size in bytes allowed for a
media upload. Defaults to 0L if not in the discovery document.
- media_path_url is a String; the absolute URI for media upload for the
API method. Constructed using the API root URI and service path from
the discovery document and the relative path for the API method. If
media upload is not supported, this is None.
"""
path_url = method_desc['path']
http_method = method_desc['httpMethod']
method_id = method_desc['id']
parameters = _fix_up_parameters(method_desc, root_desc, http_method)
# Order is important. `_fix_up_media_upload` needs `method_desc` to have a
# 'parameters' key and needs to know if there is a 'body' parameter because it
# also sets a 'media_body' parameter.
accept, max_size, media_path_url = _fix_up_media_upload(
method_desc, root_desc, path_url, parameters)
return path_url, http_method, method_id, accept, max_size, media_path_url
# TODO(dhermes): Convert this class to ResourceMethod and make it callable
class ResourceMethodParameters(object):
"""Represents the parameters associated with a method.
Attributes:
argmap: Map from method parameter name (string) to query parameter name
(string).
required_params: List of required parameters (represented by parameter
name as string).
repeated_params: List of repeated parameters (represented by parameter
name as string).
pattern_params: Map from method parameter name (string) to regular
expression (as a string). If the pattern is set for a parameter, the
value for that parameter must match the regular expression.
query_params: List of parameters (represented by parameter name as string)
that will be used in the query string.
path_params: Set of parameters (represented by parameter name as string)
that will be used in the base URL path.
param_types: Map from method parameter name (string) to parameter type. Type
can be any valid JSON schema type; valid values are 'any', 'array',
'boolean', 'integer', 'number', 'object', or 'string'. Reference:
http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
enum_params: Map from method parameter name (string) to list of strings,
where each list of strings is the list of acceptable enum values.
"""
def __init__(self, method_desc):
"""Constructor for ResourceMethodParameters.
Sets default values and defers to set_parameters to populate.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document.
"""
self.argmap = {}
self.required_params = []
self.repeated_params = []
self.pattern_params = {}
self.query_params = []
# TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
# parsing is gotten rid of.
self.path_params = set()
self.param_types = {}
self.enum_params = {}
self.set_parameters(method_desc)
def set_parameters(self, method_desc):
"""Populates maps and lists based on method description.
Iterates through each parameter for the method and parses the values from
the parameter dictionary.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document.
"""
for arg, desc in method_desc.get('parameters', {}).iteritems():
param = key2param(arg)
self.argmap[param] = arg
if desc.get('pattern'):
self.pattern_params[param] = desc['pattern']
if desc.get('enum'):
self.enum_params[param] = desc['enum']
if desc.get('required'):
self.required_params.append(param)
if desc.get('repeated'):
self.repeated_params.append(param)
if desc.get('location') == 'query':
self.query_params.append(param)
if desc.get('location') == 'path':
self.path_params.add(param)
self.param_types[param] = desc.get('type', 'string')
# TODO(dhermes): Determine if this is still necessary. Discovery based APIs
# should have all path parameters already marked with
# 'location: path'.
for match in URITEMPLATE.finditer(method_desc['path']):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
self.path_params.add(name)
if name in self.query_params:
self.query_params.remove(name)
def createMethod(methodName, methodDesc, rootDesc, schema):
"""Creates a method for attaching to a Resource.
Args:
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
"""
methodName = fix_method_name(methodName)
(pathUrl, httpMethod, methodId, accept,
maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)
parameters = ResourceMethodParameters(methodDesc)
def method(self, **kwargs):
# Don't bother with doc string, it will be over-written by createMethod.
for name in kwargs.iterkeys():
if name not in parameters.argmap:
raise TypeError('Got an unexpected keyword argument "%s"' % name)
# Remove args that have a value of None.
keys = kwargs.keys()
for name in keys:
if kwargs[name] is None:
del kwargs[name]
for name in parameters.required_params:
if name not in kwargs:
raise TypeError('Missing required parameter "%s"' % name)
for name, regex in parameters.pattern_params.iteritems():
if name in kwargs:
if isinstance(kwargs[name], basestring):
pvalues = [kwargs[name]]
else:
pvalues = kwargs[name]
for pvalue in pvalues:
if re.match(regex, pvalue) is None:
raise TypeError(
'Parameter "%s" value "%s" does not match the pattern "%s"' %
(name, pvalue, regex))
for name, enums in parameters.enum_params.iteritems():
if name in kwargs:
# We need to handle the case of a repeated enum
# name differently, since we want to handle both
# arg='value' and arg=['value1', 'value2']
if (name in parameters.repeated_params and
not isinstance(kwargs[name], basestring)):
values = kwargs[name]
else:
values = [kwargs[name]]
for value in values:
if value not in enums:
raise TypeError(
'Parameter "%s" value "%s" is not an allowed value in "%s"' %
(name, value, str(enums)))
actual_query_params = {}
actual_path_params = {}
for key, value in kwargs.iteritems():
to_type = parameters.param_types.get(key, 'string')
# For repeated parameters we cast each member of the list.
if key in parameters.repeated_params and type(value) == type([]):
cast_value = [_cast(x, to_type) for x in value]
else:
cast_value = _cast(value, to_type)
if key in parameters.query_params:
actual_query_params[parameters.argmap[key]] = cast_value
if key in parameters.path_params:
actual_path_params[parameters.argmap[key]] = cast_value
body_value = kwargs.get('body', None)
media_filename = kwargs.get('media_body', None)
if self._developerKey:
actual_query_params['key'] = self._developerKey
model = self._model
if methodName.endswith('_media'):
model = MediaModel()
elif 'response' not in methodDesc:
model = RawModel()
headers = {}
headers, params, query, body = model.request(headers,
actual_path_params, actual_query_params, body_value)
expanded_url = uritemplate.expand(pathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
resumable = None
multipart_boundary = ''
if media_filename:
# Ensure we end up with a valid MediaUpload object.
if isinstance(media_filename, basestring):
(media_mime_type, encoding) = mimetypes.guess_type(media_filename)
if media_mime_type is None:
raise UnknownFileType(media_filename)
if not mimeparse.best_match([media_mime_type], ','.join(accept)):
raise UnacceptableMimeTypeError(media_mime_type)
media_upload = MediaFileUpload(media_filename,
mimetype=media_mime_type)
elif isinstance(media_filename, MediaUpload):
media_upload = media_filename
else:
raise TypeError('media_filename must be str or MediaUpload.')
# Check the maxSize
if maxSize > 0 and media_upload.size() > maxSize:
raise MediaUploadSizeError("Media larger than: %s" % maxSize)
# Use the media path uri for media uploads
expanded_url = uritemplate.expand(mediaPathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
if media_upload.resumable():
url = _add_query_parameter(url, 'uploadType', 'resumable')
if media_upload.resumable():
# This is all we need to do for resumable, if the body exists it gets
# sent in the first request, otherwise an empty body is sent.
resumable = media_upload
else:
# A non-resumable upload
if body is None:
# This is a simple media upload
headers['content-type'] = media_upload.mimetype()
body = media_upload.getbytes(0, media_upload.size())
url = _add_query_parameter(url, 'uploadType', 'media')
else:
# This is a multipart/related upload.
msgRoot = MIMEMultipart('related')
          # msgRoot should not write out its own headers
setattr(msgRoot, '_write_headers', lambda self: None)
# attach the body as one part
msg = MIMENonMultipart(*headers['content-type'].split('/'))
msg.set_payload(body)
msgRoot.attach(msg)
# attach the media as the second part
msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
payload = media_upload.getbytes(0, media_upload.size())
msg.set_payload(payload)
msgRoot.attach(msg)
body = msgRoot.as_string()
multipart_boundary = msgRoot.get_boundary()
headers['content-type'] = ('multipart/related; '
'boundary="%s"') % multipart_boundary
url = _add_query_parameter(url, 'uploadType', 'multipart')
logger.info('URL being requested: %s' % url)
return self._requestBuilder(self._http,
model.response,
url,
method=httpMethod,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(parameters.argmap) > 0:
docs.append('Args:\n')
# Skip undocumented params and params common to all methods.
skip_parameters = rootDesc.get('parameters', {}).keys()
skip_parameters.extend(STACK_QUERY_PARAMETERS)
all_args = parameters.argmap.keys()
args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
# Move body to the front of the line.
if 'body' in all_args:
args_ordered.append('body')
for name in all_args:
if name not in args_ordered:
args_ordered.append(name)
for arg in args_ordered:
if arg in skip_parameters:
continue
repeated = ''
if arg in parameters.repeated_params:
repeated = ' (repeated)'
required = ''
if arg in parameters.required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
if '$ref' in paramdesc:
docs.append(
(' %s: object, %s%s%s\n The object takes the'
' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
schema.prettyPrintByName(paramdesc['$ref'])))
else:
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
if 'response' in methodDesc:
if methodName.endswith('_media'):
docs.append('\nReturns:\n The media object as a string.\n\n ')
else:
docs.append('\nReturns:\n An object of the form:\n\n ')
docs.append(schema.prettyPrintSchema(methodDesc['response']))
setattr(method, '__doc__', ''.join(docs))
return (methodName, method)
def createNextMethod(methodName):
"""Creates any _next methods for attaching to a Resource.
The _next methods allow for easy iteration through list() responses.
Args:
methodName: string, name of the method to use.
"""
methodName = fix_method_name(methodName)
def methodNext(self, previous_request, previous_response):
"""Retrieves the next page of results.
Args:
previous_request: The request for the previous page. (required)
previous_response: The response from the request for the previous page. (required)
Returns:
A request object that you can call 'execute()' on to request the next
page. Returns None if there are no more items in the collection.
"""
# Retrieve nextPageToken from previous_response
# Use as pageToken in previous_request to create new request.
if 'nextPageToken' not in previous_response:
return None
request = copy.copy(previous_request)
pageToken = previous_response['nextPageToken']
parsed = list(urlparse.urlparse(request.uri))
q = parse_qsl(parsed[4])
# Find and remove old 'pageToken' value from URI
newq = [(key, value) for (key, value) in q if key != 'pageToken']
newq.append(('pageToken', pageToken))
parsed[4] = urllib.urlencode(newq)
uri = urlparse.urlunparse(parsed)
request.uri = uri
logger.info('URL being requested: %s' % uri)
return request
return (methodName, methodNext)
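# A minimal, hypothetical sketch of how a generated *_next method is used for
# pagination; the service, collection, and handler names below are
# placeholders, not part of this module:
#
#   request = service.files().list(maxResults=10)
#   while request is not None:
#       response = request.execute()
#       for item in response.get('items', []):
#           handle(item)  # user-defined processing
#       request = service.files().list_next(request, response)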
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
resourceDesc, rootDesc, schema):
"""Build a Resource from the API description.
Args:
http: httplib2.Http, Object to make http requests with.
baseUrl: string, base URL for the API. All requests are relative to this
URI.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: class or callable that instantiates an
apiclient.HttpRequest object.
developerKey: string, key obtained from
https://code.google.com/apis/console
resourceDesc: object, section of deserialized discovery document that
describes a resource. Note that the top level discovery document
is considered a resource.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
"""
self._dynamic_attrs = []
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
self._resourceDesc = resourceDesc
self._rootDesc = rootDesc
self._schema = schema
self._set_service_methods()
def _set_dynamic_attr(self, attr_name, value):
"""Sets an instance attribute and tracks it in a list of dynamic attributes.
Args:
attr_name: string; The name of the attribute to be set
value: The value being set on the object and tracked in the dynamic cache.
"""
self._dynamic_attrs.append(attr_name)
self.__dict__[attr_name] = value
def __getstate__(self):
"""Trim the state down to something that can be pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
state_dict = copy.copy(self.__dict__)
for dynamic_attr in self._dynamic_attrs:
del state_dict[dynamic_attr]
del state_dict['_dynamic_attrs']
return state_dict
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
self.__dict__.update(state)
self._dynamic_attrs = []
self._set_service_methods()
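# A short sketch of why __getstate__/__setstate__ matter: the dynamically
# attached methods hold bound references that cannot be pickled, so they are
# stripped on dump and rebuilt on load. Hypothetical round trip:
#
#   import pickle
#   blob = pickle.dumps(resource)    # dynamic attrs removed by __getstate__
#   restored = pickle.loads(blob)    # __setstate__ re-runs _set_service_methods()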
def _set_service_methods(self):
self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
self._add_next_methods(self._resourceDesc, self._schema)
def _add_basic_methods(self, resourceDesc, rootDesc, schema):
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
fixedMethodName, method = createMethod(
methodName, methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
fixedMethodName, method = createMethod(
methodName + '_media', methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_nested_resources(self, resourceDesc, rootDesc, schema):
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(methodName, methodDesc):
"""Create a method on the Resource to access a nested Resource.
Args:
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
"""
methodName = fix_method_name(methodName)
def methodResource(self):
return Resource(http=self._http, baseUrl=self._baseUrl,
model=self._model, developerKey=self._developerKey,
requestBuilder=self._requestBuilder,
resourceDesc=methodDesc, rootDesc=rootDesc,
schema=schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
return (methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
fixedMethodName, method = createResourceMethod(methodName, methodDesc)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_next_methods(self, resourceDesc, schema):
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
fixedMethodName, method = createNextMethod(methodName + '_next')
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
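# In normal use a Resource is not constructed directly; it is returned by the
# discovery machinery. A minimal, hypothetical sketch (the API name and
# version are placeholders):
#
#   from apiclient.discovery import build
#   service = build('drive', 'v2', http=http)    # a top-level Resource
#   response = service.files().list().execute()  # nested Resource and method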
| apache-2.0 |
abhisg/scikit-learn | sklearn/tree/tree.py | 2 | 37683 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.validation import NotFittedError
from ..utils.multiclass import check_classification_targets
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve data contiguity; indexing with
# [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort == True and issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms which
# desire presorting must do presorting themselves and pass that matrix
# into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
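# A minimal sketch of the fit/predict cycle on a concrete subclass (toy data,
# illustrative only):
#
#   clf = DecisionTreeClassifier(random_state=0)
#   clf.fit([[0, 0], [1, 1]], [0, 1])
#   clf.predict([[2, 2]])        # -> array([1])
#   clf.predict_proba([[2, 2]])  # -> array([[ 0.,  1.]]) for a pure leaf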
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
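# Illustrative: after fitting, feature_importances_ is an array with one entry
# per feature, e.g. a two-feature tree might yield array([ 0.8,  0.2]); the
# normalized values typically sum to 1.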
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
couchand/petard | vendor/cxxtest-4.3/python/cxxtest/cxxtestgen.py | 7 | 23336 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# vim: fileencoding=utf-8
from __future__ import division
# the above import important for forward-compatibility with python3,
# which is already the default in archlinux!
__all__ = ['main', 'create_manpage']
import __release__
import os
import sys
import re
import glob
from optparse import OptionParser
import cxxtest_parser
from string import Template
try:
import cxxtest_fog
imported_fog=True
except ImportError:
imported_fog=False
from cxxtest_misc import abort
try:
from os.path import relpath
except ImportError:
from cxxtest_misc import relpath
# Global data is initialized by main()
options = []
suites = []
wrotePreamble = 0
wroteWorld = 0
lastIncluded = ''
def main(args=sys.argv, catch=False):
'''The main program'''
#
# Reset global state
#
global wrotePreamble
wrotePreamble=0
global wroteWorld
wroteWorld=0
global lastIncluded
lastIncluded = ''
global suites
suites = []
global options
options = []
#
try:
files = parseCommandline(args)
if imported_fog and options.fog:
[options,suites] = cxxtest_fog.scanInputFiles( files, options )
else:
[options,suites] = cxxtest_parser.scanInputFiles( files, options )
writeOutput()
except SystemExit:
if not catch:
raise
def create_parser(asciidoc=False):
parser = OptionParser("cxxtestgen [options] [<filename> ...]")
if asciidoc:
parser.description="The cxxtestgen command processes C++ header files to perform test discovery, and then it creates files for the CxxTest test runner."
else:
parser.description="The 'cxxtestgen' command processes C++ header files to perform test discovery, and then it creates files for the 'CxxTest' test runner."
parser.add_option("--version",
action="store_true", dest="version", default=False,
help="Write the CxxTest version.")
parser.add_option("-o", "--output",
dest="outputFileName", default=None, metavar="NAME",
help="Write output to file NAME.")
parser.add_option("-w","--world", dest="world", default="cxxtest",
help="The label of the tests, used to name the XML results.")
parser.add_option("", "--include", action="append",
dest="headers", default=[], metavar="HEADER",
help="Include file HEADER in the test runner before other headers.")
parser.add_option("", "--abort-on-fail",
action="store_true", dest="abortOnFail", default=False,
help="Abort tests on failed asserts (like xUnit).")
parser.add_option("", "--main",
action="store", dest="main", default="main",
help="Specify an alternative name for the main() function.")
parser.add_option("", "--headers",
action="store", dest="header_filename", default=None,
help="Specify a filename that contains a list of header files that are processed to generate a test runner.")
parser.add_option("", "--runner",
dest="runner", default="", metavar="CLASS",
help="Create a test runner that processes test events using the class CxxTest::CLASS.")
parser.add_option("", "--gui",
dest="gui", metavar="CLASS",
help="Create a GUI test runner that processes test events using the class CxxTest::CLASS. (deprecated)")
parser.add_option("", "--error-printer",
action="store_true", dest="error_printer", default=False,
help="Create a test runner using the ErrorPrinter class, and allow the use of the standard library.")
parser.add_option("", "--xunit-printer",
action="store_true", dest="xunit_printer", default=False,
help="Create a test runner using the XUnitPrinter class.")
parser.add_option("", "--xunit-file", dest="xunit_file", default="",
help="The file to which the XML summary is written for test runners using the XUnitPrinter class. The default XML filename is TEST-<world>.xml, where <world> is the value of the --world option. (default: cxxtest)")
parser.add_option("", "--have-std",
action="store_true", dest="haveStandardLibrary", default=False,
help="Use the standard library (even if not found in tests).")
parser.add_option("", "--no-std",
action="store_true", dest="noStandardLibrary", default=False,
help="Do not use standard library (even if found in tests).")
parser.add_option("", "--have-eh",
action="store_true", dest="haveExceptionHandling", default=False,
help="Use exception handling (even if not found in tests).")
parser.add_option("", "--no-eh",
action="store_true", dest="noExceptionHandling", default=False,
help="Do not use exception handling (even if found in tests).")
parser.add_option("", "--longlong",
dest="longlong", default=None, metavar="TYPE",
help="Use TYPE as for long long integers. (default: not supported)")
parser.add_option("", "--no-static-init",
action="store_true", dest="noStaticInit", default=False,
help="Do not rely on static initialization in the test runner.")
parser.add_option("", "--template",
dest="templateFileName", default=None, metavar="TEMPLATE",
help="Generate the test runner using file TEMPLATE to define a template.")
parser.add_option("", "--root",
action="store_true", dest="root", default=False,
help="Write the main() function and global data for a test runner.")
parser.add_option("", "--part",
action="store_true", dest="part", default=False,
help="Write the tester classes for a test runner.")
#parser.add_option("", "--factor",
#action="store_true", dest="factor", default=False,
#help="Declare the _CXXTEST_FACTOR macro. (deprecated)")
if imported_fog:
fog_help = "Use new FOG C++ parser"
else:
fog_help = "Use new FOG C++ parser (disabled)"
parser.add_option("-f", "--fog-parser",
action="store_true",
dest="fog",
default=False,
help=fog_help
)
return parser
def parseCommandline(args):
'''Analyze command line arguments'''
global imported_fog
global options
parser = create_parser()
(options, args) = parser.parse_args(args=args)
if not options.header_filename is None:
if not os.path.exists(options.header_filename):
abort( "ERROR: the file '%s' does not exist!" % options.header_filename )
INPUT = open(options.header_filename)
headers = [line.strip() for line in INPUT]
args.extend( headers )
INPUT.close()
if options.fog and not imported_fog:
abort( "Cannot use the FOG parser. Check that the 'ply' package is installed. The 'ordereddict' package is also required if running Python 2.6")
if options.version:
printVersion()
# the cxxtest builder relies on this behaviour! don't remove
if options.runner == 'none':
options.runner = None
if options.xunit_printer or options.runner == "XUnitPrinter":
options.xunit_printer=True
options.runner="XUnitPrinter"
if len(args) > 1:
if options.xunit_file == "":
if options.world == "":
options.world = "cxxtest"
options.xunit_file="TEST-"+options.world+".xml"
elif options.xunit_file == "":
if options.world == "":
options.world = "cxxtest"
options.xunit_file="TEST-"+options.world+".xml"
if options.error_printer:
options.runner= "ErrorPrinter"
options.haveStandardLibrary = True
if options.noStaticInit and (options.root or options.part):
abort( '--no-static-init cannot be used with --root/--part' )
if options.gui and not options.runner:
options.runner = 'StdioPrinter'
files = setFiles(args[1:])
if len(files) == 0 and not options.root:
# parser.error() writes to stderr and exits, so no extra write is needed.
parser.error("No input files found")
return files
def printVersion():
'''Print CxxTest version and exit'''
sys.stdout.write( "This is CxxTest version %s.\n" % __release__.__version__ )
sys.exit(0)
def setFiles(patterns ):
'''Set input files specified on command line'''
files = expandWildcards( patterns )
return files
def expandWildcards( patterns ):
'''Expand all wildcards in an array (glob)'''
fileNames = []
for pathName in patterns:
patternFiles = glob.glob( pathName )
for fileName in patternFiles:
fileNames.append( fixBackslashes( fileName ) )
return fileNames
def fixBackslashes( fileName ):
'''Convert backslashes to slashes in file name'''
return re.sub( r'\\', '/', fileName, 0 )
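# For example, fixBackslashes(r'tests\my_suite.h') returns 'tests/my_suite.h'.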
def writeOutput():
'''Create output file'''
if options.templateFileName:
writeTemplateOutput()
else:
writeSimpleOutput()
def writeSimpleOutput():
'''Create output not based on template'''
output = startOutputFile()
writePreamble( output )
if options.root or not options.part:
writeMain( output )
if len(suites) > 0:
output.write("bool "+suites[0]['object']+"_init = false;\n")
writeWorld( output )
output.close()
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )
def writeTemplateOutput():
'''Create output based on template file'''
template = open(options.templateFileName)
output = startOutputFile()
while 1:
line = template.readline()
if not line:
break
if include_re.search( line ):
writePreamble( output )
output.write( line )
elif preamble_re.search( line ):
writePreamble( output )
elif world_re.search( line ):
if len(suites) > 0:
output.write("bool "+suites[0]['object']+"_init = false;\n")
writeWorld( output )
else:
output.write( line )
template.close()
output.close()
def startOutputFile():
'''Create output file and write header'''
if options.outputFileName is not None:
output = open( options.outputFileName, 'w' )
else:
output = sys.stdout
output.write( "/* Generated file, do not edit */\n\n" )
return output
def writePreamble( output ):
'''Write the CxxTest header (#includes and #defines)'''
global wrotePreamble
if wrotePreamble: return
output.write( "#ifndef CXXTEST_RUNNING\n" )
output.write( "#define CXXTEST_RUNNING\n" )
output.write( "#endif\n" )
output.write( "\n" )
if options.xunit_printer:
output.write( "#include <fstream>\n" )
if options.haveStandardLibrary:
output.write( "#define _CXXTEST_HAVE_STD\n" )
if options.haveExceptionHandling:
output.write( "#define _CXXTEST_HAVE_EH\n" )
if options.abortOnFail:
output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
if options.longlong:
output.write( "#define _CXXTEST_LONGLONG %s\n" % options.longlong )
#if options.factor:
#output.write( "#define _CXXTEST_FACTOR\n" )
for header in options.headers:
output.write( "#include \"%s\"\n" % header )
output.write( "#include <cxxtest/TestListener.h>\n" )
output.write( "#include <cxxtest/TestTracker.h>\n" )
output.write( "#include <cxxtest/TestRunner.h>\n" )
output.write( "#include <cxxtest/RealDescriptions.h>\n" )
output.write( "#include <cxxtest/TestMain.h>\n" )
if options.runner:
output.write( "#include <cxxtest/%s.h>\n" % options.runner )
if options.gui:
output.write( "#include <cxxtest/%s.h>\n" % options.gui )
output.write( "\n" )
wrotePreamble = 1
def writeMain( output ):
'''Write the main() function for the test runner'''
if not (options.gui or options.runner):
return
output.write( 'int %s( int argc, char *argv[] ) {\n' % options.main )
output.write( ' int status;\n' )
if options.noStaticInit:
output.write( ' CxxTest::initialize();\n' )
if options.gui:
tester_t = "CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s> " % (options.gui, options.runner)
else:
tester_t = "CxxTest::%s" % (options.runner)
if options.xunit_printer:
output.write( ' std::ofstream ofstr("%s");\n' % options.xunit_file )
output.write( ' %s tmp(ofstr);\n' % tester_t )
else:
output.write( ' %s tmp;\n' % tester_t )
output.write( ' CxxTest::RealWorldDescription::_worldName = "%s";\n' % options.world )
output.write( ' status = CxxTest::Main< %s >( tmp, argc, argv );\n' % tester_t )
output.write( ' return status;\n')
output.write( '}\n' )
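# For a plain --error-printer run with the defaults, the main() emitted above
# looks roughly like this:
#
#   int main( int argc, char *argv[] ) {
#       int status;
#       CxxTest::ErrorPrinter tmp;
#       CxxTest::RealWorldDescription::_worldName = "cxxtest";
#       status = CxxTest::Main< CxxTest::ErrorPrinter >( tmp, argc, argv );
#       return status;
#   }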
def writeWorld( output ):
'''Write the world definitions'''
global wroteWorld
if wroteWorld: return
writePreamble( output )
writeSuites( output )
if options.root or not options.part:
writeRoot( output )
writeWorldDescr( output )
if options.noStaticInit:
writeInitialize( output )
wroteWorld = 1
def writeSuites(output):
'''Write all TestDescriptions and SuiteDescriptions'''
for suite in suites:
writeInclude( output, suite['file'] )
if isGenerated(suite):
generateSuite( output, suite )
if not options.noStaticInit:
if isDynamic(suite):
writeSuitePointer( output, suite )
else:
writeSuiteObject( output, suite )
writeTestList( output, suite )
writeSuiteDescription( output, suite )
writeTestDescriptions( output, suite )
def isGenerated(suite):
'''Checks whether a suite class should be created'''
return suite['generated']
def isDynamic(suite):
'''Checks whether a suite is dynamic'''
return 'create' in suite
def writeInclude(output, file):
'''Add #include "file" statement'''
global lastIncluded
file = os.path.abspath(file)
if file == lastIncluded: return
output.writelines( [ '#include "', file, '"\n\n' ] )
lastIncluded = file
def generateSuite( output, suite ):
'''Write a suite declared with CXXTEST_SUITE()'''
output.write( 'class %s : public CxxTest::TestSuite {\n' % suite['fullname'] )
output.write( 'public:\n' )
for line in suite['lines']:
output.write(line)
output.write( '};\n\n' )
def writeSuitePointer( output, suite ):
'''Create static suite pointer object for dynamic suites'''
if options.noStaticInit:
output.write( 'static %s* %s;\n\n' % (suite['fullname'], suite['object']) )
else:
output.write( 'static %s* %s = 0;\n\n' % (suite['fullname'], suite['object']) )
def writeSuiteObject( output, suite ):
'''Create static suite object for non-dynamic suites'''
output.writelines( [ "static ", suite['fullname'], " ", suite['object'], ";\n\n" ] )
def writeTestList( output, suite ):
'''Write the head of the test linked list for a suite'''
if options.noStaticInit:
output.write( 'static CxxTest::List %s;\n' % suite['tlist'] )
else:
output.write( 'static CxxTest::List %s = { 0, 0 };\n' % suite['tlist'] )
def writeWorldDescr( output ):
'''Write the static name of the world name'''
if options.noStaticInit:
output.write( 'const char* CxxTest::RealWorldDescription::_worldName;\n' )
else:
output.write( 'const char* CxxTest::RealWorldDescription::_worldName = "cxxtest";\n' )
def writeTestDescriptions( output, suite ):
'''Write all test descriptions for a suite'''
for test in suite['tests']:
writeTestDescription( output, suite, test )
def writeTestDescription( output, suite, test ):
'''Write test description object'''
if not options.noStaticInit:
output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
else:
output.write( 'class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
#
output.write( 'public:\n' )
if not options.noStaticInit:
output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
(test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
else:
if isDynamic(suite):
output.write( ' %s(%s* _%s) : %s(_%s) { }\n' %
(test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
output.write( ' %s* %s;\n' % (suite['fullname'], suite['object']) )
else:
output.write( ' %s(%s& _%s) : %s(_%s) { }\n' %
(test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
output.write( ' %s& %s;\n' % (suite['fullname'], suite['object']) )
output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
#
if not options.noStaticInit:
output.write( '} %s;\n\n' % test['object'] )
else:
output.write( '};\n\n' )
def runBody( suite, test ):
'''Body of TestDescription::run()'''
if isDynamic(suite): return dynamicRun( suite, test )
else: return staticRun( suite, test )
def dynamicRun( suite, test ):
'''Body of TestDescription::run() for test in a dynamic suite'''
return 'if ( ' + suite['object'] + ' ) ' + suite['object'] + '->' + test['name'] + '();'
def staticRun( suite, test ):
'''Body of TestDescription::run() for test in a non-dynamic suite'''
return suite['object'] + '.' + test['name'] + '();'
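# Derived from the two helpers above: for an illustrative suite object
# 'suite_MyTests' and a test named 'testFoo', dynamicRun() produces
#   if ( suite_MyTests ) suite_MyTests->testFoo();
# while staticRun() produces
#   suite_MyTests.testFoo();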
def writeSuiteDescription( output, suite ):
'''Write SuiteDescription object'''
if isDynamic( suite ):
writeDynamicDescription( output, suite )
else:
writeStaticDescription( output, suite )
def writeDynamicDescription( output, suite ):
'''Write SuiteDescription for a dynamic suite'''
output.write( 'CxxTest::DynamicSuiteDescription< %s > %s' % (suite['fullname'], suite['dobject']) )
if not options.noStaticInit:
output.write( '( %s, %s, "%s", %s, %s, %s, %s )' %
(suite['cfile'], suite['line'], suite['fullname'], suite['tlist'],
suite['object'], suite['create'], suite['destroy']) )
output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
'''Write SuiteDescription for a static suite'''
output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
if not options.noStaticInit:
output.write( '( %s, %s, "%s", %s, %s )' %
(suite['cfile'], suite['line'], suite['fullname'], suite['object'], suite['tlist']) )
output.write( ';\n\n' )
def writeRoot(output):
'''Write static members of CxxTest classes'''
output.write( '#include <cxxtest/Root.cpp>\n' )
def writeInitialize(output):
'''Write CxxTest::initialize(), which replaces static initialization'''
output.write( 'namespace CxxTest {\n' )
output.write( ' void initialize()\n' )
output.write( ' {\n' )
for suite in suites:
#print "HERE", suite
writeTestList( output, suite )
output.write( ' %s.initialize();\n' % suite['tlist'] )
#writeSuiteObject( output, suite )
if isDynamic(suite):
writeSuitePointer( output, suite )
output.write( ' %s = 0;\n' % suite['object'])
else:
writeSuiteObject( output, suite )
output.write( ' static ')
writeSuiteDescription( output, suite )
if isDynamic(suite):
#output.write( ' %s = %s.suite();\n' % (suite['object'],suite['dobject']) )
output.write( ' %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
(suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
output.write( ' %s.setUp();\n' % suite['dobject'])
else:
output.write( ' %s.initialize( %s, %s, "%s", %s, %s );\n' %
(suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
suite['object'], suite['tlist']) )
for test in suite['tests']:
output.write( ' static %s %s(%s);\n' %
(test['class'], test['object'], suite['object']) )
output.write( ' %s.initialize( %s, %s, %s, "%s" );\n' %
(test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
output.write( ' }\n' )
output.write( '}\n' )
man_template=Template("""CXXTESTGEN(1)
=============
:doctype: manpage
NAME
----
cxxtestgen - performs test discovery to create a CxxTest test runner
SYNOPSIS
--------
${usage}
DESCRIPTION
-----------
${description}
OPTIONS
-------
${options}
EXIT STATUS
-----------
*0*::
Success
*1*::
Failure (syntax or usage error; configuration error; document
processing failure; unexpected error).
BUGS
----
See the CxxTest Home Page for the link to the CxxTest ticket repository.
AUTHOR
------
CxxTest was originally written by Erez Volk. Many people have
contributed to it.
RESOURCES
---------
Home page: <http://cxxtest.com/>
CxxTest User Guide: <http://cxxtest.com/cxxtest/doc/guide.html>
COPYING
-------
Copyright (c) 2008 Sandia Corporation. This software is distributed
under the Lesser GNU General Public License (LGPL) v3
""")
def create_manpage():
"""Write ASCIIDOC manpage file"""
parser = create_parser(asciidoc=True)
#
usage = parser.usage
description = parser.description
options=""
for opt in parser.option_list:
opts = opt._short_opts + opt._long_opts
optstr = '*' + ', '.join(opts) + '*'
if not opt.metavar is None:
optstr += "='%s'" % opt.metavar
optstr += '::\n'
options += optstr
#
options += opt.help
options += '\n\n'
#
OUTPUT = open('cxxtestgen.1.txt','w')
OUTPUT.write( man_template.substitute(usage=usage, description=description, options=options) )
OUTPUT.close()
| mit |
tarawa/codejam-commandline | lib/google_login.py | 20 | 9663 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module exposes one function Login(), that authenticates user into the
Google services, returning an authentication token and a cookie."""
from datetime import datetime, timedelta
import time
import urllib
import urllib2
class AuthenticationError(urllib2.HTTPError):
"""Exception class to indicate an error when authenticating with Google's
ClientLogin.
"""
def __init__(self, url, code, message, headers, args):
"""Initialize the error with the specified arguments."""
super(AuthenticationError, self).__init__(url, code, message,
headers, None)
self.args = args
self.reason = args["Error"]
def _GetHTTPOpener():
"""Create an http opener used to interact with Google's ClientLogin.
Returns:
An http opener capable of handling anything needed to interact with
Google's ClientLogin.
"""
# Create an http opener capable of handling proxies, http and https.
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(urllib2.HTTPSHandler())
return opener
def _ParseBodyAsDict(body):
""" Parse the specified body as a dictionary with each element in a line, and
key value pairs separated by '='.
Args:
body: The string with the HTTP body to parse.
Returns:
A dictionary with the body contents.
"""
return dict(line.split('=') for line in body.split('\n') if line)
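# For illustration (not part of the original module): a ClientLogin-style
# body such as "SID=abc\nLSID=def\nAuth=ghi\n" parses to
# {'SID': 'abc', 'LSID': 'def', 'Auth': 'ghi'}.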
def _GetGoogleAuthtoken(account_type, user, password, service, source):
"""This function authenticates the user in the specified service using
the provided authentication data.
Args:
account_type: Type of the account to login, could be GOOGLE or any other
string if the account is external.
user: Name of the user to be logged in.
password: Password of the user to be logged in.
service: Service where the user wants to log in, for example, 'ah'.
source: Name of the application requesting the user authentication.
Returns:
    The authentication token for the user if the supplied data is correct.
Raises:
lib.AuthenticationError: This exception is raised if the HTTP response is
403 - Forbidden, in this case the error is parsed and returned to the
user in the exception.
urllib2.HTTPError: This exception is raised for any other HTTP error.
"""
  # Create a request for Google's Client login, with the specified data.
auth_request_data_map = {
'accountType': account_type,
'Email': user,
'Passwd': password,
'service': service,
'source': source
}
auth_request_data = urllib.urlencode(auth_request_data_map)
auth_url = 'https://www.google.com/accounts/ClientLogin'
auth_request = urllib2.Request(auth_url, auth_request_data)
try:
# Create a custom opener, make the request and extract the body.
http_opener = _GetHTTPOpener()
auth_response = http_opener.open(auth_request)
auth_response_body = auth_response.read()
# Parse the response data as a dictionary and return the 'Auth' key.
auth_response_data = _ParseBodyAsDict(auth_response_body)
return auth_response_data['Auth']
except urllib2.HTTPError as e:
# Check if the error was a 403 - Forbidden. In that case, forward the
# exception as an authentication error. Otherwise, just forward the
# exception.
if e.code == 403:
# Parse the error body as a dictionary and forward the exception as an
# authentication error.
response_dict = _ParseBodyAsDict(e.read())
raise AuthenticationError(auth_request.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetGaeCookie(host, service, auth_token, secure):
"""This function creates a login cookie using the authentication token
  obtained after successfully logging in to the Google account.
Args:
host: Host where the user wants to login.
service: Service code where the user wants to login.
auth_token: Authentication token obtained from ClientLogin.
secure: True if we want a secure cookie, false if not.
Returns:
    A cookie for the specified service.
Raises:
urllib2.HTTPError: This exception is raised when the cookie cannot be
obtained and the user is redirected to another place.
"""
# Create a request for Google's service with the authentication token.
continue_location = 'http://localhost/'
cookie_request_data_map = {
'continue' : continue_location,
'auth' : auth_token,
}
cookie_request_data = urllib.urlencode(cookie_request_data_map)
cookie_url = '{protocol}://{host}/_{service}/login?{data}'.format(
protocol=('https' if secure else 'http'), host=host, service=service,
data=cookie_request_data)
cookie_request = urllib2.Request(cookie_url)
try:
# Create a custom opener, make the request and extract the body.
http_opener = _GetHTTPOpener()
cookie_response = http_opener.open(cookie_request)
except urllib2.HTTPError as e:
# Keep the error as the cookie response.
cookie_response = e
# Check that a redirection was made to the required continue location.
# Otherwise, return an HTTP error.
response_code = cookie_response.code
if (response_code != 302 or
cookie_response.info()['location'] != continue_location):
raise urllib2.HTTPError(cookie_request.get_full_url(), response_code,
cookie_response.msg, cookie_response.headers,
cookie_response.fp)
# Extract the cookie from the headers and remove 'HttpOnly' from it.
cookie = cookie_response.headers.get('Set-Cookie')
return cookie.replace('; HttpOnly', '')
def Login(host, account_type, user, password, service, source, secure):
"""Retrieve the authentication token and cookie from the specified service,
using the given user and password to authenticate.
Args:
host: Host where the user wants to login.
account_type: Type of the account to login, could be GOOGLE or any other
string if the account is external.
user: Name of the user to be logged in.
password: Password of the user to be logged in.
service: Service where the user wants to log in, for example, 'ah'.
source: Name of the application requesting the user authentication.
secure: True if we want a secure cookie, false if not.
Returns:
    A tuple with the authentication token and a cookie for the specified service.
"""
auth_token = _GetGoogleAuthtoken(account_type, user, password, service,
source)
cookie = _GetGaeCookie(host, service, auth_token, secure)
return auth_token, cookie
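# Illustrative usage sketch (not part of the original module; the host,
# account and credentials below are placeholders, and calling this performs
# real network requests against Google's ClientLogin):
#
#   auth_token, cookie = Login(
#       host='myapp.appspot.com', account_type='GOOGLE',
#       user='user@example.com', password='secret',
#       service='ah', source='my-client', secure=True)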
def _ParseCookieFields(cookie):
# Fields inside the cookie are separated by a semicolon, so split the cookie
# and process each token as a field.
cookie_fields = {}
for token in cookie.split(';'):
# Keys and values are separated by a single equal in the field, or they
# might be keys without values. In this case, use True as the field value.
equal_index = token.find('=')
if equal_index == -1:
field_name = token.strip()
field_value = True
else:
field_name = token[:equal_index].strip()
field_value = token[equal_index + 1:].strip()
cookie_fields[field_name] = field_value
return cookie_fields
def GetCookieExpirationTime(cookie):
"""Extract and return the expiration time in the cookie.
Args:
cookie: String with the cookie whose expiration time must be retrieved.
Returns:
    A string with the cookie expiration time, or None if the expiration field
    was not found. The expiration time is returned in UTC.
"""
# Parse the cookie fields and look for an expiration field, and return None if
# the cookie has no expiration date.
cookie_fields = _ParseCookieFields(cookie)
return cookie_fields.get('expires')
def CookieHasExpired(cookie):
"""Checks whether the specified cookie expired or not.
Args:
cookie: String with the cookie information.
Returns:
True if the cookie has expired, false otherwise.
"""
# Get the cookie expiration time, if it is not found just assume the cookie
# has not expired yet.
expiration_time_string = GetCookieExpirationTime(cookie)
if expiration_time_string is None:
return False
# Parse the cookie expiration time and check if there are at least 5 minutes
# before expiration, otherwise the cookie might expire after this function
# exits but before the user action is complete.
expiration_time = datetime.strptime(expiration_time_string,
'%a, %d-%b-%Y %H:%M:%S %Z')
offset = time.altzone if time.daylight else time.timezone
today_gmt_time = datetime.today() + timedelta(seconds=offset)
time_left = expiration_time - today_gmt_time
return time_left < timedelta(minutes=5)
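# Worked example (illustrative, not part of the original module): for a
# cookie string like
#   'ACSID=abc123; expires=Wed, 01-Jan-2020 00:00:00 GMT; path=/'
# _ParseCookieFields() yields {'ACSID': 'abc123', 'path': '/',
# 'expires': 'Wed, 01-Jan-2020 00:00:00 GMT'}, GetCookieExpirationTime()
# returns the 'expires' value, and CookieHasExpired() treats the cookie as
# expired once fewer than five minutes remain before that GMT timestamp.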
| apache-2.0 |
kurrik/github-recs | src/rmn/logistic_test.py | 1 | 2137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Arne Roomann-Kurrik <kurrik@gmail.com>'
import logistic
import numpy as np
from numpy import linalg
from itertools import combinations
class TestFeatures(object):
def __init__(self, labels):
self.__labels = labels
self.__length = len(labels) * 2 + 1
def ParseX(self, line):
label1, label2 = line.split(',')
x = self.ZeroF()
off = len(self.__labels)
index1 = self.__labels.index(label1)
index2 = self.__labels.index(label2)
x[index1] = 1
#x[index2] = 1
#x[index1 + off] = 1
x[index2 + off] = 1
x[-1] = 1
return x
def Parse(self, line):
exists, xline = line.split(',', 1)
y = 1 if (exists == 'True') else -1
x = self.ParseX(xline)
print y, x
return y, x
def ZeroF(self):
return np.array([0] * self.__length)
def GetD(self, lines):
return [self.Parse(line) for line in lines]
if __name__ == '__main__':
nodes = [
'JavaScript','JavaScript','JavaScript','JavaScript',
'Scala', 'Scala',
'Go','Go',
'PHP'
]
edges = set([(0,1),(0,2),(0,3),(1,2),(4,5),(6,7),(2,5),(3,6),(1,7),(1,8)])
data = []
for edge in combinations(range(len(nodes)), 2):
exists = edge in edges or (edge[1],edge[0]) in edges
data.append("%s,%s,%s" % (exists, nodes[edge[0]], nodes[edge[1]]))
print data
F = TestFeatures(['JavaScript', 'Scala', 'Go', 'PHP'])
D = F.GetD(data)
net = logistic.Logistic(0.09, 0.01)
w = net.Weights(D)
print w
delta = 1
while delta > 0.00001:
w, delta = net.Descent(D, w)
print w, delta
tests = [
'JavaScript,Go',
'JavaScript,Scala',
'Go,Go',
'Scala,Scala',
'Scala,Go',
'JavaScript,JavaScript',
'JavaScript,PHP',
'Scala,PHP',
'Go,PHP',
]
for test in tests:
x = F.ParseX(test)
pF = net.Pw(-1, x, w)
pT = net.Pw(1, x, w)
print "Testing %s: P(T) = %s, P(F) = %s, Result = " % (test, pT, pF),
if pF > pT:
print "False"
elif pT > pF:
print "True"
else:
print "Tie"
print "Dot: %s" % net.Sigmoid(np.dot(x, w))
| apache-2.0 |
spektom/incubator-airflow | airflow/providers/slack/hooks/slack_webhook.py | 4 | 5545 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
class SlackWebhookHook(HttpHook):
"""
This hook allows you to post messages to Slack using incoming webhooks.
    Accepts both a Slack webhook token passed directly and a connection that
    has a Slack webhook token. If both are supplied, http_conn_id will be used
    as the base_url and webhook_token will be taken as the endpoint (the
    relative path of the URL).
Each Slack webhook token can be pre-configured to use a specific channel, username and
icon. You can override these defaults in this hook.
:param http_conn_id: connection that has Slack webhook token in the extra field
:type http_conn_id: str
:param webhook_token: Slack webhook token
:type webhook_token: str
:param message: The message you want to send on Slack
:type message: str
:param attachments: The attachments to send on Slack. Should be a list of
dictionaries representing Slack attachments.
:type attachments: list
:param blocks: The blocks to send on Slack. Should be a list of
dictionaries representing Slack blocks.
:type blocks: list
:param channel: The channel the message should be posted to
:type channel: str
:param username: The username to post to slack with
:type username: str
:param icon_emoji: The emoji to use as icon for the user posting to Slack
:type icon_emoji: str
:param icon_url: The icon image URL string to use in place of the default icon.
:type icon_url: str
:param link_names: Whether or not to find and link channel and usernames in your
message
:type link_names: bool
:param proxy: Proxy to use to make the Slack webhook call
:type proxy: str
"""
def __init__(self,
http_conn_id=None,
webhook_token=None,
message="",
attachments=None,
blocks=None,
channel=None,
username=None,
icon_emoji=None,
icon_url=None,
link_names=False,
proxy=None,
*args,
**kwargs
):
super().__init__(http_conn_id=http_conn_id, *args, **kwargs)
self.webhook_token = self._get_token(webhook_token, http_conn_id)
self.message = message
self.attachments = attachments
self.blocks = blocks
self.channel = channel
self.username = username
self.icon_emoji = icon_emoji
self.icon_url = icon_url
self.link_names = link_names
self.proxy = proxy
def _get_token(self, token, http_conn_id):
"""
Given either a manually set token or a conn_id, return the webhook_token to use.
:param token: The manually provided token
:type token: str
:param http_conn_id: The conn_id provided
:type http_conn_id: str
:return: webhook_token to use
:rtype: str
"""
if token:
return token
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
return extra.get('webhook_token', '')
else:
raise AirflowException('Cannot get token: No valid Slack '
'webhook token nor conn_id supplied')
def _build_slack_message(self):
"""
        Construct the Slack message. All relevant parameters are combined here
        into a valid Slack JSON message.
:return: Slack message to send
:rtype: str
"""
cmd = {}
if self.channel:
cmd['channel'] = self.channel
if self.username:
cmd['username'] = self.username
if self.icon_emoji:
cmd['icon_emoji'] = self.icon_emoji
if self.icon_url:
cmd['icon_url'] = self.icon_url
if self.link_names:
cmd['link_names'] = 1
if self.attachments:
cmd['attachments'] = self.attachments
if self.blocks:
cmd['blocks'] = self.blocks
cmd['text'] = self.message
return json.dumps(cmd)
def execute(self):
"""
        Execute the Slack webhook call.
"""
proxies = {}
if self.proxy:
# we only need https proxy for Slack, as the endpoint is https
proxies = {'https': self.proxy}
slack_message = self._build_slack_message()
self.run(endpoint=self.webhook_token,
data=slack_message,
headers={'Content-type': 'application/json'},
extra_options={'proxies': proxies})
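# Illustrative usage sketch (not part of the original module; the webhook
# token and channel below are placeholders, no Airflow connection assumed):
#
#   hook = SlackWebhookHook(
#       webhook_token='T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX',
#       message='Task finished', channel='#alerts', username='airflow')
#   hook.execute()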
| apache-2.0 |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/scipy/_lib/tests/test__version.py | 107 | 2027 | from numpy.testing import assert_, run_module_suite, assert_raises
from scipy._lib._version import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert_(NumpyVersion('1.8.0') > ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert_(NumpyVersion('1.9.0') < '1.10.0')
assert_(NumpyVersion('1.11.0') < '1.11.1')
assert_(NumpyVersion('1.11.0') == '1.11.0')
assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_alpha_beta_rc():
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert_(NumpyVersion('1.8.0rc1') < ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert_(NumpyVersion('1.8.0rc1') > ver)
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
def test_dev_version():
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
def test_dev0_version():
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
def test_dev0_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
sirchia/CouchPotatoServer | libs/enzyme/riff.py | 179 | 20109 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import os
import struct
import string
import logging
import time
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
# List of tags
# http://kibus1.narod.ru/frames_eng.htm?sof/abcavi/infotags.htm
# http://www.divx-digest.com/software/avitags_dll.html
# File Format: google for odmlff2.pdf
AVIINFO = {
'INAM': 'title',
'IART': 'artist',
'IPRD': 'product',
'ISFT': 'software',
'ICMT': 'comment',
'ILNG': 'language',
'IKEY': 'keywords',
'IPRT': 'trackno',
'IFRM': 'trackof',
'IPRO': 'producer',
'IWRI': 'writer',
'IGNR': 'genre',
'ICOP': 'copyright'
}
# Taken from libavcodec/mpeg4data.h (pixel_aspect struct)
PIXEL_ASPECT = {
1: (1, 1),
2: (12, 11),
3: (10, 11),
4: (16, 11),
5: (40, 33)
}
class Riff(core.AVContainer):
"""
AVI parser also parsing metadata like title, languages, etc.
"""
table_mapping = { 'AVIINFO' : AVIINFO }
def __init__(self, file):
core.AVContainer.__init__(self)
# read the header
h = file.read(12)
if h[:4] != "RIFF" and h[:4] != 'SDSS':
raise ParseError()
self.has_idx = False
self.header = {}
self.junkStart = None
self.infoStart = None
self.type = h[8:12]
if self.type == 'AVI ':
self.mime = 'video/avi'
elif self.type == 'WAVE':
self.mime = 'audio/wav'
try:
while self._parseRIFFChunk(file):
pass
except IOError:
log.exception(u'error in file, stop parsing')
self._find_subtitles(file.name)
if not self.has_idx and isinstance(self, core.AVContainer):
log.debug(u'WARNING: avi has no index')
self._set('corrupt', True)
def _find_subtitles(self, filename):
"""
Search for subtitle files. Right now only VobSub is supported
"""
base = os.path.splitext(filename)[0]
if os.path.isfile(base + '.idx') and \
(os.path.isfile(base + '.sub') or os.path.isfile(base + '.rar')):
file = open(base + '.idx')
if file.readline().find('VobSub index file') > 0:
for line in file.readlines():
if line.find('id') == 0:
sub = core.Subtitle()
sub.language = line[4:6]
sub.trackno = base + '.idx' # Maybe not?
self.subtitles.append(sub)
file.close()
def _parseAVIH(self, t):
retval = {}
v = struct.unpack('<IIIIIIIIIIIIII', t[0:56])
(retval['dwMicroSecPerFrame'],
retval['dwMaxBytesPerSec'],
retval['dwPaddingGranularity'],
retval['dwFlags'],
retval['dwTotalFrames'],
retval['dwInitialFrames'],
retval['dwStreams'],
retval['dwSuggestedBufferSize'],
retval['dwWidth'],
retval['dwHeight'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength']) = v
if retval['dwMicroSecPerFrame'] == 0:
log.warning(u'ERROR: Corrupt AVI')
raise ParseError()
return retval
def _parseSTRH(self, t):
retval = {}
retval['fccType'] = t[0:4]
log.debug(u'_parseSTRH(%r) : %d bytes' % (retval['fccType'], len(t)))
if retval['fccType'] != 'auds':
retval['fccHandler'] = t[4:8]
v = struct.unpack('<IHHIIIIIIIII', t[8:52])
(retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame']) = v
else:
try:
v = struct.unpack('<IHHIIIIIIIII', t[8:52])
(retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame']) = v
self.delay = float(retval['dwStart']) / \
(float(retval['dwRate']) / retval['dwScale'])
except (KeyError, IndexError, ValueError, ZeroDivisionError):
pass
return retval
def _parseSTRF(self, t, strh):
fccType = strh['fccType']
retval = {}
if fccType == 'auds':
v = struct.unpack('<HHHHHH', t[0:12])
(retval['wFormatTag'],
retval['nChannels'],
retval['nSamplesPerSec'],
retval['nAvgBytesPerSec'],
retval['nBlockAlign'],
retval['nBitsPerSample'],
) = v
ai = core.AudioStream()
ai.samplerate = retval['nSamplesPerSec']
ai.channels = retval['nChannels']
# FIXME: Bitrate calculation is completely wrong.
#ai.samplebits = retval['nBitsPerSample']
#ai.bitrate = retval['nAvgBytesPerSec'] * 8
# TODO: set code if possible
# http://www.stats.uwa.edu.au/Internal/Specs/DXALL/FileSpec/\
# Languages
# ai.language = strh['wLanguage']
ai.codec = retval['wFormatTag']
self.audio.append(ai)
elif fccType == 'vids':
v = struct.unpack('<IIIHH', t[0:16])
(retval['biSize'],
retval['biWidth'],
retval['biHeight'],
retval['biPlanes'],
retval['biBitCount']) = v
v = struct.unpack('IIIII', t[20:40])
(retval['biSizeImage'],
retval['biXPelsPerMeter'],
retval['biYPelsPerMeter'],
retval['biClrUsed'],
retval['biClrImportant']) = v
vi = core.VideoStream()
vi.codec = t[16:20]
vi.width = retval['biWidth']
vi.height = retval['biHeight']
# FIXME: Bitrate calculation is completely wrong.
#vi.bitrate = strh['dwRate']
vi.fps = float(strh['dwRate']) / strh['dwScale']
vi.length = strh['dwLength'] / vi.fps
self.video.append(vi)
return retval
def _parseSTRL(self, t):
retval = {}
size = len(t)
i = 0
while i < len(t) - 8:
key = t[i:i + 4]
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = t[i:]
if key == 'strh':
retval[key] = self._parseSTRH(value)
elif key == 'strf':
retval[key] = self._parseSTRF(value, retval['strh'])
else:
log.debug(u'_parseSTRL: unsupported stream tag %r', key)
i += sz
return retval, i
def _parseODML(self, t):
retval = {}
size = len(t)
i = 0
key = t[i:i + 4]
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = t[i:]
if key != 'dmlh':
log.debug(u'_parseODML: Error')
i += sz - 8
return (retval, i)
def _parseVPRP(self, t):
retval = {}
v = struct.unpack('<IIIIIIIIII', t[:4 * 10])
(retval['VideoFormat'],
retval['VideoStandard'],
retval['RefreshRate'],
retval['HTotalIn'],
retval['VTotalIn'],
retval['FrameAspectRatio'],
retval['wPixel'],
retval['hPixel']) = v[1:-1]
        # I need an avi with more information
# enum {FORMAT_UNKNOWN, FORMAT_PAL_SQUARE, FORMAT_PAL_CCIR_601,
# FORMAT_NTSC_SQUARE, FORMAT_NTSC_CCIR_601,...} VIDEO_FORMAT;
# enum {STANDARD_UNKNOWN, STANDARD_PAL, STANDARD_NTSC, STANDARD_SECAM}
# VIDEO_STANDARD;
#
r = retval['FrameAspectRatio']
r = float(r >> 16) / (r & 0xFFFF)
retval['FrameAspectRatio'] = r
if self.video:
map(lambda v: setattr(v, 'aspect', r), self.video)
return (retval, v[0])
def _parseLISTmovi(self, size, file):
"""
Digs into movi list, looking for a Video Object Layer header in an
mpeg4 stream in order to determine aspect ratio.
"""
i = 0
n_dc = 0
done = False
# If the VOL header doesn't appear within 5MB or 5 video chunks,
# give up. The 5MB limit is not likely to apply except in
# pathological cases.
while i < min(1024 * 1024 * 5, size - 8) and n_dc < 5:
data = file.read(8)
if ord(data[0]) == 0:
# Eat leading nulls.
data = data[1:] + file.read(1)
i += 1
key, sz = struct.unpack('<4sI', data)
if key[2:] != 'dc' or sz > 1024 * 500:
# This chunk is not video or is unusually big (> 500KB);
# skip it.
file.seek(sz, 1)
i += 8 + sz
continue
n_dc += 1
# Read video chunk into memory
data = file.read(sz)
#for p in range(0,min(80, sz)):
# print "%02x " % ord(data[p]),
#print "\n\n"
# Look through the picture header for VOL startcode. The basic
# logic for this is taken from libavcodec, h263.c
pos = 0
startcode = 0xff
def bits(v, o, n):
# Returns n bits in v, offset o bits.
return (v & 2 ** n - 1 << (64 - n - o)) >> 64 - n - o
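        # Worked example (illustrative): bits(0x8000000000000000, 0, 1) == 1.
        # The helper masks n bits starting o bits from the most-significant
        # end of the 64-bit value and shifts them down to the low bits.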
while pos < sz:
startcode = ((startcode << 8) | ord(data[pos])) & 0xffffffff
pos += 1
if startcode & 0xFFFFFF00 != 0x100:
# No startcode found yet
continue
if startcode >= 0x120 and startcode <= 0x12F:
# We have the VOL startcode. Pull 64 bits of it and treat
# as a bitstream
v = struct.unpack(">Q", data[pos : pos + 8])[0]
offset = 10
if bits(v, 9, 1):
# is_ol_id, skip over vo_ver_id and vo_priority
offset += 7
ar_info = bits(v, offset, 4)
if ar_info == 15:
# Extended aspect
num = bits(v, offset + 4, 8)
den = bits(v, offset + 12, 8)
else:
# A standard pixel aspect
num, den = PIXEL_ASPECT.get(ar_info, (0, 0))
# num/den indicates pixel aspect; convert to video aspect,
# so we need frame width and height.
if 0 not in [num, den]:
width, height = self.video[-1].width, self.video[-1].height
self.video[-1].aspect = num / float(den) * width / height
done = True
break
startcode = 0xff
i += 8 + len(data)
if done:
# We have the aspect, no need to continue parsing the movi
# list, so break out of the loop.
break
if i < size:
# Seek past whatever might be remaining of the movi list.
file.seek(size - i, 1)
def _parseLIST(self, t):
retval = {}
i = 0
size = len(t)
while i < size - 8:
# skip zero
if ord(t[i]) == 0: i += 1
key = t[i:i + 4]
sz = 0
if key == 'LIST':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
key = "LIST:" + t[i:i + 4]
value = self._parseLIST(t[i:i + sz])
if key == 'strl':
for k in value.keys():
retval[k] = value[k]
else:
retval[key] = value
i += sz
elif key == 'avih':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
value = self._parseAVIH(t[i:i + sz])
i += sz
retval[key] = value
elif key == 'strl':
i += 4
(value, sz) = self._parseSTRL(t[i:])
key = value['strh']['fccType']
i += sz
retval[key] = value
elif key == 'odml':
i += 4
(value, sz) = self._parseODML(t[i:])
i += sz
elif key == 'vprp':
i += 4
(value, sz) = self._parseVPRP(t[i:])
retval[key] = value
i += sz
elif key == 'JUNK':
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += sz + 8
else:
sz = struct.unpack('<I', t[i + 4:i + 8])[0]
i += 8
# in most cases this is some info stuff
if not key in AVIINFO.keys() and key != 'IDIT':
log.debug(u'Unknown Key: %r, len: %d' % (key, sz))
value = t[i:i + sz]
if key == 'ISFT':
# product information
if value.find('\0') > 0:
# works for Casio S500 camera videos
value = value[:value.find('\0')]
value = value.replace('\0', '').lstrip().rstrip()
value = value.replace('\0', '').lstrip().rstrip()
if value:
retval[key] = value
if key in ['IDIT', 'ICRD']:
# Timestamp the video was created. Spec says it
# should be a format like "Wed Jan 02 02:03:55 1990"
# Casio S500 uses "2005/12/24/ 14:11", but I've
# also seen "December 24, 2005"
specs = ('%a %b %d %H:%M:%S %Y', '%Y/%m/%d/ %H:%M', '%B %d, %Y')
for tmspec in specs:
try:
tm = time.strptime(value, tmspec)
# save timestamp as int
self.timestamp = int(time.mktime(tm))
break
except ValueError:
pass
else:
log.debug(u'no support for time format %r', value)
i += sz
return retval
def _parseRIFFChunk(self, file):
h = file.read(8)
if len(h) < 8:
return False
name = h[:4]
size = struct.unpack('<I', h[4:8])[0]
if name == 'LIST':
pos = file.tell() - 8
key = file.read(4)
if key == 'movi' and self.video and not self.video[-1].aspect and \
self.video[-1].width and self.video[-1].height and \
self.video[-1].format in ['DIVX', 'XVID', 'FMP4']: # any others?
# If we don't have the aspect (i.e. it isn't in odml vprp
# header), but we do know the video's dimensions, and
# we're dealing with an mpeg4 stream, try to get the aspect
# from the VOL header in the mpeg4 stream.
self._parseLISTmovi(size - 4, file)
return True
elif size > 80000:
log.debug(u'RIFF LIST %r too long to parse: %r bytes' % (key, size))
t = file.seek(size - 4, 1)
return True
elif size < 5:
log.debug(u'RIFF LIST %r too short: %r bytes' % (key, size))
return True
t = file.read(size - 4)
log.debug(u'parse RIFF LIST %r: %d bytes' % (key, size))
value = self._parseLIST(t)
self.header[key] = value
if key == 'INFO':
self.infoStart = pos
self._appendtable('AVIINFO', value)
elif key == 'MID ':
self._appendtable('AVIMID', value)
elif key == 'hdrl':
# no need to add this info to a table
pass
else:
log.debug(u'Skipping table info %r' % key)
elif name == 'JUNK':
self.junkStart = file.tell() - 8
self.junkSize = size
file.seek(size, 1)
elif name == 'idx1':
self.has_idx = True
log.debug(u'idx1: %r bytes' % size)
# no need to parse this
t = file.seek(size, 1)
elif name == 'RIFF':
log.debug(u'New RIFF chunk, extended avi [%i]' % size)
type = file.read(4)
if type != 'AVIX':
log.debug(u'Second RIFF chunk is %r, not AVIX, skipping', type)
file.seek(size - 4, 1)
            # that's it, no new information should be in AVIX
return False
elif name == 'fmt ' and size <= 50:
# This is a wav file.
data = file.read(size)
fmt = struct.unpack("<HHLLHH", data[:16])
self._set('codec', hex(fmt[0]))
self._set('samplerate', fmt[2])
# fmt[3] is average bytes per second, so we must divide it
# by 125 to get kbits per second
self._set('bitrate', fmt[3] / 125)
# ugly hack: remember original rate in bytes per second
# so that the length can be calculated in next elif block
self._set('byterate', fmt[3])
# Set a dummy fourcc so codec will be resolved in finalize.
self._set('fourcc', 'dummy')
elif name == 'data':
# XXX: this is naive and may not be right. For example if the
# stream is something that supports VBR like mp3, the value
# will be off. The only way to properly deal with this issue
# is to decode part of the stream based on its codec, but
# kaa.metadata doesn't have this capability (yet?)
# ugly hack: use original rate in bytes per second
self._set('length', size / float(self.byterate))
file.seek(size, 1)
elif not name.strip(string.printable + string.whitespace):
# check if name is something usefull at all, maybe it is no
# avi or broken
t = file.seek(size, 1)
log.debug(u'Skipping %r [%i]' % (name, size))
else:
# bad avi
log.debug(u'Bad or broken avi')
return False
return True
Parser = Riff
| gpl-3.0 |
bdunnette/djecks | migrations/0007_auto__add_field_deck_source__chg_field_deck_title__chg_field_deck_desc.py | 1 | 3848 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Deck.source'
db.add_column(u'djecks_deck', 'source',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Changing field 'Deck.title'
db.alter_column(u'djecks_deck', 'title', self.gf('django.db.models.fields.TextField')(null=True))
# Changing field 'Deck.description'
db.alter_column(u'djecks_deck', 'description', self.gf('django.db.models.fields.TextField')(null=True))
# Adding field 'Card.source'
db.add_column(u'djecks_card', 'source',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Deck.source'
db.delete_column(u'djecks_deck', 'source')
# Changing field 'Deck.title'
db.alter_column(u'djecks_deck', 'title', self.gf('django.db.models.fields.TextField')(default=''))
# Changing field 'Deck.description'
db.alter_column(u'djecks_deck', 'description', self.gf('django.db.models.fields.TextField')(default=''))
# Deleting field 'Card.source'
db.delete_column(u'djecks_card', 'source')
models = {
u'djecks.card': {
'Meta': {'object_name': 'Card'},
'cases': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djecks.Case']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_back': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_front': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'djecks.case': {
'Meta': {'object_name': 'Case'},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'decks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djecks.Deck']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'djecks.deck': {
'Meta': {'object_name': 'Deck'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['djecks']
| agpl-3.0 |
drewejohnson/drewtils | drewtils/__init__.py | 1 | 1629 | # THIS FILE IS PROVIDED AS IS UNDER THE CONDITIONS DETAILED IN LICENSE
# COPYRIGHT ANDREW JOHNSON, 2017-2020
import operator
from drewtils.parsers import KeywordParser, PatternReader
__version__ = '0.2.0'
def dfSubset(data, where):
"""
Return a subset of the data given a series of conditions
.. versionadded:: 0.1.9
Parameters
----------
data: :py:class:`pandas.DataFrame`:
DataFrame to view
where: str or list or tuple
Conditions to apply.
Notes
-----
If the argument is a string, it will be converted
to a tuple for iteration. Items in iterable can be either a string
or three-valued iterable of the following form::
string: 'column operand target'
iterable: ('column', 'operand', 'target')
If the first-level item is a string, it will be split at spaces.
Operands are string-representations of operators from the operator module,
e.g.::
'eq', 'ge', 'le', 'ne', 'gt', 'lt', 'contains'
Returns
-------
view: :py:class:`pandas.DataFrame`:
View into the data frame after successive slices
See Also
--------
:py:mod:`operator`
"""
view = data
if isinstance(where, str):
where = where,
for item in where:
if isinstance(item, str):
cond = item.split()
else:
cond = item
assert len(cond) == 3, ('Conditions should have three arguments, '
'not like {}'.format(item))
evalFunc = getattr(operator, cond[1])
view = view[evalFunc(view[cond[0]], cond[2])]
return view
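# Illustrative usage sketch (not part of the original module; assumes pandas
# is installed and the frame below is made up):
#
#   >>> import pandas
#   >>> frame = pandas.DataFrame({'grade': ['a', 'b', 'a'],
#   ...                           'name': ['x', 'y', 'z']})
#   >>> dfSubset(frame, 'grade eq a')                         # rows 0 and 2
#   >>> dfSubset(frame, [('grade', 'eq', 'a'), 'name ne x'])  # row 2 only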
| mit |
yannickcr/CouchPotatoServer | couchpotato/core/notifications/twitter/__init__.py | 81 | 1547 | from .main import Twitter
def autoload():
return Twitter()
config = [{
'name': 'twitter',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'twitter',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'access_token_key',
'advanced': True,
},
{
'name': 'screen_name',
'advanced': True,
},
{
'name': 'access_token_secret',
'advanced': True,
},
{
'name': 'mention',
'description': 'Add a mention to this user to the tweet.',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
{
'name': 'direct_message',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use direct messages for the notifications (Also applies to the mentioned users).',
},
],
}
],
}]
| gpl-3.0 |
blacklin/kbengine | kbe/src/lib/python/Tools/unicode/comparecodecs.py | 52 | 1440 | #!/usr/bin/env python3
""" Compare the output of two codecs.
(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).
Licensed to PSF under a Contributor Agreement.
"""
import sys
def compare_codecs(encoding1, encoding2):
print('Comparing encoding/decoding of %r and %r' % (encoding1, encoding2))
mismatch = 0
# Check encoding
for i in range(sys.maxunicode+1):
u = chr(i)
try:
c1 = u.encode(encoding1)
except UnicodeError as reason:
c1 = '<undefined>'
try:
c2 = u.encode(encoding2)
except UnicodeError as reason:
c2 = '<undefined>'
if c1 != c2:
print(' * encoding mismatch for 0x%04X: %-14r != %r' % \
(i, c1, c2))
mismatch += 1
# Check decoding
for i in range(256):
c = bytes([i])
try:
u1 = c.decode(encoding1)
except UnicodeError:
u1 = '<undefined>'
try:
u2 = c.decode(encoding2)
except UnicodeError:
u2 = '<undefined>'
if u1 != u2:
print(' * decoding mismatch for 0x%04X: %-14r != %r' % \
(i, u1, u2))
mismatch += 1
if mismatch:
print()
print('Found %i mismatches' % mismatch)
else:
print('-> Codecs are identical.')
if __name__ == '__main__':
compare_codecs(sys.argv[1], sys.argv[2])
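# Example invocation (illustrative; any two installed codec names work):
#
#   python3 comparecodecs.py latin-1 cp1252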
| lgpl-3.0 |
Kiiv/CouchPotatoServer | libs/suds/mx/encoded.py | 211 | 4651 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides encoded I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.literal import Literal
from suds.mx.typer import Typer
from suds.sudsobject import Factory, Object
from suds.xsd.query import TypeQuery
log = getLogger(__name__)
#
# Add encoded extensions
# aty = The soap (section 5) encoded array type.
#
Content.extensions.append('aty')
class Encoded(Literal):
"""
A SOAP section (5) encoding marshaller.
This marshaller supports rpc/encoded soap styles.
"""
def start(self, content):
#
# For soap encoded arrays, the 'aty' (array type) information
# is extracted and added to the 'content'. Then, the content.value
# is replaced with an object containing an 'item=[]' attribute
# containing values that are 'typed' suds objects.
#
start = Literal.start(self, content)
if start and isinstance(content.value, (list,tuple)):
resolved = content.type.resolve()
for c in resolved:
if hasattr(c[0], 'aty'):
content.aty = (content.tag, c[0].aty)
self.cast(content)
break
return start
def end(self, parent, content):
#
# For soap encoded arrays, the soapenc:arrayType attribute is
# added with proper type and size information.
# Eg: soapenc:arrayType="xs:int[3]"
#
Literal.end(self, parent, content)
if content.aty is None:
return
tag, aty = content.aty
ns0 = ('at0', aty[1])
ns1 = ('at1', 'http://schemas.xmlsoap.org/soap/encoding/')
array = content.value.item
child = parent.getChild(tag)
child.addPrefix(ns0[0], ns0[1])
child.addPrefix(ns1[0], ns1[1])
name = '%s:arrayType' % ns1[0]
value = '%s:%s[%d]' % (ns0[0], aty[0], len(array))
child.set(name, value)
def encode(self, node, content):
if content.type.any():
Typer.auto(node, content.value)
return
if content.real.any():
Typer.auto(node, content.value)
return
ns = None
name = content.real.name
if self.xstq:
ns = content.real.namespace()
Typer.manual(node, name, ns)
def cast(self, content):
"""
Cast the I{untyped} list items found in content I{value}.
Each items contained in the list is checked for XSD type information.
Items (values) that are I{untyped}, are replaced with suds objects and
type I{metadata} is added.
@param content: The content holding the collection.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
"""
aty = content.aty[1]
resolved = content.type.resolve()
array = Factory.object(resolved.name)
array.item = []
query = TypeQuery(aty)
ref = query.execute(self.schema)
if ref is None:
            raise TypeNotFound(aty)
for x in content.value:
if isinstance(x, (list, tuple)):
array.item.append(x)
continue
if isinstance(x, Object):
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
if isinstance(x, dict):
x = Factory.object(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
x = Factory.property(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
content.value = array
return self
| gpl-3.0 |
pradyunsg/Py2C | setup.py | 1 | 1554 | #!/usr/bin/python3
# pylint:disable=C0103
import sys
if sys.version_info[:2] < (3, 3):
print("[Py2C] Cannot run on Python versions before Python 3.3")
sys.exit(1)
try:
from setuptools import setup, find_packages
except ImportError:
print("[Py2C] Please install 'setuptools'..")
sys.exit(1)
# setup.py metadata
from setup_metadata import get_metadata # noqa
# -----------------------------------------------------------------------------
# Generating the AST
# -----------------------------------------------------------------------------
from os.path import join, dirname, realpath # noqa
try: # If ever setuptools improves on the build_py command.
from setuptools.command.build_py import build_py as _build_py
except ImportError:
from distutils.command.build_py import build_py as _build_py
class build_py(_build_py):
"""A customized version to build the AST definition files
"""
def initialize_options(self):
import py2c.tree.node_gen as node_gen
path_to_definitions = realpath(join(dirname(__file__), "py2c", "tree"))
node_gen.generate(path_to_definitions)
super().initialize_options()
metadata = get_metadata()
# -----------------------------------------------------------------------------
# The main setup call
# -----------------------------------------------------------------------------
setup(
# Packaging related stuff
packages=find_packages(),
setup_requires=["ply==3.4"],
cmdclass={
'build_py': build_py,
},
**metadata
)
| bsd-3-clause |
JioCloud/python-novaclient | novaclient/openstack/common/strutils.py | 5 | 8198 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from novaclient.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return default
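# Illustrative behaviour (doctest-style sketch, not part of the original
# module):
#
#   >>> bool_from_string('YES')
#   True
#   >>> bool_from_string('maybe', default=True)
#   True
#   >>> bool_from_string('maybe', strict=True)  # raises ValueError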
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming text/bytes string using `incoming` if they're not
already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming text/bytes string using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
if six.PY3:
return text.encode(encoding, errors).decode(incoming)
else:
return text.encode(encoding, errors)
return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
"""Converts a string into an float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
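# Illustrative behaviour (doctest-style sketch, not part of the original
# module):
#
#   >>> string_to_bytes('1KiB')                    # IEC: 1 * 1024
#   1024.0
#   >>> string_to_bytes('1kB', unit_system='SI')   # SI: 1 * 1000
#   1000.0
#   >>> string_to_bytes('8Kb')                     # bits are divided by 8
#   1024.0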
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
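# Illustrative behaviour (doctest-style sketch, not part of the original
# module):
#
#   >>> to_slug(u'Héllo World!')
#   u'hello-world'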
| apache-2.0 |
osvalr/odoo | addons/project_issue/project_issue.py | 217 | 29319 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
class project_issue(osv.Model):
_name = "project.issue"
_description = "Project Issue"
_order = "priority desc, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
# this is only an heuristics; depending on your particular stage configuration it may not match all 'new' stages
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
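    # For illustration (not part of the original module): a context of
    # {'default_project_id': 42} resolves directly to 42, while
    # {'default_project_id': 'My Project'} is resolved through a
    # project.project name_search() and is returned only when it matches
    # exactly one project.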
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
        @param ids: List of issue IDs
@return: difference between current date and log date
@param context: A standard dictionary for contextual values
"""
Calendar = self.pool['resource.calendar']
res = dict((res_id, {}) for res_id in ids)
for issue in self.browse(cr, uid, ids, context=context):
values = {
'day_open': 0.0, 'day_close': 0.0,
'working_hours_open': 0.0, 'working_hours_close': 0.0,
'days_since_creation': 0.0, 'inactivity_days': 0.0,
}
# if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
calendar_id = None
if issue.project_id and issue.project_id.resource_calendar_id:
calendar_id = issue.project_id.resource_calendar_id.id
dt_create_date = datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
if issue.date_open:
dt_date_open = datetime.strptime(issue.date_open, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_open'] = (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_open'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_open,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
if issue.date_closed:
dt_date_closed = datetime.strptime(issue.date_closed, DEFAULT_SERVER_DATETIME_FORMAT)
values['day_close'] = (dt_date_closed - dt_create_date).total_seconds() / (24.0 * 3600)
values['working_hours_close'] = Calendar._interval_hours_get(
cr, uid, calendar_id, dt_create_date, dt_date_closed,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False, context=context)
days_since_creation = datetime.today() - dt_create_date
values['days_since_creation'] = days_since_creation.days
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, DEFAULT_SERVER_DATETIME_FORMAT)
elif issue.date_last_stage_update:
inactive_days = datetime.today() - datetime.strptime(issue.date_last_stage_update, DEFAULT_SERVER_DATETIME_FORMAT)
else:
inactive_days = datetime.today() - datetime.strptime(issue.create_date, DEFAULT_SERVER_DATETIME_FORMAT)
values['inactivity_days'] = inactive_days.days
# filter only required values
for field in fields:
res[issue.id][field] = values[field]
return res
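    # Rough worked example (hypothetical timestamps) for the day_open value
    # computed above: create_date = 2014-01-01 00:00:00 and
    # date_open = 2014-01-02 12:00:00 give
    # (dt_date_open - dt_create_date).total_seconds() / (24.0 * 3600) == 1.5 days.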
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
        'section_id': fields.many2one('crm.case.section', 'Sales Team',
                        select=True, help='Sales team to which the case belongs. '
                             'Define the responsible user and email account for the mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Assigned', readonly=True, select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True, select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel': fields.char('Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to assign the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'working_hours_close': fields.function(_compute_day, string='Working Hours to close the Issue',
multi='compute_day', type="float",
store={'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'inactivity_days': fields.function(_compute_day, string='Days since last action',
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['work_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
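        # (Assumed reading of the store triggers above: each function field is
        # recomputed, with priority 10, whenever the listed trigger fields
        # change on the listed model; e.g. 'progress' is refreshed when a
        # task's work, hours or stage change.)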
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '0',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, [id], ['name'], context=context)[0]
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default, context=context)
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, None, vals.get('stage_id'), context=context)['value'])
        # context: no_log, because the subtype already handles this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value'])
vals['date_last_stage_update'] = fields.datetime.now()
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_start
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
""" This function returns value of partner email address based on partner
:param part: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool['project.task.type'].browse(cr, uid, stage_id, context=context)
if stage.fold:
return {'value': {'date_closed': fields.datetime.now()}}
return {'value': {'date_closed': False}}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
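        # Illustrative (assumed) shape of the prefix-notation domain built
        # above for section_ids == [1, 2, 3]:
        #   ['|', '|', ('project_ids', '=', 1), ('project_ids', '=', 2),
        #    ('project_ids', '=', 3)]
        # i.e. n-1 leading '|' operators OR together the n leaf terms.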
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_escalate(self, cr, uid, ids, context=None): # FIXME rename this method to issue_escalate
for issue in self.browse(cr, uid, ids, context=context):
data = {}
esc_proj = issue.project_id.project_escalation_id
if not esc_proj:
                raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nNo Escalation Project is configured on the relevant project!'))
data['project_id'] = esc_proj.id
if esc_proj.user_id:
data['user_id'] = esc_proj.user_id.id
issue.write(data)
if issue.task_id:
issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
issues = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([issue.project_id.id for issue in issues if issue.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), context=context)
return dict((issue.id, aliases.get(issue.project_id and issue.project_id.id or 0, False)) for issue in issues)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for issue in self.browse(cr, uid, ids, context=context):
if issue.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
elif issue.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
        except (osv.except_osv, orm.except_orm):  # no read access rights -> just ignore suggested recipients, because this implies modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
context = dict(context or {}, state_to='draft')
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
""" Overrides mail_thread message_post so that we can set the date of last action field when
a new message is posted on the issue.
"""
if context is None:
context = {}
res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
if thread_id and subtype:
self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
return res
class project(osv.Model):
_inherit = "project.project"
def _get_alias_models(self, cr, uid, context=None):
return [('project.task', "Tasks"), ("project.issue", "Issues")]
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
            project_id: Issue.search_count(cr, uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
for project_id in ids
}
_columns = {
'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
'issue_ids': fields.one2many('project.issue', 'project_id',
domain=[('stage_id.fold', '=', False)])
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
class account_analytic_account(osv.Model):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_issues'] = template.use_issues
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
if context is None:
context = {}
res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
class project_project(osv.Model):
_inherit = 'project.project'
_defaults = {
'use_issues': True
}
def _check_create_write_values(self, cr, uid, vals, context=None):
""" Perform some check on values given to create or write. """
# Handle use_tasks / use_issues: if only one is checked, alias should take the same model
if vals.get('use_tasks') and not vals.get('use_issues'):
vals['alias_model'] = 'project.task'
elif vals.get('use_issues') and not vals.get('use_tasks'):
vals['alias_model'] = 'project.issue'
def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
values = {}
if use_tasks and not use_issues:
values['alias_model'] = 'project.task'
elif not use_tasks and use_issues:
values['alias_model'] = 'project.issue'
return {'value': values}
def create(self, cr, uid, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
    """ Inherits partner and adds Issue information in the partner form """
    _inherit = 'res.partner'
    def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
        Issue = self.pool['project.issue']
        return {
            partner_id: Issue.search_count(cr, uid, [('partner_id', '=', partner_id)])
            for partner_id in ids
        }
_columns = {
'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jumpstarter-io/neutron | neutron/tests/unit/cisco/cfg_agent/test_routing_svc_helper.py | 5 | 29597 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hareesh Puthalath, Cisco Systems, Inc.
import copy
import mock
from oslo.config import cfg
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import rpc as n_rpc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.cfg_agent import cfg_agent
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
from neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper import(
RouterInfo)
from neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper import(
RoutingServiceHelper)
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOST = 'myhost'
FAKE_ID = _uuid()
LOG = logging.getLogger(__name__)
def prepare_router_data(enable_snat=None, num_internal_ports=1):
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
int_ports = []
for i in range(num_internal_ports):
int_ports.append({'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
'subnet_id': _uuid()}],
'mac_address': 'ca:fe:de:ad:be:ef',
'subnet': {'cidr': '35.4.%s.0/24' % i,
'gateway_ip': '35.4.%s.1' % i}})
hosting_device = {'id': _uuid(),
"name": "CSR1kv_template",
"booting_time": 300,
"host_category": "VM",
'management_ip_address': '20.0.0.5',
'protocol_port': 22,
"credentials": {
"username": "user",
"password": "4getme"},
}
router = {
'id': router_id,
'admin_state_up': True,
l3_constants.INTERFACE_KEY: int_ports,
'routes': [],
'gw_port': ex_gw_port,
'hosting_device': hosting_device}
if enable_snat is not None:
router['enable_snat'] = enable_snat
return router, int_ports
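# Example usage of the helper above (mirrors the tests below):
#   router, ports = prepare_router_data(enable_snat=True, num_internal_ports=2)
# yields a router dict with a gateway port, two internal ports and
# 'enable_snat' set.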
class TestRouterInfo(base.BaseTestCase):
def setUp(self):
super(TestRouterInfo, self).setUp()
self.ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
self.router = {'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': self.ex_gw_port}
def test_router_info_create(self):
router_id = _uuid()
fake_router = {}
ri = RouterInfo(router_id, fake_router)
self.assertTrue(ri.router_name().endswith(router_id))
def test_router_info_create_with_router(self):
router_id = _uuid()
ri = RouterInfo(router_id, self.router)
self.assertTrue(ri.router_name().endswith(router_id))
self.assertEqual(ri.router, self.router)
self.assertEqual(ri._router, self.router)
self.assertTrue(ri.snat_enabled)
self.assertIsNone(ri.ex_gw_port)
def test_router_info_create_snat_disabled(self):
router_id = _uuid()
self.router['enable_snat'] = False
ri = RouterInfo(router_id, self.router)
self.assertFalse(ri.snat_enabled)
class TestBasicRoutingOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRoutingOperations, self).setUp()
self.conf = cfg.ConfigOpts()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(cfg_agent.CiscoCfgAgent.OPTS)
self.ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
self.hosting_device = {'id': "100",
'name': "CSR1kv_template",
'booting_time': 300,
'host_category': "VM",
'management_ip_address': '20.0.0.5',
'protocol_port': 22,
'credentials': {'username': 'user',
"password": '4getme'},
}
self.router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': self.ex_gw_port,
'hosting_device': self.hosting_device}
self.agent = mock.Mock()
        # Patches & Mocks
self.l3pluginApi_cls_p = mock.patch(
'neutron.plugins.cisco.cfg_agent.service_helpers.'
'routing_svc_helper.CiscoRoutingPluginApi')
l3plugin_api_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.Mock()
l3plugin_api_cls.return_value = self.plugin_api
self.plugin_api.get_routers = mock.MagicMock()
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
mock.patch('neutron.common.rpc.create_connection').start()
self.routing_helper = RoutingServiceHelper(
HOST, self.conf, self.agent)
self.routing_helper._internal_network_added = mock.Mock()
self.routing_helper._external_gateway_added = mock.Mock()
self.routing_helper._internal_network_removed = mock.Mock()
self.routing_helper._external_gateway_removed = mock.Mock()
self.driver = self._mock_driver_and_hosting_device(
self.routing_helper)
def _mock_driver_and_hosting_device(self, svc_helper):
svc_helper._dev_status.is_hosting_device_reachable = mock.MagicMock(
return_value=True)
driver = mock.MagicMock()
svc_helper._drivermgr.get_driver = mock.Mock(return_value=driver)
svc_helper._drivermgr.set_driver = mock.Mock(return_value=driver)
return driver
def _reset_mocks(self):
self.routing_helper._process_router_floating_ips.reset_mock()
self.routing_helper._internal_network_added.reset_mock()
self.routing_helper._external_gateway_added.reset_mock()
self.routing_helper._internal_network_removed.reset_mock()
self.routing_helper._external_gateway_removed.reset_mock()
def test_process_router_throw_config_error(self):
snip_name = 'CREATE_SUBINTERFACE'
e_type = 'Fake error'
e_tag = 'Fake error tag'
params = {'snippet': snip_name, 'type': e_type, 'tag': e_tag}
self.routing_helper._internal_network_added.side_effect = (
cfg_exceptions.CSR1kvConfigException(**params))
router, ports = prepare_router_data()
ri = RouterInfo(router['id'], router)
self.assertRaises(cfg_exceptions.CSR1kvConfigException,
self.routing_helper._process_router, ri)
def test_process_router(self):
router, ports = prepare_router_data()
        # Set up mock for the call to process floating ips
self.routing_helper._process_router_floating_ips = mock.Mock()
fake_floatingips1 = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid()}]}
ri = RouterInfo(router['id'], router=router)
# Process with initial values
self.routing_helper._process_router(ri)
ex_gw_port = ri.router.get('gw_port')
        # Assert that _process_router_floating_ips, _internal_network_added and
        # _external_gateway_added were all called with the right params
self.routing_helper._process_router_floating_ips.assert_called_with(
ri, ex_gw_port)
self.routing_helper._internal_network_added.assert_called_with(
ri, ports[0], ex_gw_port)
self.routing_helper._external_gateway_added.assert_called_with(
ri, ex_gw_port)
self._reset_mocks()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
        # Process again and check that this time only process_floating_ips
        # was called.
self.routing_helper._process_router(ri)
ex_gw_port = ri.router.get('gw_port')
self.routing_helper._process_router_floating_ips.assert_called_with(
ri, ex_gw_port)
self.assertFalse(self.routing_helper._internal_network_added.called)
self.assertFalse(self.routing_helper._external_gateway_added.called)
self._reset_mocks()
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
        # Process again and check that this time too only
        # process_floating_ips was called (no network add/remove calls)
self.routing_helper._process_router(ri)
ex_gw_port = ri.router.get('gw_port')
self.routing_helper._process_router_floating_ips.assert_called_with(
ri, ex_gw_port)
self.assertFalse(self.routing_helper._internal_network_added.called)
self.assertFalse(self.routing_helper._external_gateway_added.called)
self._reset_mocks()
# now no ports so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
# Update router_info object
ri.router = router
        # Keep a copy of the ex_gw_port before it's gone after processing.
ex_gw_port = ri.ex_gw_port
# Process router and verify that internal and external network removed
# were called and floating_ips_process was called
self.routing_helper._process_router(ri)
self.assertFalse(self.routing_helper.
_process_router_floating_ips.called)
self.assertFalse(self.routing_helper._external_gateway_added.called)
self.assertTrue(self.routing_helper._internal_network_removed.called)
self.assertTrue(self.routing_helper._external_gateway_removed.called)
self.routing_helper._internal_network_removed.assert_called_with(
ri, ports[0], ex_gw_port)
self.routing_helper._external_gateway_removed.assert_called_with(
ri, ex_gw_port)
def test_routing_table_update(self):
router = self.router
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
fake_route2 = {'destination': '135.207.111.111/32',
'nexthop': '1.2.3.4'}
# First we set the routes to fake_route1 and see if the
# driver.routes_updated was called with 'replace'(==add or replace)
# and fake_route1
router['routes'] = [fake_route1]
ri = RouterInfo(router['id'], router)
self.routing_helper._process_router(ri)
self.driver.routes_updated.assert_called_with(ri, 'replace',
fake_route1)
        # Now we replace fake_route1 with fake_route2. This should cause the
        # driver to be invoked to delete fake_route1 and to
        # 'replace'(==add or replace) fake_route2
self.driver.reset_mock()
router['routes'] = [fake_route2]
ri.router = router
self.routing_helper._process_router(ri)
self.driver.routes_updated.assert_called_with(ri, 'delete',
fake_route1)
self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route2)
# Now we add back fake_route1 as a new route, this should cause driver
# to be invoked to 'replace'(==add or replace) fake_route1
self.driver.reset_mock()
router['routes'] = [fake_route2, fake_route1]
ri.router = router
self.routing_helper._process_router(ri)
self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route1)
# Now we delete all routes. This should cause driver
# to be invoked to delete fake_route1 and fake-route2
self.driver.reset_mock()
router['routes'] = []
ri.router = router
self.routing_helper._process_router(ri)
self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route2)
self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route1)
def test_process_router_internal_network_added_unexpected_error(self):
router, ports = prepare_router_data()
ri = RouterInfo(router['id'], router=router)
        # raise RuntimeError to simulate that an unexpected exception occurs
self.routing_helper._internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError,
self.routing_helper._process_router,
ri)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
self.routing_helper._internal_network_added.side_effect = None
        # Failure will cause a retry next time; on the retry we are able to
        # add the port to ri.internal_ports
self.routing_helper._process_router(ri)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
router, ports = prepare_router_data()
ri = RouterInfo(router['id'], router=router)
# add an internal port
self.routing_helper._process_router(ri)
        # raise RuntimeError to simulate that an unexpected exception occurs
self.routing_helper._internal_network_removed.side_effect = mock.Mock(
side_effect=RuntimeError)
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to down state, remove it.
self.assertRaises(RuntimeError,
self.routing_helper._process_router,
ri)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
self.routing_helper._internal_network_removed.side_effect = None
        # Failure will cause a retry next time; on the retry the removal
        # succeeds
self.routing_helper._process_router(ri)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_routers_with_admin_state_down(self):
self.plugin_api.get_external_network_id.return_value = None
routers = [
{'id': _uuid(),
'admin_state_up': False,
'external_gateway_info': {}}]
self.routing_helper._process_routers(routers, None)
self.assertNotIn(routers[0]['id'], self.routing_helper.router_info)
def test_router_deleted(self):
self.routing_helper.router_deleted(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
def test_routers_updated(self):
self.routing_helper.routers_updated(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
def test_removed_from_agent(self):
self.routing_helper.router_removed_from_agent(None,
{'router_id': FAKE_ID})
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
def test_added_to_agent(self):
self.routing_helper.router_added_to_agent(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
def test_process_router_delete(self):
router = self.router
router['gw_port'] = self.ex_gw_port
self.routing_helper._router_added(router['id'], router)
self.assertIn(router['id'], self.routing_helper.router_info)
# Now we remove the router
self.routing_helper._router_removed(router['id'], deconfigure=True)
self.assertNotIn(router['id'], self.routing_helper.router_info)
def test_collect_state(self):
router, ports = prepare_router_data(enable_snat=True,
num_internal_ports=2)
self.routing_helper._router_added(router['id'], router)
configurations = {}
configurations = self.routing_helper.collect_state(configurations)
hd_exp_result = {
router['hosting_device']['id']: {'routers': 1}}
self.assertEqual(1, configurations['total routers'])
self.assertEqual(1, configurations['total ex_gw_ports'])
self.assertEqual(2, configurations['total interfaces'])
self.assertEqual(0, configurations['total floating_ips'])
self.assertEqual(hd_exp_result, configurations['hosting_devices'])
self.assertEqual([], configurations['non_responding_hosting_devices'])
def test_sort_resources_per_hosting_device(self):
router1, port = prepare_router_data()
router2, port = prepare_router_data()
router3, port = prepare_router_data()
router4, port = prepare_router_data()
hd1_id = router1['hosting_device']['id']
hd2_id = router4['hosting_device']['id']
        # Set router2's and router3's device ids to the same value as router1's
router2['hosting_device']['id'] = hd1_id
router3['hosting_device']['id'] = hd1_id
resources = {'routers': [router1, router2, router4],
'removed_routers': [router3]}
devices = self.routing_helper._sort_resources_per_hosting_device(
resources)
self.assertEqual(2, len(devices.keys())) # Two devices
hd1_routers = [router1, router2]
self.assertEqual(hd1_routers, devices[hd1_id]['routers'])
self.assertEqual([router3], devices[hd1_id]['removed_routers'])
self.assertEqual([router4], devices[hd2_id]['routers'])
def test_get_router_ids_from_removed_devices_info(self):
removed_devices_info = {
'hosting_data': {'device_1': {'routers': ['id1', 'id2']},
'device_2': {'routers': ['id3', 'id4'],
'other_key': ['value1', 'value2']}}
}
resp = self.routing_helper._get_router_ids_from_removed_devices_info(
removed_devices_info)
self.assertEqual(sorted(resp), sorted(['id1', 'id2', 'id3', 'id4']))
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_full_sync_different_devices(self, mock_spawn):
router1, port = prepare_router_data()
router2, port = prepare_router_data()
self.plugin_api.get_routers = mock.Mock(
return_value=[router1, router2])
self.routing_helper.process_service()
self.assertEqual(2, mock_spawn.call_count)
call1 = mock.call(self.routing_helper._process_routers, [router1],
None, router1['hosting_device']['id'],
all_routers=True)
call2 = mock.call(self.routing_helper._process_routers, [router2],
None, router2['hosting_device']['id'],
all_routers=True)
mock_spawn.assert_has_calls([call1, call2], any_order=True)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_full_sync_same_device(self, mock_spawn):
router1, port = prepare_router_data()
router2, port = prepare_router_data()
router2['hosting_device']['id'] = router1['hosting_device']['id']
self.plugin_api.get_routers = mock.Mock(return_value=[router1,
router2])
self.routing_helper.process_service()
self.assertEqual(1, mock_spawn.call_count)
mock_spawn.assert_called_with(self.routing_helper._process_routers,
[router1, router2],
None,
router1['hosting_device']['id'],
all_routers=True)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_updated_routers(self, mock_spawn):
router1, port = prepare_router_data()
def routers_data(context, router_ids=None, hd_ids=None):
if router_ids:
return [router1]
self.plugin_api.get_routers.side_effect = routers_data
self.routing_helper.fullsync = False
self.routing_helper.updated_routers.add(router1['id'])
self.routing_helper.process_service()
self.assertEqual(1, self.plugin_api.get_routers.call_count)
self.plugin_api.get_routers.assert_called_with(
self.routing_helper.context,
router_ids=[router1['id']])
self.assertEqual(1, mock_spawn.call_count)
mock_spawn.assert_called_with(self.routing_helper._process_routers,
[router1],
None,
router1['hosting_device']['id'],
all_routers=False)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_deviceid(self, mock_spawn):
router, port = prepare_router_data()
device_id = router['hosting_device']['id']
def routers_data(context, router_ids=None, hd_ids=None):
if hd_ids:
self.assertEqual([device_id], hd_ids)
return [router]
self.plugin_api.get_routers.side_effect = routers_data
self.routing_helper.fullsync = False
self.routing_helper.process_service(device_ids=[device_id])
self.assertEqual(1, self.plugin_api.get_routers.call_count)
self.plugin_api.get_routers.assert_called_with(
self.routing_helper.context,
hd_ids=[device_id])
self.assertEqual(1, mock_spawn.call_count)
mock_spawn.assert_called_with(self.routing_helper._process_routers,
[router],
None,
device_id,
all_routers=False)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_removed_routers(self, mock_spawn):
router, port = prepare_router_data()
device_id = router['hosting_device']['id']
self._mock_driver_and_hosting_device(self.routing_helper)
self.routing_helper.fullsync = False
# Emulate router added for setting up internal structures
self.routing_helper._router_added(router['id'], router)
# Add router to removed routers list and process it
self.routing_helper.removed_routers.add(router['id'])
self.routing_helper.process_service()
self.assertEqual(1, mock_spawn.call_count)
mock_spawn.assert_called_with(self.routing_helper._process_routers,
None,
[router],
device_id,
all_routers=False)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_removed_routers_info(self, mock_spawn):
router1, port = prepare_router_data()
device_id = router1['hosting_device']['id']
router2, port = prepare_router_data()
router2['hosting_device']['id'] = _uuid()
removed_devices_info = {
'hosting_data': {device_id: {'routers': [router1['id']]}},
'deconfigure': True
}
self._mock_driver_and_hosting_device(self.routing_helper)
self.routing_helper.fullsync = False
# Emulate router added for setting up internal structures
self.routing_helper._router_added(router1['id'], router1)
self.routing_helper._router_added(router2['id'], router2)
# Add router to removed routers list and process it
self.routing_helper.removed_routers.add(router2['id'])
self.routing_helper.process_service(
removed_devices_info=removed_devices_info)
self.assertEqual(2, mock_spawn.call_count)
call1 = mock.call(self.routing_helper._process_routers,
None,
[router1],
router1['hosting_device']['id'],
all_routers=False)
call2 = mock.call(self.routing_helper._process_routers,
None,
[router2],
router2['hosting_device']['id'],
all_routers=False)
mock_spawn.assert_has_calls([call1, call2], any_order=True)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_rpc_error(self, mock_spawn):
router, port = prepare_router_data()
self.plugin_api.get_routers.side_effect = n_rpc.RPCException
self.routing_helper.fullsync = False
self.routing_helper.updated_routers.add(router['id'])
self.routing_helper.process_service()
self.assertEqual(1, self.plugin_api.get_routers.call_count)
self.plugin_api.get_routers.assert_called_with(
self.routing_helper.context,
router_ids=[router['id']])
self.assertFalse(mock_spawn.called)
self.assertTrue(self.routing_helper.fullsync)
def test_process_routers(self):
router, port = prepare_router_data()
driver = self._mock_driver_and_hosting_device(self.routing_helper)
self.routing_helper._process_router = mock.Mock()
self.routing_helper._process_routers([router], None)
ri = self.routing_helper.router_info[router['id']]
driver.router_added.assert_called_with(ri)
self.routing_helper._process_router.assert_called_with(ri)
def _process_routers_floatingips(self, action='add'):
router, port = prepare_router_data()
driver = self._mock_driver_and_hosting_device(self.routing_helper)
ex_gw_port = router['gw_port']
floating_ip_address = '19.4.4.10'
fixed_ip_address = '35.4.1.10'
fixed_ip_address_2 = '35.4.1.15'
port_id = 'fake_port_id'
floating_ip = {'fixed_ip_address': fixed_ip_address,
'floating_ip_address': floating_ip_address,
'id': 'floating_ip_id',
'port_id': port_id,
'status': 'ACTIVE', }
router[l3_constants.FLOATINGIP_KEY] = [floating_ip]
ri = RouterInfo(router['id'], router=router)
# Default add action
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
driver.floating_ip_added.assert_called_with(
ri, ex_gw_port, floating_ip_address, fixed_ip_address)
if action == 'remove':
router[l3_constants.FLOATINGIP_KEY] = []
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
driver.floating_ip_removed.assert_called_with(
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
if action == 'remap':
driver.reset_mock()
floating_ip_2 = copy.deepcopy(floating_ip)
floating_ip_2['fixed_ip_address'] = fixed_ip_address_2
ri.router[l3_constants.FLOATINGIP_KEY] = [floating_ip_2]
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
driver.floating_ip_added.assert_called_with(
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address_2)
driver.floating_ip_removed.assert_called_with(
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
def test_process_routers_floatingips_add(self):
self._process_routers_floatingips(action="add")
def test_process_routers_floatingips_remove(self):
self._process_routers_floatingips(action="remove")
def test_process_routers_floatingips_remap(self):
self._process_routers_floatingips(action="remap")
| apache-2.0 |
adlnet-archive/edx-platform | common/lib/xmodule/xmodule/tests/test_peer_grading.py | 33 | 16061 | import unittest
import json
import logging
from mock import Mock, patch
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from xmodule.tests import get_test_system, get_test_descriptor_system
from xmodule.tests.test_util_open_ended import DummyModulestore
from xmodule.open_ended_grading_classes.peer_grading_service import MockPeerGradingService
from xmodule.peer_grading_module import PeerGradingModule, PeerGradingDescriptor, MAX_ALLOWED_FEEDBACK_LENGTH
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
log = logging.getLogger(__name__)
class PeerGradingModuleTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingSample")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
calibrated_dict = {'location': "blah"}
coe_dict = {'location': coe_location.to_deprecated_string()}
save_dict = MultiDict({
'location': "blah",
'submission_id': 1,
'submission_key': "",
'score': 1,
'feedback': "",
'submission_flagged': False,
'answer_unknown': False,
})
save_dict.extend(('rubric_scores[]', val) for val in (0, 1))
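    # The webob MultiDict deliberately carries the repeated 'rubric_scores[]'
    # key; e.g. save_dict.getall('rubric_scores[]') returns [0, 1], mimicking
    # how a browser posts multiple values for one form field.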
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
self.setup_modulestore(self.course_id.course)
self.peer_grading = self.get_module_from_location(self.problem_location)
self.coe = self.get_module_from_location(self.coe_location)
def test_module_closed(self):
"""
Test if peer grading is closed
@return:
"""
closed = self.peer_grading.closed()
self.assertFalse(closed)
def test_get_html(self):
"""
Test to see if the module can be rendered
@return:
"""
_html = self.peer_grading.get_html()
def test_get_data(self):
"""
Try getting data from the external grading service
@return:
"""
success, _data = self.peer_grading.query_data_for_location(self.problem_location)
self.assertTrue(success)
def test_get_score_none(self):
"""
Test getting the score.
"""
score = self.peer_grading.get_score()
# Score should be None.
self.assertIsNone(score['score'])
def test_get_max_score(self):
"""
Test getting the max score
@return:
"""
max_score = self.peer_grading.max_score()
self.assertEquals(max_score, None)
def get_next_submission(self):
"""
Test to see if we can get the next mock submission
@return:
"""
success, _next_submission = self.peer_grading.get_next_submission({'location': 'blah'})
self.assertEqual(success, True)
def test_save_grade(self):
"""
Test if we can save the grade
@return:
"""
response = self.peer_grading.save_grade(self.save_dict)
self.assertEqual(response['success'], True)
def test_is_student_calibrated(self):
"""
Check to see if the student has calibrated yet
@return:
"""
response = self.peer_grading.is_student_calibrated(self.calibrated_dict)
self.assertTrue(response['success'])
def test_show_calibration_essay(self):
"""
Test showing the calibration essay
@return:
"""
response = self.peer_grading.show_calibration_essay(self.calibrated_dict)
self.assertTrue(response['success'])
def test_save_calibration_essay(self):
"""
Test saving the calibration essay
@return:
"""
response = self.peer_grading.save_calibration_essay(self.save_dict)
self.assertTrue(response['success'])
def test_peer_grading_problem(self):
"""
See if we can render a single problem
@return:
"""
response = self.peer_grading.peer_grading_problem(self.coe_dict)
self.assertTrue(response['success'])
def test___find_corresponding_module_for_location_exceptions(self):
"""
Unit test for the exception cases of __find_corresponding_module_for_location
Mainly for diff coverage
@return:
"""
# pylint: disable=protected-access
with self.assertRaises(ItemNotFoundError):
self.peer_grading._find_corresponding_module_for_location(
Location('org', 'course', 'run', 'category', 'name', 'revision')
)
def test_get_instance_state(self):
"""
Get the instance state dict
@return:
"""
self.peer_grading.get_instance_state()
def test_save_grade_with_long_feedback(self):
"""
        Test that save_grade() returns an error message when the feedback is too long.
"""
feedback_fragment = "This is very long feedback."
self.save_dict["feedback"] = feedback_fragment * (
(MAX_ALLOWED_FEEDBACK_LENGTH / len(feedback_fragment) + 1)
)
response = self.peer_grading.save_grade(self.save_dict)
# Should not succeed.
self.assertEqual(response['success'], False)
self.assertEqual(
response['error'],
"Feedback is too long, Max length is {0} characters.".format(
MAX_ALLOWED_FEEDBACK_LENGTH
)
)
def test_get_score_success_fails(self):
"""
        Test that when query_data_for_location does not succeed, the score is None.
"""
score_dict = self.get_score(False, 0, 0)
# Score dict should be None.
self.assertIsNone(score_dict)
def test_get_score(self):
"""
        Test that when the student has graded as many submissions as required,
        their score is 1.0.
"""
score_dict = self.get_score(True, 3, 3)
# Score should be 1.0.
self.assertEqual(score_dict["score"], 1.0)
# Testing score after data is stored in student_data_for_location in xmodule.
_score_dict = self.peer_grading.get_score()
# Score should be 1.0.
self.assertEqual(_score_dict["score"], 1.0)
def test_get_score_zero(self):
"""
        Test that when the student has graded fewer submissions than required,
        their score is 0.0.
"""
score_dict = self.get_score(True, 2, 3)
# Score should be 0.0.
self.assertEqual(score_dict["score"], 0.0)
def get_score(self, success, count_graded, count_required):
self.peer_grading.use_for_single_location_local = True
self.peer_grading.graded = True
# Patch for external grading service.
with patch('xmodule.peer_grading_module.PeerGradingModule.query_data_for_location') as mock_query_data_for_location:
mock_query_data_for_location.return_value = (
success,
{"count_graded": count_graded, "count_required": count_required}
)
# Returning score dict.
return self.peer_grading.get_score()
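    # A hedged reading of the contract mocked above: get_score() receives the
    # (success, data) pair that query_data_for_location() would normally
    # return, and (per test_get_score / test_get_score_zero above) reports 1.0
    # only once count_graded reaches count_required.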
class MockPeerGradingServiceProblemList(MockPeerGradingService):
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
{
"num_graded": 3,
"num_pending": 681,
"num_required": 3,
"location": course_id.make_usage_key('combinedopenended', 'SampleQuestion'),
"problem_name": "Peer-Graded Essay"
},
]}
class PeerGradingModuleScoredTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading xmodule at the unit level. More detailed tests are difficult, as the module relies on an
external grading service.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingScored")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system
@return:
"""
self.setup_modulestore(self.course_id.course)
def test_metadata_load(self):
peer_grading = self.get_module_from_location(self.problem_location)
self.assertFalse(peer_grading.closed())
def test_problem_list(self):
"""
Test to see if a peer grading problem list can be correctly initialized.
"""
# Initialize peer grading module.
peer_grading = self.get_module_from_location(self.problem_location)
# Ensure that it cannot find any peer grading.
html = peer_grading.peer_grading()
self.assertNotIn("Peer-Graded", html)
# Swap for our mock class, which will find peer grading.
peer_grading.peer_gs = MockPeerGradingServiceProblemList()
html = peer_grading.peer_grading()
self.assertIn("Peer-Graded", html)
class PeerGradingModuleLinkedTest(unittest.TestCase, DummyModulestore):
"""
Test peer grading that is linked to an open ended module.
"""
course_id = SlashSeparatedCourseKey('edX', 'open_ended', '2012_Fall')
problem_location = course_id.make_usage_key("peergrading", "PeerGradingLinked")
coe_location = course_id.make_usage_key("combinedopenended", "SampleQuestion")
def get_module_system(self, descriptor):
test_system = get_test_system(self.course_id)
test_system.open_ended_grading_interface = None
return test_system
def setUp(self):
"""
Create a peer grading module from a test system.
"""
self.setup_modulestore(self.course_id.course)
@property
def field_data(self):
"""
Setup the proper field data for a peer grading module.
"""
return DictFieldData({
'data': '<peergrading/>',
'location': self.problem_location,
'use_for_single_location': True,
'link_to_location': self.coe_location.to_deprecated_string(),
'graded': True,
})
@property
def scope_ids(self):
"""
Return the proper scope ids for the peer grading module.
"""
return ScopeIds(None, None, self.problem_location, self.problem_location)
def _create_peer_grading_descriptor_with_linked_problem(self):
# Initialize the peer grading module.
system = get_test_descriptor_system()
return system.construct_xblock_from_class(
PeerGradingDescriptor,
field_data=self.field_data,
scope_ids=self.scope_ids
)
def _create_peer_grading_with_linked_problem(self, location, valid_linked_descriptor=True):
"""
Create a peer grading problem with a linked location.
"""
# Mock the linked problem descriptor.
linked_descriptor = Mock()
linked_descriptor.location = location
# Mock the peer grading descriptor.
pg_descriptor = Mock()
pg_descriptor.location = self.problem_location
if valid_linked_descriptor:
pg_descriptor.get_required_module_descriptors = lambda: [linked_descriptor, ]
else:
pg_descriptor.get_required_module_descriptors = lambda: []
test_system = self.get_module_system(pg_descriptor)
# Initialize the peer grading module.
peer_grading = PeerGradingModule(
pg_descriptor,
test_system,
self.field_data,
self.scope_ids,
)
return peer_grading
def _get_descriptor_with_invalid_link(self, exception_to_raise):
"""
Ensure that a peer grading descriptor with an invalid link will return an empty list.
"""
# Create a descriptor, and make loading an item throw an error.
descriptor = self._create_peer_grading_descriptor_with_linked_problem()
descriptor.system.load_item = Mock(side_effect=exception_to_raise)
# Ensure that modules is a list of length 0.
modules = descriptor.get_required_module_descriptors()
self.assertIsInstance(modules, list)
self.assertEqual(len(modules), 0)
def test_descriptor_with_nopath(self):
"""
Test to see if a descriptor with a NoPathToItem error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(NoPathToItem)
def test_descriptor_with_item_not_found(self):
"""
Test to see if a descriptor with an ItemNotFound error when trying to get
its linked module behaves properly.
"""
self._get_descriptor_with_invalid_link(ItemNotFoundError)
def test_invalid_link(self):
"""
Ensure that a peer grading problem with no linked locations stays in panel mode.
"""
# Setup the peer grading module with no linked locations.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location, valid_linked_descriptor=False)
self.assertFalse(peer_grading.use_for_single_location_local)
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_problem(self):
"""
Ensure that a peer grading problem with a linked location loads properly.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# Ensure that it is properly setup.
self.assertTrue(peer_grading.use_for_single_location)
def test_linked_ajax(self):
"""
Ensure that a peer grading problem with a linked location responds to ajax calls.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
# If we specify a location, it will render the problem for that location.
data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})
self.assertTrue(json.loads(data)['success'])
# If we don't specify a location, it should use the linked location.
data = peer_grading.handle_ajax('problem', {})
self.assertTrue(json.loads(data)['success'])
def test_linked_score(self):
"""
Ensure that a peer grading problem with a linked location is properly scored.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
score_dict = peer_grading.get_score()
self.assertEqual(score_dict['score'], 1)
self.assertEqual(score_dict['total'], 1)
def test_get_next_submission(self):
"""
Ensure that a peer grading problem with a linked location can get a submission to score.
"""
# Setup the peer grading module with the proper linked location.
peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)
data = peer_grading.handle_ajax('get_next_submission', {'location': self.coe_location})
self.assertEqual(json.loads(data)['submission_id'], 1)
| agpl-3.0 |
ErnieAllen/qpid-dispatch | tests/system_tests_core_client.py | 2 | 8314 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from system_test import TestCase
from system_test import Qdrouterd
from system_test import QdManager
from system_test import TIMEOUT
from proton import Message
from proton import Endpoint
from proton.handlers import MessagingHandler
from proton.reactor import Container
# Test the request/response core client messaging API.
#
# These tests rely on enabling the router test hooks, which instantiate a test
# client (see modules/test_hooks/core_test_hooks.c).
CONTAINER_ID = "org.apache.qpid.dispatch.test_core_client"
TARGET_ADDR = "test_core_client_address"
class CoreClientAPITest(TestCase):
@classmethod
def setUpClass(cls):
super(CoreClientAPITest, cls).setUpClass()
config = Qdrouterd.Config([
('router', {'mode': 'standalone', 'id': 'QDR.A'}),
('listener', {'port': cls.tester.get_port()}),
])
cls.router = cls.tester.qdrouterd("A", config, cl_args=["-T"])
def test_send_receive(self):
ts = TestService(self.router.addresses[0], credit=250)
ts.run()
self.assertTrue(ts.error is None)
self.assertEqual(250, ts.in_count)
self.assertEqual(250, ts.out_count)
def test_credit_starve(self):
ts = TestCreditStarve(self.router.addresses[0])
ts.run()
self.assertTrue(ts.error is None)
self.assertTrue(ts.starved)
self.assertEqual(10, ts.in_count)
def test_unexpected_conn_close(self):
ts = TestEarlyClose(self.router.addresses[0])
ts.run()
self.assertTrue(ts.error is None)
self.assertTrue(ts.in_count >= 1)
def test_bad_format(self):
ts = TestNoCorrelationId(self.router.addresses[0])
ts.run()
self.assertTrue(ts.error is None)
self.assertTrue(ts.rejected)
def test_old_cid(self):
ts = TestOldCorrelationId(self.router.addresses[0])
ts.run()
self.assertTrue(ts.error is None)
self.assertTrue(ts.accepted)
def test_call_timeout(self):
qm = QdManager(self, self.router.addresses[0])
ts = TestCallTimeout(self.router.addresses[0], qm)
ts.run()
self.assertEqual("TIMED OUT!", ts.error)
class TestService(MessagingHandler):
# a service that the core client can communicate with
class Timeout(object):
def __init__(self, service):
self.service = service
def on_timer_task(self, event):
self.service.timeout()
def __init__(self, address, container_id=CONTAINER_ID, credit=1):
super(TestService, self).__init__(prefetch=0)
self._container = Container(self)
self._container.container_id = container_id  # honor the caller-supplied id (was hard-coded to the default)
self._conn = None
self.address = address
self.timer = None
self.error = None
self.reply_link = None
self.incoming_link = None
self.credit = credit
self.in_count = 0
self.out_count = 0
def fail(self, error):
self.error = error
if self._conn:
self._conn.close()
def timeout(self):
self.fail("Timeout expired")
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, self.Timeout(self))
self._conn = event.container.connect(self.address)
def on_link_opening(self, event):
link = event.link
if link.state & Endpoint.LOCAL_UNINIT:
link.source.copy(link.remote_source)
link.target.copy(link.remote_target)
if event.sender:
if not link.remote_source.dynamic:
self.fail("expected dynamic source terminus")
return
link.source.dynamic = False
link.source.address = "a/reply/address"
self.reply_link = link
else:
link.flow(self.credit)
self.incoming_link = link
def create_reply(self, message):
return Message(body=message.body,
correlation_id=message.correlation_id)
# echo back to sender
def on_message(self, event):
self.in_count += 1
self.reply_link.send(self.create_reply(event.message))
# stop when all sent messages have settled
def on_settled(self, event):
self.out_count += 1
self.credit -= 1
if self.credit == 0:
self._conn.close()
def on_connection_closed(self, event):
self._conn = None
def run(self):
self._container.timeout = 1.0
self._container.start()
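# Pump the reactor by hand so the loop can exit as soon as the connection
# has gone away and the reactor has quiesced.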
while self._container.process():
if self._conn is None and self._container.quiesced:
break
self._container.stop()
self._container.process()
# wait until all credit is exhausted, then re-flow more credit
class TestCreditStarve(TestService):
def __init__(self, address):
super(TestCreditStarve, self).__init__(address, credit=5)
self.starved = False
def on_settled(self, event):
self.credit -= 1
if self.credit == 0:
if not self.starved:
self.starved = True
self.credit = 5
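# drain() both grants the new credit and asks the sender to use it up
# (or advance its delivery-count) immediately, re-priming the starved
# receiver in a single call.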
self.incoming_link.drain(self.credit)
else:
self._conn.close()
# grant 10, but don't respond and close early
class TestEarlyClose(TestService):
def __init__(self, address):
super(TestEarlyClose, self).__init__(address, credit=10)
def on_message(self, event):
self.in_count += 1
if self.in_count == 1:
self._conn.close()
class TestNoCorrelationId(TestService):
def __init__(self, address):
super(TestNoCorrelationId, self).__init__(address, credit=1)
self.rejected = False
def create_reply(self, message):
return Message(body=dict())
def on_rejected(self, event):
self.rejected = True
class TestOldCorrelationId(TestService):
def __init__(self, address):
super(TestOldCorrelationId, self).__init__(address, credit=1)
self.accepted = False
def create_reply(self, message):
return Message(body=dict(),
correlation_id="not going to match")
def on_accepted(self, event):
self.accepted = True
class TestCallTimeout(TestService):
# test that the timeout is handled properly
class PeriodicLogScrape(object):
# periodically scan the log for the timeout error
def __init__(self, service):
self.service = service
def on_timer_task(self, event):
log = self.service.qm.get_log()
for e in log:
if (e[0] == 'ROUTER' and e[1] == 'error'
and e[2] == 'client test request done '
'error=Timed out'):
# yes this is the line you're looking for:
self.service.error = "TIMED OUT!"
if self.service._conn:
self.service._conn.close()
return
event.reactor.schedule(1, TestCallTimeout.PeriodicLogScrape(self.service))
def __init__(self, address, qm):
super(TestCallTimeout, self).__init__(address, credit=1)
self.qm = qm
def on_message(self, event):
# drop it
event.reactor.schedule(1, self.PeriodicLogScrape(self))
| apache-2.0 |
tomasdubec/openstack-cinder | cinder/db/sqlalchemy/models.py | 1 | 15282 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for cinder data.
"""
from sqlalchemy import Column, Integer, String, Text, schema
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean
from sqlalchemy.orm import relationship, backref, object_mapper
from cinder.db.sqlalchemy.session import get_session
from cinder import exception
from cinder import flags
from cinder.openstack.common import timeutils
FLAGS = flags.FLAGS
BASE = declarative_base()
class CinderBase(object):
"""Base class for Cinder Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
metadata = None
def save(self, session=None):
"""Save this object."""
if not session:
session = get_session()
session.add(self)
try:
session.flush()
except IntegrityError, e:
if str(e).endswith('is not unique'):
raise exception.Duplicate(str(e))
else:
raise
def delete(self, session=None):
"""Delete this object."""
self.deleted = True
self.deleted_at = timeutils.utcnow()
self.save(session=session)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
self._i = iter(object_mapper(self).columns)
return self
def next(self):
n = self._i.next().name
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins."""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class Service(BASE, CinderBase):
"""Represents a running service on a host."""
__tablename__ = 'services'
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
availability_zone = Column(String(255), default='cinder')
class CinderNode(BASE, CinderBase):
"""Represents a running cinder service on a host."""
__tablename__ = 'cinder_nodes'
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
class Volume(BASE, CinderBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.volume_name_template % self.id
ec2_id = Column(Integer)
user_id = Column(String(255))
project_id = Column(String(255))
snapshot_id = Column(String(36))
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(255))
provider_auth = Column(String(255))
volume_type_id = Column(String(36))
source_volid = Column(String(36))
class VolumeMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a volume."""
__tablename__ = 'volume_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
volume = relationship(Volume, backref="volume_metadata",
foreign_keys=volume_id,
primaryjoin='and_('
'VolumeMetadata.volume_id == Volume.id,'
'VolumeMetadata.deleted == False)')
class VolumeTypes(BASE, CinderBase):
"""Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
id = Column(String(36), primary_key=True)
name = Column(String(255))
volumes = relationship(Volume,
backref=backref('volume_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'Volume.volume_type_id == VolumeTypes.id, '
'VolumeTypes.deleted == False)')
class VolumeTypeExtraSpecs(BASE, CinderBase):
"""Represents additional specs as key/value pairs for a volume_type."""
__tablename__ = 'volume_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_type_id = Column(String(36),
ForeignKey('volume_types.id'),
nullable=False)
volume_type = relationship(
VolumeTypes,
backref="extra_specs",
foreign_keys=volume_type_id,
primaryjoin='and_('
'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
'VolumeTypeExtraSpecs.deleted == False)'
)
class VolumeGlanceMetadata(BASE, CinderBase):
"""Glance metadata for a bootable volume."""
__tablename__ = 'volume_glance_metadata'
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), ForeignKey('volumes.id'))
snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
key = Column(String(255))
value = Column(Text)
volume = relationship(Volume, backref="volume_glance_metadata",
foreign_keys=volume_id,
primaryjoin='and_('
'VolumeGlanceMetadata.volume_id == Volume.id,'
'VolumeGlanceMetadata.deleted == False)')
class Quota(BASE, CinderBase):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
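# Illustrative only -- a hypothetical helper (not part of the Cinder models)
# showing the fallback order described in the docstring above: the project
# override wins, then the quota-class default, then the deployment default,
# and a stored hard_limit of None means the resource is unlimited.
#
# def resolve_limit(project_row, class_row, deployment_default):
#     for row in (project_row, class_row):
#         if row is not None:
#             return row.hard_limit
#     return deployment_default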
class QuotaClass(BASE, CinderBase):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
id = Column(Integer, primary_key=True)
class_name = Column(String(255), index=True)
resource = Column(String(255))
hard_limit = Column(Integer, nullable=True)
class QuotaUsage(BASE, CinderBase):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
id = Column(Integer, primary_key=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
in_use = Column(Integer)
reserved = Column(Integer)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer, nullable=True)
class Reservation(BASE, CinderBase):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255), index=True)
resource = Column(String(255))
delta = Column(Integer)
expire = Column(DateTime, nullable=False)
class Snapshot(BASE, CinderBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.snapshot_name_template % self.id
@property
def volume_name(self):
return FLAGS.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36))
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
display_name = Column(String(255))
display_description = Column(String(255))
provider_location = Column(String(255))
volume = relationship(Volume, backref="snapshots",
foreign_keys=volume_id,
primaryjoin='and_('
'Snapshot.volume_id == Volume.id,'
'Snapshot.deleted == False)')
class SnapshotMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a snapshot."""
__tablename__ = 'snapshot_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
snapshot_id = Column(String(36),
ForeignKey('snapshots.id'),
nullable=False)
snapshot = relationship(Snapshot, backref="snapshot_metadata",
foreign_keys=snapshot_id,
primaryjoin='and_('
'SnapshotMetadata.snapshot_id == Snapshot.id,'
'SnapshotMetadata.deleted == False)')
class IscsiTarget(BASE, CinderBase):
"""Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
volume = relationship(Volume,
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==False)')
class Migration(BASE, CinderBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(255),
ForeignKey('instances.uuid'),
nullable=True)
#TODO(_cerberus_): enum
status = Column(String(255))
class SMFlavors(BASE, CinderBase):
"""Represents a flavor for SM volumes."""
__tablename__ = 'sm_flavors'
id = Column(Integer(), primary_key=True)
label = Column(String(255))
description = Column(String(255))
class SMBackendConf(BASE, CinderBase):
"""Represents the connection to the backend for SM."""
__tablename__ = 'sm_backend_config'
id = Column(Integer(), primary_key=True)
flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
sr_uuid = Column(String(255))
sr_type = Column(String(255))
config_params = Column(String(2047))
class SMVolume(BASE, CinderBase):
__tablename__ = 'sm_volume'
id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
nullable=False)
vdi_uuid = Column(String(255))
class Backup(BASE, CinderBase):
"""Represents a backup of a volume to Swift."""
__tablename__ = 'backups'
id = Column(String(36), primary_key=True)
@property
def name(self):
return FLAGS.backup_name_template % self.id
user_id = Column(String(255), nullable=False)
project_id = Column(String(255), nullable=False)
volume_id = Column(String(36), nullable=False)
host = Column(String(255))
availability_zone = Column(String(255))
display_name = Column(String(255))
display_description = Column(String(255))
container = Column(String(255))
status = Column(String(255))
fail_reason = Column(String(255))
service_metadata = Column(String(255))
service = Column(String(255))
size = Column(Integer)
object_count = Column(Integer)
def register_models():
"""Register Models and create metadata.
Called from cinder.db.sqlalchemy.__init__ as part of loading the driver,
it will never need to be called explicitly elsewhere unless the
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
models = (Backup,
Migration,
Service,
SMBackendConf,
SMFlavors,
SMVolume,
Volume,
VolumeMetadata,
SnapshotMetadata,
VolumeTypeExtraSpecs,
VolumeTypes,
VolumeGlanceMetadata,
)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
| apache-2.0 |
malariagen/filterbank | filterbank/core.py | 1 | 2669 | import filterbank.accumulators as accumulators
import filterbank.encoders as encoders
from filterbank.tabfile import Reader
import copy
class BlockDigester:
def __init__(self, block_size, channel_config, output_location):
self.seen_rows = 0
#Accumulators bypassed if we are just doing single values
if block_size == 1:
channel_config = copy.deepcopy(channel_config)
channel_config['accumulators'] = ['LastVal']
self.block_size = block_size
# Resolve the accumulator and encoder classes by name from their modules
# and instantiate them; the original code called the modules themselves,
# which is a TypeError. (Assumes each configured name is a class in the
# corresponding module, e.g. accumulators.LastVal.)
self.accumulators = [getattr(accumulators, accum)() for accum in channel_config['accumulators']]
self.encoder = getattr(encoders, channel_config['encoder'])()
self.encoder.start(output_location, channel_config, block_size)
def process_value(self, value):
for accum in self.accumulators:
accum(value)
self.seen_rows += 1
if self.seen_rows == self.block_size:
self.end_block()
def end_block(self):
self.seen_rows = 0
self.encoder.write([accum.result() for accum in self.accumulators])
for accum in self.accumulators:
accum.reset()
def finish(self):
if self.seen_rows > 0:
self.end_block()
self.encoder.finish()
def geometric_series(start, max, mult):
result = []
while True:
result.append(start)
start *= mult
if start > max:
break
return result
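# For example (illustrative): geometric_series(1, 1000, 10) -> [1, 10, 100, 1000].
# Note the first value is always emitted before the upper bound is checked.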
class FilterBankProcessor:
def __init__(self, input_file, output_location, config, extra_metadata={}):
self.block_sizes = geometric_series(config['block_sizes']['start'], config['block_sizes']['end'], config['block_sizes'].get('mult',10))
self.reader = Reader(input_file)
self.output_location = output_location
self.channel_configs = [dict({'name':name}, **dict(extra_metadata, **chan_config)) for name, chan_config in config['channels'].items()]
def process(self):
evaluators_and_digesters = []
for channel_config in self.channel_configs:
evaluator = compile(channel_config['value'], '<string>', 'eval')
digesters = [BlockDigester(block_size, channel_config, self.output_location) for block_size in self.block_sizes]
evaluators_and_digesters.append((evaluator, digesters))
for row in self.reader:
for evaluator, digesters in evaluators_and_digesters:
value = eval(evaluator, {}, row)
for digester in digesters:
digester.process_value(value)
for evaluator, digesters in evaluators_and_digesters:
for digester in digesters:
digester.finish()
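# A minimal usage sketch (illustrative only -- the file names, column name and
# encoder name below are assumptions, not part of this module):
#
# config = {
#     'block_sizes': {'start': 1, 'end': 1000, 'mult': 10},
#     'channels': {
#         'depth': {'value': 'coverage',
#                   'accumulators': ['LastVal'],
#                   'encoder': 'SomeEncoder'},
#     },
# }
# FilterBankProcessor('input.tab', '/tmp/out', config).process()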
| mit |
yancharkin/games_nebula_goglib_scripts | corsairs_gold/settings.py | 1 | 3457 | import sys, os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
import gettext
import imp
try:
from ConfigParser import ConfigParser as ConfigParser
except:
from configparser import ConfigParser as ConfigParser
nebula_dir = os.getenv('NEBULA_DIR')
modules_dir = nebula_dir + '/modules'
set_visuals = imp.load_source('set_visuals', modules_dir + '/set_visuals.py')
gettext.bindtextdomain('games_nebula', nebula_dir + '/locale')
gettext.textdomain('games_nebula')
_ = gettext.gettext
current_dir = sys.path[0]
game_dir = current_dir + '/game'
class GUI:
def __init__(self):
self.config_load()
self.create_main_window()
def config_load(self):
config_file = game_dir + '/corsairs.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
if not config_parser.has_section('GENERAL'):
self.lang = 'English'
config_parser.add_section('GENERAL')  # must match the section used in set() below
config_parser.set('GENERAL', 'LANGUAGE', str(self.lang))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
else:
self.lang = config_parser.get('GENERAL', 'LANGUAGE')
def config_save(self):
config_file = game_dir + '/corsairs.ini'
config_parser = ConfigParser()
config_parser.read(config_file)
config_parser.set('GENERAL', 'LANGUAGE', str(self.lang))
new_config_file = open(config_file, 'w')
config_parser.write(new_config_file)
new_config_file.close()
def quit_app(self, window, event):
Gtk.main_quit()
def create_main_window(self):
self.main_window = Gtk.Window(
title = _("Corsairs Gold"),
type = Gtk.WindowType.TOPLEVEL,
window_position = Gtk.WindowPosition.CENTER_ALWAYS,
resizable = False,
)
self.main_window.connect('delete-event', self.quit_app)
grid = Gtk.Grid(
margin_left = 10,
margin_right = 10,
margin_top = 10,
margin_bottom = 10,
row_spacing = 10,
column_spacing = 10,
column_homogeneous = True,
)
label_lang = Gtk.Label(
label = _("Language:")
)
lang_list = ['Dutch', 'English', 'German', 'French', 'Italian', 'Polish',
'Portuguese', 'Spanish']
combobox_lang = Gtk.ComboBoxText()
index = 1
for i in range(len(lang_list)):
combobox_lang.append_text(lang_list[i])
if self.lang == lang_list[i]:
index = i
combobox_lang.set_active(index)
combobox_lang.connect('changed', self.cb_combobox_lang)
button_save = Gtk.Button(
label = _("Save and quit")
)
button_save.connect('clicked', self.cb_button_save)
grid.attach(label_lang, 0, 0, 1, 1)
grid.attach(combobox_lang, 1, 0, 1, 1)
grid.attach(button_save, 0, 1, 2, 1)
self.main_window.add(grid)
self.main_window.show_all()
def cb_combobox_lang(self, combobox):
self.lang = combobox.get_active_text()
def cb_button_save(self, button):
self.config_save()
Gtk.main_quit()
def main():
app = GUI()
Gtk.main()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
jermainewang/mxnet | example/rcnn/rcnn/symbol/symbol_resnet.py | 18 | 13105 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
eps = 2e-5
use_global_stats = True
workspace = 512
res_deps = {'50': (3, 4, 6, 3), '101': (3, 4, 23, 3), '152': (3, 8, 36, 3), '200': (3, 24, 36, 3)}
units = res_deps['101']
filter_list = [256, 512, 1024, 2048]
def residual_unit(data, num_filter, stride, dim_match, name):
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True,
workspace=workspace, name=name + '_sc')
return mx.sym.ElementWiseSum(conv3, shortcut, name=name + '_plus')
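# Illustrative shape check for the bottleneck unit above (not part of the
# original file; assumes NCHW layout and needs only a symbolic pass):
#
# data = mx.sym.Variable('data')
# unit = residual_unit(data, num_filter=256, stride=(1, 1), dim_match=False, name='demo')
# _, out_shapes, _ = unit.infer_shape(data=(1, 64, 56, 56))
# out_shapes[0] # -> (1, 256, 56, 56)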
def get_resnet_conv(data):
# res1
data_bn = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='bn_data')
conv0 = mx.sym.Convolution(data=data_bn, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
bn0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn0')
relu0 = mx.sym.Activation(data=bn0, act_type='relu', name='relu0')
pool0 = mx.symbol.Pooling(data=relu0, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='pool0')
# res2
unit = residual_unit(data=pool0, num_filter=filter_list[0], stride=(1, 1), dim_match=False, name='stage1_unit1')
for i in range(2, units[0] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[0], stride=(1, 1), dim_match=True, name='stage1_unit%s' % i)
# res3
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(2, 2), dim_match=False, name='stage2_unit1')
for i in range(2, units[1] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[1], stride=(1, 1), dim_match=True, name='stage2_unit%s' % i)
# res4
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(2, 2), dim_match=False, name='stage3_unit1')
for i in range(2, units[2] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[2], stride=(1, 1), dim_match=True, name='stage3_unit%s' % i)
return unit
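# The shared trunk above downsamples by a factor of 16 overall: conv0, pool0
# and the first units of res3 and res4 each halve the resolution, so e.g. a
# (1, 3, 224, 224) input leaves res4 as a (1, 1024, 14, 14) feature map,
# consistent with an RPN feature stride of 16 in the usual configuration.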
def get_resnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.symbol.contrib.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_resnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_resnet_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.symbol.contrib.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(14, 14), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
# res5
unit = residual_unit(data=roi_pool, num_filter=filter_list[3], stride=(2, 2), dim_match=False, name='stage4_unit1')
for i in range(2, units[3] + 1):
unit = residual_unit(data=unit, num_filter=filter_list[3], stride=(1, 1), dim_match=True, name='stage4_unit%s' % i)
bn1 = mx.sym.BatchNorm(data=unit, fix_gamma=False, eps=eps, use_global_stats=use_global_stats, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
| apache-2.0 |
dongjoon-hyun/spark | python/pyspark/sql/context.py | 15 | 23877 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, _NoValue
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.udf import UDFRegistration # noqa: F401
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sparkContext : :class:`SparkContext`
The :class:`SparkContext` backing this SQLContext.
sparkSession : :class:`SparkSession`
The :class:`SparkSession` around which this SQLContext wraps.
jsqlContext : optional
An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
This is only for internal.
Examples
--------
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + 1)=2, (d + 1)=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
_instantiatedContext = None
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
if sparkSession is None:
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if (SQLContext._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
.. versionadded:: 1.6.0
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sc : :class:`SparkContext`
"""
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
if (cls._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
jsqlContext = sc._jvm.SparkSession.builder().sparkContext(
sc._jsc.sc()).getOrCreate().sqlContext()
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
.. versionadded:: 1.6.0
"""
return self.__class__(self._sc, self.sparkSession.newSession())
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
.. versionadded:: 1.3.0
"""
self.sparkSession.conf.set(key, value)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", "50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
.. versionadded:: 1.3.1
Returns
-------
:class:`UDFRegistration`
"""
return self.sparkSession.udf
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
.. versionadded:: 1.4.0
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numPartitions : int, optional
the number of partitions of the DataFrame
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. versionadded:: 1.2.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
FutureWarning
)
return self.sparkSession.udf.register(name, f, returnType)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. versionadded:: 2.1.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
FutureWarning
)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
Parameters
----------
rdd : :class:`RDD`
an RDD of Row or tuple
samplingRatio : float, optional
sampling ratio, or no sampling (default)
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
.. versionadded:: 1.3.0
.. versionchanged:: 2.0.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1.0
Added verifySchema.
Parameters
----------
data : :class:`RDD` or iterable
an RDD of any kind of SQL data representation (:class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
samplingRatio : float, optional
the sample ratio of rows used for inferring
verifySchema : bool, optional
verify data types of every row against schema. Enabled by default.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1='Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name='Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name='Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1='Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a='Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
.. versionadded:: 1.6.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
.. versionadded:: 1.3.0
Returns
-------
:class:`DataFrame`
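Examples
--------
Illustrative only (the table name, path and data source below are
hypothetical)::
df = sqlContext.createExternalTable("people", path="/data/people", source="parquet")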
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
"""
return self.sparkSession.sql(sqlQuery)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
.. versionadded:: 1.3.0
Parameters
----------
dbName: str, optional
name of the database to use.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(namespace='', tableName='table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
.. versionadded:: 1.3.0
Parameters
----------
dbName: str
name of the database to use. Default to the current database.
Returns
-------
list
list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
.. deprecated:: 2.0.0
Use SparkSession.builder.enableHiveSupport().getOrCreate().
Parameters
----------
sparkContext : :class:`SparkContext`
The SparkContext to wrap.
jhiveContext : optional
An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
This is only for internal use.
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
FutureWarning
)
if jhiveContext is None:
sparkContext._conf.set("spark.sql.catalogImplementation", "hive")
sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
2014c2g3/w16b_test | static/Brython3.1.3-20150514-095342/Lib/gc.py | 743 | 3548 | """This module provides access to the garbage collector for reference cycles.
enable() -- Enable automatic garbage collection.
disable() -- Disable automatic garbage collection.
isenabled() -- Returns true if automatic collection is enabled.
collect() -- Do a full collection right now.
get_count() -- Return the current collection counts.
set_debug() -- Set debugging flags.
get_debug() -- Get debugging flags.
set_threshold() -- Set the collection thresholds.
get_threshold() -- Return the current the collection thresholds.
get_objects() -- Return a list of all objects tracked by the collector.
is_tracked() -- Returns true if a given object is tracked.
get_referrers() -- Return the list of objects that refer to an object.
get_referents() -- Return the list of objects that an object refers to.
"""
DEBUG_COLLECTABLE = 2
DEBUG_LEAK = 38
DEBUG_SAVEALL = 32
DEBUG_STATS = 1
DEBUG_UNCOLLECTABLE = 4
class __loader__:
pass
callbacks = []
def collect(*args,**kw):
"""collect([generation]) -> n
With no arguments, run a full collection. The optional argument
may be an integer specifying which generation to collect. A ValueError
is raised if the generation number is invalid.
The number of unreachable objects is returned.
"""
pass
def disable(*args,**kw):
"""disable() -> None
Disable automatic garbage collection.
"""
pass
def enable(*args,**kw):
"""enable() -> None
Enable automatic garbage collection.
"""
pass
garbage = []
def get_count(*args,**kw):
"""get_count() -> (count0, count1, count2)
Return the current collection counts
"""
pass
def get_debug(*args,**kw):
"""get_debug() -> flags
Get the garbage collection debugging flags.
"""
pass
def get_objects(*args,**kw):
"""get_objects() -> [...]
Return a list of objects tracked by the collector (excluding the list
returned).
"""
pass
def get_referents(*args,**kw):
"""get_referents(*objs) -> list Return the list of objects that are directly referred to by objs."""
pass
def get_referrers(*args,**kw):
"""get_referrers(*objs) -> list Return the list of objects that directly refer to any of objs."""
pass
def get_threshold(*args,**kw):
"""get_threshold() -> (threshold0, threshold1, threshold2)
Return the current collection thresholds
"""
pass
def is_tracked(*args,**kw):
"""is_tracked(obj) -> bool
Returns true if the object is tracked by the garbage collector.
Simple atomic objects will return false.
"""
pass
def isenabled(*args,**kw):
"""isenabled() -> status
Returns true if automatic garbage collection is enabled.
"""
pass
def set_debug(*args,**kw):
"""set_debug(flags) -> None
Set the garbage collection debugging flags. Debugging information is
written to sys.stderr.
flags is an integer and can have the following bits turned on:
DEBUG_STATS - Print statistics during collection.
DEBUG_COLLECTABLE - Print collectable objects found.
DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.
DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
DEBUG_LEAK - Debug leaking programs (everything but STATS).
"""
pass
def set_threshold(*args,**kw):
"""set_threshold(threshold0, [threshold1, threshold2]) -> None
Sets the collection thresholds. Setting threshold0 to zero disables
collection.
"""
pass
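# A minimal usage sketch (editorial addition, not part of the Brython
# distribution). In this browser stub the calls are harmless no-ops; under
# CPython the identical API performs real collections:
if __name__ == '__main__':
    set_debug(DEBUG_STATS | DEBUG_COLLECTABLE)
    unreachable = collect()  # full collection; CPython returns a count here
    print('unreachable objects:', unreachable, 'debug flags:', get_debug())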
| agpl-3.0 |
liupfskygre/qiime | qiime/adjust_seq_orientation.py | 15 | 1576 | #!/usr/bin/env python
# File created on 07 Oct 2009.
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Antonio Gonzalez Pena"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import split, splitext
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
usage_str = """usage: %prog [options] {-i INPUT_FASTA_FP}
[] indicates optional input (order unimportant)
{} indicates required input (order unimportant)
Example usage:
Write the reverse complement of all seqs in seqs.fasta (-i) to
seqs_rc.fasta (default, change output_fp with -o). Each sequence
description line will have ' RC' appended to the end of it (default,
leave sequence description lines untouched by passing -r):
python ~/repo/Qiime/qiime/adjust_seq_orientation.py -i seqs.fasta
"""
def null_seq_desc_mapper(s):
return s
def append_rc(s):
return s + ' RC'
def rc_fasta_lines(fasta_lines, seq_desc_mapper=append_rc):
"""
"""
for seq_id, seq in parse_fasta(fasta_lines):
seq_id = seq_desc_mapper(seq_id)
seq = str(DNA(seq.upper()).rc())
yield seq_id, seq
return
def rc_fasta_file(fasta_fp, output_fp, seq_id_mapper=append_rc):
"""
"""
input_f = open(fasta_fp)
output_f = open(output_fp, 'w')
for s in rc_fasta_lines(input_f, seq_id_mapper):
output_f.write('>%s\n%s\n' % s)
input_f.close()
output_f.close()
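# A small usage sketch (editorial addition; assumes scikit-bio is installed
# for parse_fasta/DNA). Reverse-complements an in-memory FASTA record:
if __name__ == '__main__':
    demo_lines = ['>seq1', 'ATGC']
    for demo_id, demo_seq in rc_fasta_lines(demo_lines):
        print '>%s\n%s' % (demo_id, demo_seq)  # -> '>seq1 RC' then 'GCAT'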
| gpl-2.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/Django-1.6.10-py2.7.egg/django/conf/locale/de_CH/formats.py | 118 | 1448 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
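# For illustration (editorial note): with the settings above a form DateField
# accepts e.g. '25.10.2006' or '25.10.06', parsed with
# datetime.datetime.strptime(value, '%d.%m.%Y') / ('%d.%m.%y').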
# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| apache-2.0 |
knowsis/django | django/utils/decorators.py | 119 | 4826 | "Functions that help with dynamically creating decorators for views."
from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
from django.utils import six
class classonlymethod(classmethod):
def __get__(self, instance, owner):
if instance is not None:
raise AttributeError("This method is available only on the view class.")
return super(classonlymethod, self).__get__(instance, owner)
def method_decorator(decorator):
"""
Converts a function decorator into a method decorator
"""
# 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined on.
def _dec(func):
def _wrapper(self, *args, **kwargs):
@decorator
def bound_func(*args2, **kwargs2):
return func(self, *args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return bound_func(*args, **kwargs)
# In case 'decorator' adds attributes to the function it decorates, we
# want to copy those. We don't have access to bound_func in this scope,
# but we can cheat by using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
# Need to preserve any existing attributes of 'func', including the name.
update_wrapper(_wrapper, func)
return _wrapper
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
return _dec
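# Example usage (editorial sketch, assuming the usual Django imports such as
# django.views.generic.View and django.contrib.auth.decorators.login_required):
#
#     class ProfileView(View):
#         @method_decorator(login_required)
#         def dispatch(self, *args, **kwargs):
#             return super(ProfileView, self).dispatch(*args, **kwargs)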
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
if six.PY3:
return WRAPPER_ASSIGNMENTS
else:
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
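# (Editorial note: under Python 2, functools.wraps() raises AttributeError for
# callables missing attributes such as __name__ -- e.g. functools.partial
# objects -- so only the attributes that actually exist are copied.)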
def make_middleware_decorator(middleware_class):
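    # Wraps a view so the middleware's hooks run around it in the usual
    # lifecycle order: process_request -> process_view -> (the view) ->
    # process_exception -> process_template_response / process_response.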
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception as e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
callback = lambda response: middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return _wrapped_view
return _decorator
return _make_decorator
| bsd-3-clause |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/bsddb/test/test_fileid.py | 111 | 1830 | """TestCase for resetting File ID.
"""
import os
import shutil
import unittest
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class FileidResetTestCase(unittest.TestCase):
def setUp(self):
self.db_path_1 = get_new_database_path()
self.db_path_2 = get_new_database_path()
self.db_env_path = get_new_environment_path()
def test_fileid_reset(self):
# create DB 1
self.db1 = db.DB()
self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=(db.DB_CREATE|db.DB_EXCL))
self.db1.put('spam', 'eggs')
self.db1.close()
shutil.copy(self.db_path_1, self.db_path_2)
self.db2 = db.DB()
self.db2.open(self.db_path_2, dbtype=db.DB_HASH)
self.db2.put('spam', 'spam')
self.db2.close()
self.db_env = db.DBEnv()
self.db_env.open(self.db_env_path, db.DB_CREATE|db.DB_INIT_MPOOL)
# use fileid_reset() here
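        # (editorial note) db_path_2 is a byte-for-byte copy of db_path_1, so
        # both files carry the same internal Berkeley DB file id; resetting it
        # lets the shared environment's memory pool treat them as distinct.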
self.db_env.fileid_reset(self.db_path_2)
self.db1 = db.DB(self.db_env)
self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
self.assertEqual(self.db1.get('spam'), 'eggs')
self.db2 = db.DB(self.db_env)
self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
self.assertEqual(self.db2.get('spam'), 'spam')
self.db1.close()
self.db2.close()
self.db_env.close()
def tearDown(self):
test_support.unlink(self.db_path_1)
test_support.unlink(self.db_path_2)
test_support.rmtree(self.db_env_path)
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 4):
suite.addTest(unittest.makeSuite(FileidResetTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-3.0 |
ojengwa/oh-mainline | mysite/profile/migrations/0021_raffi_remove_uniqueness_constraint_on_project_exp.py | 17 | 7400 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
# Copyright (C) 2009 Matthew Ziegelbaum
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 28, 14, 36, 26, 853766)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 28, 14, 36, 27, 271816)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 28, 14, 36, 27, 361950)))
# Deleting unique_together for [person, project] on ProjectExp.
try:
db.delete_unique('profile_projectexp', ('person', 'project'))
except ValueError:
pass
#db.execute('alter table profile_projectexp drop key profile_projectexp_person_id_6fe12f6f;')
def backwards(self, orm):
# Changing field 'Person.time_record_was_created'
db.alter_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 25, 10, 24, 42, 27042)))
# Changing field 'Link_ProjectExp_Tag.time_record_was_created'
db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 25, 10, 24, 41, 921862)))
# Changing field 'Link_Project_Tag.time_record_was_created'
db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 6, 25, 10, 24, 41, 441781)))
# Creating unique_together for [person, project] on ProjectExp.
db.create_unique('profile_projectexp', ('person', 'project'))
models = {
'profile.person': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'name': ('models.CharField', [], {'max_length': '200'}),
'password_hash_md5': ('models.CharField', [], {'max_length': '200'}),
'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 28, 14, 36, 28, 64541)'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}),
'text': ('models.CharField', [], {'max_length': '50'})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag','project_exp','source'),]"},
'favorite': ('models.BooleanField', [], {'default': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 28, 14, 36, 27, 969508)'})
},
'profile.sourceforgeperson': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'username': ('models.CharField', [], {'max_length': '200'})
},
'profile.link_project_tag': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '200'}),
'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 6, 28, 14, 36, 28, 410014)'})
},
'profile.sourceforgeproject': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'unixname': ('models.CharField', [], {'max_length': '200'})
},
'search.project': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person','project'),]"},
'date_collected': ('models.DateTimeField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('models.BooleanField', [], {'default': 'False'}),
'person': ('models.ForeignKey', ["orm['profile.SourceForgePerson']"], {}),
'position': ('models.CharField', [], {'max_length': '200'}),
'project': ('models.ForeignKey', ["orm['profile.SourceForgeProject']"], {})
},
'profile.tagtype': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '100'}),
'prefix': ('models.CharField', [], {'max_length': '20'})
},
'profile.projectexp': {
'description': ('models.TextField', [], {}),
'favorite': ('models.BooleanField', [], {'default': '0'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}),
'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
'person_role': ('models.CharField', [], {'max_length': '200'}),
'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}),
'time_record_was_created': ('models.DateTimeField', [], {'null': 'True'}),
'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['profile']
| agpl-3.0 |
yaniv14/OpenCommunity | src/shultze/test_functionality/test_plurality_at_large.py | 3 | 2860 | # Copyright (C) 2009, Brad Beattie
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from shultze.pyvotecore.plurality_at_large import PluralityAtLarge
import unittest
class TestPluralityAtLarge(unittest.TestCase):
# Plurality at Large, no ties
def test_plurality_at_large_no_ties(self):
# Generate data
output = PluralityAtLarge([
{"count":26, "ballot":["c1", "c2"]},
{"count":22, "ballot":["c1", "c3"]},
{"count":23, "ballot":["c2", "c3"]}
], required_winners=2).as_dict()
# Run tests
self.assertEqual(output, {
'candidates': set(['c1', 'c2', 'c3']),
'tallies': {'c3': 45, 'c2': 49, 'c1': 48},
'winners': set(['c2', 'c1'])
})
# Plurality at Large, irrelevant ties
def test_plurality_at_large_irrelevant_ties(self):
# Generate data
output = PluralityAtLarge([
{"count":26, "ballot":["c1", "c2"]},
{"count":22, "ballot":["c1", "c3"]},
{"count":22, "ballot":["c2", "c3"]},
{"count":11, "ballot":["c4", "c5"]}
], required_winners=2).as_dict()
# Run tests
self.assertEqual(output, {
'candidates': set(['c1', 'c2', 'c3', 'c4', 'c5']),
'tallies': {'c3': 44, 'c2': 48, 'c1': 48, 'c5': 11, 'c4': 11},
'winners': set(['c2', 'c1'])
})
    # Plurality at Large, relevant ties
def test_plurality_at_large_relevant_ties(self):
# Generate data
output = PluralityAtLarge([
{"count":30, "ballot":["c1", "c2"]},
{"count":22, "ballot":["c3", "c1"]},
{"count":22, "ballot":["c2", "c3"]},
{"count":4, "ballot":["c4", "c1"]},
{"count":8, "ballot":["c3", "c4"]},
], required_winners=2).as_dict()
# Run tests
self.assertEqual(output["tallies"], {'c3': 52, 'c2': 52, 'c1': 56, 'c4': 12})
self.assertEqual(len(output["tie_breaker"]), 4)
self.assertEqual(output["tied_winners"], set(['c2', 'c3']))
self.assert_("c1" in output["winners"] and ("c2" in output["winners"] or "c3" in output["winners"]))
self.assertEqual(len(output), 5)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
snah/ezvalue | setup.py | 1 | 1142 | # pylint: disable=C0330
import os.path
import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
README_PATH = os.path.join(ROOT_DIR, 'README')
with open(README_PATH, encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setuptools.setup(
name='ezvalue',
version='0.1.3',
    description='An elegant and powerful implementation of a value object.',
long_description=LONG_DESCRIPTION,
url='https://github.com/snah/ezvalue',
author='Hans Maree',
author_email='hans.maree@gmail.com',
license='MIT',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='value valueobject immutable',
packages=['ezvalue'],
)
| mit |
MoritzS/django | tests/user_commands/tests.py | 27 | 8160 | import os
from io import StringIO
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, extend_sys_path
from django.utils import translation
from .management.commands import dance
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" An unknown command raises CommandError """
with self.assertRaises(CommandError):
management.call_command(('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
dance.Command.requires_system_checks = False
try:
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
finally:
dance.Command.requires_system_checks = True
self.assertIn("CommandError", stderr.getvalue())
def test_deactivate_locale_set(self):
# Deactivate translation when set to true
with translation.override('pl'):
result = management.call_command('leave_locale_alone_false', stdout=StringIO())
self.assertIsNone(result)
def test_configured_locale_preserved(self):
# Leaves locale from settings when set to false
with translation.override('pl'):
result = management.call_command('leave_locale_alone_true', stdout=StringIO())
self.assertEqual(result, "pl")
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(__file__)
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
def test_call_command_option_parsing_non_string_arg(self):
"""
It should be possible to pass non-string arguments to call_command.
"""
out = StringIO()
management.call_command('dance', 1, verbosity=0, stdout=out)
self.assertIn("You passed 1 as a positional argument.", out.getvalue())
def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
out = StringIO()
management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
with self.assertRaises(CommandError):
management.call_command('hal', stdout=StringIO())
def test_output_transaction(self):
output = management.call_command('transaction', stdout=StringIO(), no_color=True)
self.assertTrue(output.strip().startswith(connection.ops.start_transaction_sql()))
self.assertTrue(output.strip().endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
"""
By default, call_command should not trigger the check framework, unless
specifically asked.
"""
self.counter = 0
def patched_check(self_, **kwargs):
self.counter += 1
saved_check = BaseCommand.check
BaseCommand.check = patched_check
try:
management.call_command("dance", verbosity=0)
self.assertEqual(self.counter, 0)
management.call_command("dance", verbosity=0, skip_checks=False)
self.assertEqual(self.counter, 1)
finally:
BaseCommand.check = saved_check
def test_check_migrations(self):
requires_migrations_checks = dance.Command.requires_migrations_checks
self.assertIs(requires_migrations_checks, False)
try:
with mock.patch.object(BaseCommand, 'check_migrations') as check_migrations:
management.call_command('dance', verbosity=0)
self.assertFalse(check_migrations.called)
dance.Command.requires_migrations_checks = True
management.call_command('dance', verbosity=0)
self.assertTrue(check_migrations.called)
finally:
dance.Command.requires_migrations_checks = requires_migrations_checks
class CommandRunTests(AdminScriptTestCase):
"""
Tests that need to run by simulating the command line, not by call_command.
"""
def tearDown(self):
self.remove_settings('settings.py')
def test_script_prefix_set_in_commands(self):
self.write_settings('settings.py', apps=['user_commands'], sdict={
'ROOT_URLCONF': '"user_commands.urls"',
'FORCE_SCRIPT_NAME': '"/PREFIX/"',
})
out, err = self.run_manage(['reverse_url'])
self.assertNoOutput(err)
self.assertEqual(out.strip(), '/PREFIX/some/url/')
class UtilsTests(SimpleTestCase):
def test_no_existent_external_program(self):
with self.assertRaises(CommandError):
popen_wrapper(['a_42_command_that_doesnt_exist_42'])
| bsd-3-clause |
omaciel/mangonel | mangonel/changeset.py | 1 | 1633 | from common import *
import datetime
import json
import sys
import time
try:
from katello.client.api.task_status import TaskStatusAPI
from katello.client.api.changeset import ChangesetAPI
except ImportError, e:
print "Please install Katello CLI package."
sys.exit(-1)
class Changeset(ChangesetAPI):
task_api = TaskStatusAPI()
def __init__(self):
super(Changeset, self).__init__()
def create(self, org, env, name=None, type_in='promotion', description=None):
if name is None:
name = generate_name(8)
if description is None:
description = "Promoted on %s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
return super(Changeset, self).create(org['label'], env['id'], name, type_in, description)
def changeset_by_name(self, org, env, name):
return super(Changeset, self).changeset_by_name(org['label'], env['id'], name)
def add_content(self, chsId, content, contentType='content_views'):
return super(Changeset, self).add_content(chsId, contentType, {'content_view_id' : content['id'] })
def apply(self, chsId):
applyTask = super(Changeset, self).apply(chsId)
task = self.task_api.status(applyTask['uuid'])
for i in range(MAX_ATTEMPTS):
task = self.task_api.status(applyTask['uuid'])
if task['state'] == 'finished' or task['state'] == 'error':
break
logger.info("Promoting content...")
logger.debug(task['state'])
time.sleep(REQUEST_DELAY)
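        # for/else: the else branch runs only when the loop exhausts
        # MAX_ATTEMPTS without hitting break, i.e. the task never finished.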
else:
task = None
return task
| gpl-2.0 |
kawamon/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/kvform.py | 75 | 5685 | from openid import kvform
from openid import oidutil
import unittest
class KVBaseTest(unittest.TestCase):
def shortDescription(self):
return '%s test for %r' % (self.__class__.__name__, self.kvform)
def log(self, message, unused_priority=None):
self.warnings.append(message)
def checkWarnings(self, num_warnings):
self.failUnlessEqual(num_warnings, len(self.warnings), repr(self.warnings))
def setUp(self):
self.warnings = []
self.old_log = oidutil.log
self.log_func = oidutil.log = self.log
self.failUnless(self.log_func is oidutil.log,
(oidutil.log, self.log_func))
def tearDown(self):
oidutil.log = self.old_log
class KVDictTest(KVBaseTest):
def __init__(self, kv, dct, warnings):
unittest.TestCase.__init__(self)
self.kvform = kv
self.dict = dct
self.expected_warnings = warnings
def runTest(self):
# Convert KVForm to dict
d = kvform.kvToDict(self.kvform)
# make sure it parses to expected dict
self.failUnlessEqual(self.dict, d)
# Check to make sure we got the expected number of warnings
self.checkWarnings(self.expected_warnings)
# Convert back to KVForm and round-trip back to dict to make
        # sure that dict -> kv -> dict is identity.
kv = kvform.dictToKV(d)
d2 = kvform.kvToDict(kv)
self.failUnlessEqual(d, d2)
class KVSeqTest(KVBaseTest):
def __init__(self, seq, kv, expected_warnings):
unittest.TestCase.__init__(self)
self.kvform = kv
self.seq = seq
self.expected_warnings = expected_warnings
def cleanSeq(self, seq):
"""Create a new sequence by stripping whitespace from start
and end of each value of each pair"""
clean = []
for k, v in self.seq:
if type(k) is str:
k = k.decode('utf8')
if type(v) is str:
v = v.decode('utf8')
clean.append((k.strip(), v.strip()))
return clean
def runTest(self):
# seq serializes to expected kvform
actual = kvform.seqToKV(self.seq)
self.failUnlessEqual(self.kvform, actual)
self.failUnless(type(actual) is str)
# Parse back to sequence. Expected to be unchanged, except
# stripping whitespace from start and end of values
# (i. e. ordering, case, and internal whitespace is preserved)
seq = kvform.kvToSeq(actual)
clean_seq = self.cleanSeq(seq)
self.failUnlessEqual(seq, clean_seq)
self.checkWarnings(self.expected_warnings)
kvdict_cases = [
# (kvform, parsed dictionary, expected warnings)
('', {}, 0),
('college:harvey mudd\n', {'college':'harvey mudd'}, 0),
('city:claremont\nstate:CA\n',
{'city':'claremont', 'state':'CA'}, 0),
('is_valid:true\ninvalidate_handle:{HMAC-SHA1:2398410938412093}\n',
{'is_valid':'true',
'invalidate_handle':'{HMAC-SHA1:2398410938412093}'}, 0),
# Warnings from lines with no colon:
('x\n', {}, 1),
('x\nx\n', {}, 2),
('East is least\n', {}, 1),
# But not from blank lines (because LJ generates them)
('x\n\n', {}, 1),
# Warning from empty key
(':\n', {'':''}, 1),
(':missing key\n', {'':'missing key'}, 1),
# Warnings from leading or trailing whitespace in key or value
(' street:foothill blvd\n', {'street':'foothill blvd'}, 1),
('major: computer science\n', {'major':'computer science'}, 1),
(' dorm : east \n', {'dorm':'east'}, 2),
# Warnings from missing trailing newline
('e^(i*pi)+1:0', {'e^(i*pi)+1':'0'}, 1),
('east:west\nnorth:south', {'east':'west', 'north':'south'}, 1),
]
kvseq_cases = [
([], '', 0),
# Make sure that we handle non-ascii characters (also wider than 8 bits)
([(u'\u03bbx', u'x')], '\xce\xbbx:x\n', 0),
# If it's a UTF-8 str, make sure that it's equivalent to the same
# string, decoded.
([('\xce\xbbx', 'x')], '\xce\xbbx:x\n', 0),
([('openid', 'useful'), ('a', 'b')], 'openid:useful\na:b\n', 0),
# Warnings about leading whitespace
([(' openid', 'useful'), ('a', 'b')], ' openid:useful\na:b\n', 2),
# Warnings about leading and trailing whitespace
([(' openid ', ' useful '),
(' a ', ' b ')], ' openid : useful \n a : b \n', 8),
# warnings about leading and trailing whitespace, but not about
# internal whitespace.
([(' open id ', ' use ful '),
(' a ', ' b ')], ' open id : use ful \n a : b \n', 8),
([(u'foo', 'bar')], 'foo:bar\n', 0),
]
kvexc_cases = [
[('openid', 'use\nful')],
[('open\nid', 'useful')],
[('open\nid', 'use\nful')],
[('open:id', 'useful')],
[('foo', 'bar'), ('ba\n d', 'seed')],
[('foo', 'bar'), ('bad:', 'seed')],
]
class KVExcTest(unittest.TestCase):
def __init__(self, seq):
unittest.TestCase.__init__(self)
self.seq = seq
def shortDescription(self):
return 'KVExcTest for %r' % (self.seq,)
def runTest(self):
self.failUnlessRaises(ValueError, kvform.seqToKV, self.seq)
class GeneralTest(KVBaseTest):
kvform = '<None>'
def test_convert(self):
result = kvform.seqToKV([(1,1)])
self.failUnlessEqual(result, '1:1\n')
self.checkWarnings(2)
def pyUnitTests():
tests = [KVDictTest(*case) for case in kvdict_cases]
tests.extend([KVSeqTest(*case) for case in kvseq_cases])
tests.extend([KVExcTest(case) for case in kvexc_cases])
tests.append(unittest.defaultTestLoader.loadTestsFromTestCase(GeneralTest))
return unittest.TestSuite(tests)
| apache-2.0 |
saideepchandg/oracle-r12-accounting | lib/django/contrib/admin/templatetags/admin_list.py | 46 | 17082 | from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, EMPTY_CHANGELIST_VALUE, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.utils import formats
from django.utils.encoding import force_text
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy "smart" pagination with elided page ranges.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
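            # Worked example: with num_pages=20 and page_num=10 this yields
            # [0, 1, '.', 7, 8, 9, 10, 11, 12, 13, '.', 18, 19].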
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' onclick="opener.dismissRelatedLookupPopup(window, '
                        '&#39;{}&#39;); return false;"', result_id
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field(field_name)
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda filters: cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
| bsd-3-clause |
jamielennox/deluge | deluge/plugins/Scheduler/deluge/plugins/scheduler/__init__.py | 14 | 2250 | #
# __init__.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <mvoncken@gmail.com>
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
from deluge.plugins.init import PluginInitBase
class CorePlugin(PluginInitBase):
def __init__(self, plugin_name):
from core import Core as _plugin_cls
self._plugin_cls = _plugin_cls
super(CorePlugin, self).__init__(plugin_name)
class GtkUIPlugin(PluginInitBase):
def __init__(self, plugin_name):
from gtkui import GtkUI as _plugin_cls
self._plugin_cls = _plugin_cls
super(GtkUIPlugin, self).__init__(plugin_name)
class WebUIPlugin(PluginInitBase):
def __init__(self, plugin_name):
from webui import WebUI as _plugin_cls
self._plugin_cls = _plugin_cls
super(WebUIPlugin, self).__init__(plugin_name)
| gpl-3.0 |
avoinsystems/odoo | openerp/report/render/simple.py | 324 | 3152 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
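        # Iterative depth-first walk: pop a (node, depth) pair, gather its text
        # content, and push element children back onto the queue front
        # (reverse iteration keeps them in document order).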
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yongshengwang/hue | desktop/core/ext-py/boto-2.38.0/boto/sqs/bigmessage.py | 170 | 4729 | # Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import boto
from boto.sqs.message import RawMessage
from boto.exception import SQSDecodeError
class BigMessage(RawMessage):
"""
The BigMessage class provides large payloads (up to 5GB)
by storing the payload itself in S3 and then placing a reference
to the S3 object in the actual SQS message payload.
To create a BigMessage, you should create a BigMessage object
and pass in a file-like object as the ``body`` param and also
pass in the an S3 URL specifying the bucket in which to store
the message body::
import boto.sqs
from boto.sqs.bigmessage import BigMessage
sqs = boto.sqs.connect_to_region('us-west-2')
queue = sqs.get_queue('myqueue')
fp = open('/path/to/bigmessage/data')
msg = BigMessage(queue, fp, 's3://mybucket')
queue.write(msg)
Passing in a fully-qualified S3 URL (e.g. s3://mybucket/foo)
is interpreted to mean that the body of the message is already
stored in S3 and the that S3 URL is then used directly with no
content uploaded by BigMessage.
"""
def __init__(self, queue=None, body=None, s3_url=None):
self.s3_url = s3_url
super(BigMessage, self).__init__(queue, body)
def _get_bucket_key(self, s3_url):
bucket_name = key_name = None
if s3_url:
if s3_url.startswith('s3://'):
# We need to split out the bucket from the key (if
# supplied). We also have to be aware that someone
# may provide a trailing '/' character as in:
# s3://foo/ and we want to handle that.
s3_components = s3_url[5:].split('/', 1)
bucket_name = s3_components[0]
if len(s3_components) > 1:
if s3_components[1]:
key_name = s3_components[1]
else:
msg = 's3_url parameter should start with s3://'
raise SQSDecodeError(msg, self)
return bucket_name, key_name
def encode(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
key_name = uuid.uuid4()
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url
def _get_s3_object(self, s3_url):
bucket_name, key_name = self._get_bucket_key(s3_url)
if bucket_name and key_name:
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.get_key(key_name)
return key
else:
msg = 'Unable to decode S3 URL: %s' % s3_url
raise SQSDecodeError(msg, self)
def decode(self, value):
self.s3_url = value
key = self._get_s3_object(value)
return key.get_contents_as_string()
def delete(self):
# Delete the object in S3 first, then delete the SQS message
if self.s3_url:
key = self._get_s3_object(self.s3_url)
key.delete()
super(BigMessage, self).delete()
| apache-2.0 |
vishnugonela/boto | tests/integration/emr/__init__.py | 761 | 1104 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| mit |
KnoMorales/shpescape | shapeft/views.py | 18 | 10361 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.contrib.gis.geos import fromstr, LineString
from django.contrib.gis.models import SpatialRefSys
from django.contrib.gis.gdal import DataSource, OGRGeometry
from django.utils.datastructures import SortedDict
import simplejson
from shapes.forms import UploadForm
from ft_auth.views import *
from shapeft.models import shapeUpload
#@cache_page(60*5)
def static(request, template):
if not template:
template = "index.html"
return render_to_response(template, RequestContext(request,{}))
def generic_import(request):
"""
accept an uploaded file and create associated shapeUpload obj
"""
token = get_token(request)
if not token:
return HttpResponseRedirect('/auth/FTVerify')
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
form.handle(request.FILES['file_obj'])
            create_simplify = request.POST.get('create_simplify', False)
            create_centroid = request.POST.get('create_centroid', False)
            create_centroid_poly = request.POST.get('create_centroid_poly', False)
#save form info in a model, and run from cron
uids = []
for shapefile in form.shapefiles:
upload = shapeUpload()
upload.auth_token = token
upload.shapefile = shapefile
upload.status = 1
upload.save()
upload.create_simplify = bool(create_simplify)
upload.create_centroid = bool(create_centroid)
upload.create_centroid_poly = bool(create_centroid_poly)
uids.append(upload.uid)
url = '/uploads/%s/' % 'g'.join(uids)
return HttpResponseRedirect(url)
else:
form = UploadForm()
return render_to_response('upload.html', RequestContext(request,{
'form': form}))
def upload_detail(request, upload_ids):
"""
display status of one or more shapeUploads
"""
uids = upload_ids.split('g')
uploads = shapeUpload.objects.filter(uid__in=uids).order_by('id')
#upload = get_object_or_404(shapeUpload, id=upload_id)
return render_to_response('upload_detail.html', RequestContext(request,{
'uploads': uploads}))
def import_from_shape(upload,
start_row=0,
max_rows=200000,
create_int_style_cols=True):
"""
    upload - a shapeUpload object
max_rows - any more than this is ignored
centroid - if it's a (multi)polygon, should we also create a geometry_centroid field
"""
upload.status = 2 #set this right away so it doesn't get reprocessed
upload.save()
ds = DataSource(upload.shapefile)
layer = ds[0]
fields = layer.fields
num_features = len(layer)
#set max # of _style features
max_distinct_style_vals = max(min(num_features / 100, 50),10)
print 'there are %d features' % num_features
upload.total_rows = num_features
if not num_features:
print 'no rows, returning'
upload.status = 6
upload.save()
return
rows = []
#get field types
field_map = {
'OFTString':'STRING',
'OFTReal':'NUMBER',
'OFTInteger':'NUMBER',
'OFTDate':'DATETIME'
}
field_types = [field_map[f.__name__] for f in layer.field_types]
field_layers = layer.fields
#insert geometry layers first
field_layers.insert(0,'geometry')
field_types.insert(0,'LOCATION')
field_layers.insert(1,'geometry_vertex_count')
field_types.insert(1,'NUMBER')
if upload.create_simplify:
field_layers.insert(0,'geometry_simplified')
field_types.insert(0,'LOCATION')
field_layers.insert(1,'geometry_simplified_vertex_count')
field_types.insert(1,'NUMBER')
#use sorted dict so we can ensure table has geom columns upfront
field_dict = SortedDict(zip(field_layers, field_types))
#set up extra fields if creating int/style cols
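    # For each STRING column this adds a parallel "<name>_ft_style" NUMBER
    # column that maps every distinct value to a small integer (and is dropped
    # again below once a column exceeds max_distinct_style_vals), presumably
    # so Fusion Tables styling can key off a numeric value.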
if create_int_style_cols:
int_style_dict = {}
for field,field_type in field_dict.items():
if field_type == 'STRING':
field_dict[field + '_ft_style'] = 'NUMBER'
int_style_dict[field] = {}
print field_dict
#add some custom import fields
field_dict['import_notes'] = 'STRING'
print 'FIELD DICT', field_dict
print 'starting to process'
for i, feat in enumerate(layer):
if i > max_rows:
continue
if start_row and i < start_row:
continue
upload.rows_processed = i + 1
if not i % ((num_features / 50) or 5):
print upload.rows_processed,'rp'
upload.save()
upload.save()
rd = {}
#geom = fromstr(feat.geom.wkt,srid=srid)
if layer.srs:
try:
geom = OGRGeometry(feat.geom.wkt, layer.srs.proj4)
geom.transform(4326)
except Exception, e:
print 'FAIL GEOM'
print e,
geom = None
else:
geom = OGRGeometry(feat.geom.wkt)
if geom:
geom = fromstr(geom.wkt)
#create optional centroid for polys
if upload.create_centroid and 'oly' in geom.geom_type:
field_dict['geometry_pos'] = 'LOCATION'
rd['geometry_pos'] = geom.point_on_surface.kml
if upload.create_centroid_poly and 'oly' in geom.geom_type:
field_dict['geometry_pos_poly_2'] = 'LOCATION'
field_dict['geometry_pos_poly_3'] = 'LOCATION'
rd['geometry_pos_poly_2'] = geom.point_on_surface.buffer(.0001,10).kml
rd['geometry_pos_poly_3'] = geom.point_on_surface.buffer(.0005,10).kml
#if it's > 1M characters, we need to simplify it for FT
simplify_tolerance = .0001
while len(geom.kml) > 1000000:
geom = geom.simplify(simplify_tolerance)
print 'simplified to %f' % simplify_tolerance
                rd['import_notes'] = 'simplified to %f DD' % simplify_tolerance
simplify_tolerance = simplify_tolerance * 1.5
            if not geom.valid:
                rd['import_notes'] = rd.get('import_notes', '') + '<br>Geometry not valid'
kml = geom.kml
rd['geometry'] = kml
rd['geometry_vertex_count'] = geom.num_coords
if upload.create_simplify and not 'oint' in geom.geom_type:
amt = .002
if 'oly' in geom.geom_type:
buffer_geom = geom.buffer(amt)
buffer_geom = buffer_geom.buffer(amt * -1)
simple_geom = buffer_geom.simplify(amt)
else:
simple_geom = geom.simplify(amt)
rd['geometry_simplified'] = simple_geom.kml
rd['geometry_simplified_vertex_count'] = simple_geom.num_coords
for f in fields:
val = feat.get(f)
#make sure we have proper null type for diff fields
if val == '<Null>':
continue
if not val:
continue
if field_dict[f] == 'DATETIME':
val = val.isoformat().split('T')[0]
if field_dict[f] == 'STRING' \
and create_int_style_cols \
and field_dict.has_key(f + '_ft_style'):
#check to see if we have a number for this yet
try:
rd[f + '_ft_style'] = int_style_dict[f][val]
except:
int_style_dict[f][val] = len(int_style_dict[f])
rd[f + '_ft_style'] = int_style_dict[f][val]
#however if we have too many distinct vals, let's just not do this anymore
if len(int_style_dict[f]) > max_distinct_style_vals:
print 'DELETING FD %s' % f
del field_dict[f + '_ft_style']
del rd[f + '_ft_style']
#sucks, but now we should just remove all these fields from previous rows
                        for srow in rows:
                            try:
                                del srow[f + '_ft_style']
                            except KeyError:
                                pass  # probably this was a null value
rd[f] = val
rows.append(rd)
#let's process 10k rows at a time.. not keep everything in memory
if len(rows) > 10000:
uploadRows(upload, field_dict, rows)
rows = []
uploadRows(upload, field_dict, rows)
def uploadRows(upload, field_dict, rows):
if not upload.ft_table_id:
upload = createTable(upload, field_dict)
upload.status = 3
upload.save()
print 'inserting %d rows' % len(rows)
insertData(upload, field_dict, rows)
upload.status = 4
upload.save()
def insertSql(client, sql, attempt_no=0):
    try:
        resp = client.query(sql)
except:
print 'unable to query sql %s' % sql
resp = client.query(sql)
print resp[:50]
if 'Unable' in resp:
if attempt_no > 3:
return 'Error - failed after 3 attempts' + resp
#print sql
print resp
time.sleep(1)
print 'len: %d, attempt: %d' % (len(sql), attempt_no)
        return insertSql(client, sql, attempt_no + 1)
return resp
def getClient(upload):
ftClient = OAuthFTClient(
FT_OAUTH['key'],
FT_OAUTH['secret'],
upload.auth_token.ft_token,
upload.auth_token.ft_token_secret)
print 'client created'
return ftClient
def createTable(upload, field_dict):
ftClient = getClient(upload)
table_dictionary = {upload.get_title() : field_dict}
results = ftClient.query(SQL().createTable(table_dictionary))
table_id = results.split("\n")[1]
print 'new table: %s' % results
upload.ft_table_id = table_id
upload.save()
return upload
def insertData(upload, field_dict, rows):
ftClient = getClient(upload)
#insert rows
sql = []
sql_len = 0
for i, row in enumerate(rows):
upload.rows_imported = i + 1
if sql_len > 500000 or len(sql) > 100: # max upload is 1MB?
insertSql(ftClient, ';'.join(sql))
sql = []
sql_len = 0
upload.save()
try:
insert_statement = SQL().insert(upload.ft_table_id, row)
except Exception, e:
print 'FAIL SQL', row
print e
continue
sql.append(insert_statement)
        sql_len += len(insert_statement)
insertSql(ftClient, ';'.join(sql))
upload.save()
| apache-2.0 |
janwuyts/empty_scripts | python.py | 1 | 1771 | #!/usr/bin/env python
"""Change the docstring of this empty script
- Run unittests if called with "test" as first and only argument
- Call main() if there are other command line arguments
"""
__author__ = "Jan Wuyts"
__copyright__ = "Copyright 2013"
__credits__ = ["Jan Wuyts", ]
__version__ = "0.0.1"
__maintainer__ = "Jan Wuyts"
__email__ = "Jan.Wuyts@gmail.com"
__status__ = "Development"
import sys
import logging
import optparse
#import os
#import re
#import itertools
## FUNCTION AND CLASS DEFINITIONS ##########################
## UNIT TESTS ##############################################
import unittest
class UnitTest(unittest.TestCase):
def setUp(self):
pass
def test_whatever(self):
self.assertEquals(True, True)
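# Running "python <script>.py test" strips the extra argument and hands
# control to unittest; unittest.main() exits when the tests finish, so the
# sys.exit() below is only a safeguard.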
if (len(sys.argv) == 2) and ("test" in sys.argv[1].lower()):
sys.argv = sys.argv[:1]
unittest.main()
sys.exit()
## LOGGING #################################################
error = lambda x: logging.error(x)
warn = lambda x: logging.warning(x)
info = lambda x: logging.info(x)
def main(argv=None):
if argv is None:
argv = sys.argv
## COMMAND LINE OPTIONS ####################################
parser = optparse.OptionParser(usage = "usage: python %prog [options]", description=__doc__)
parser.add_option('-v', '--verbose',
dest="verbose",
action="store_true",
default=False,
help="be more verbose")
options, remainder = parser.parse_args(sys.argv)
if options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
#######
## Do something useful here ################################
##############
if __name__=="__main__":
sys.exit(main())
| mit |
1995parham/OpenBridge | Dryu/src/ofctl-rest.py | 2 | 24385 | # In The Name Of God
# ========================================
# [] File Name : ofctl-rest.py
#
# [] Creation Date : 27-04-2015
#
# [] Created By : Parham Alvani (parham.alvani@gmail.com)
# =======================================
__author__ = 'Parham Alvani'
import logging
import json
import ast
from webob import Response
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase, WSGIApplication
LOG = logging.getLogger('F-Bridge.Dryu.ofct-rest')
# supported ofctl versions in this restful app
supported_ofctl = {
ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
ofproto_v1_3.OFP_VERSION: ofctl_v1_3,
}
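# Each REST handler looks up the ofctl module matching the datapath's
# negotiated OpenFlow version; datapaths speaking an unlisted version get an
# HTTP 501 response.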
class StatsController(ControllerBase):
def __init__(self, req, link, data, **config):
super(StatsController, self).__init__(req, link, data, **config)
self.dpset = data['dpset']
self.waiters = data['waiters']
def get_dpids(self, req, **_kwargs):
dps = self.dpset.dps.keys()
body = json.dumps(dps)
return Response(content_type='application/json', body=body)
def get_desc_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
desc = _ofctl.get_desc_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(desc)
return Response(content_type='application/json', body=body)
def get_flow_stats(self, req, dpid, **_kwargs):
if req.body == '':
flow = {}
else:
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
flows = _ofctl.get_flow_stats(dp, self.waiters, flow)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(flows)
return Response(content_type='application/json', body=body)
def get_aggregate_flow_stats(self, req, dpid, **_kwargs):
if req.body == '':
flow = {}
else:
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
flows = _ofctl.get_aggregate_flow_stats(dp, self.waiters, flow)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(flows)
return Response(content_type='application/json', body=body)
def get_port_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
ports = _ofctl.get_port_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(ports)
return Response(content_type='application/json', body=body)
def get_queue_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
queues = _ofctl.get_queue_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(queues)
return Response(content_type='application/json', body=body)
def get_meter_features(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_features'):
meters = _ofctl.get_meter_features(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_meter_config(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_config'):
meters = _ofctl.get_meter_config(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_meter_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_stats'):
meters = _ofctl.get_meter_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_group_features(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_features'):
groups = _ofctl.get_group_features(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_group_desc(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_desc'):
groups = _ofctl.get_group_desc(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_group_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_stats'):
groups = _ofctl.get_group_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_port_desc(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
groups = _ofctl.get_port_desc(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def mod_flow_entry(self, req, cmd, **_kwargs):
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = flow.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPFC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPFC_MODIFY
elif cmd == 'modify_strict':
cmd = dp.ofproto.OFPFC_MODIFY_STRICT
elif cmd == 'delete':
cmd = dp.ofproto.OFPFC_DELETE
elif cmd == 'delete_strict':
cmd = dp.ofproto.OFPFC_DELETE_STRICT
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_flow_entry(dp, flow, cmd)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def delete_flow_entry(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
flow = {'table_id': dp.ofproto.OFPTT_ALL}
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def mod_meter_entry(self, req, cmd, **_kwargs):
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = flow.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPMC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPMC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPMC_DELETE
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'mod_meter_entry'):
_ofctl.mod_meter_entry(dp, flow, cmd)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
return Response(status=200)
def mod_group_entry(self, req, cmd, **_kwargs):
try:
group = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = group.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPGC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPGC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPGC_DELETE
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'mod_group_entry'):
_ofctl.mod_group_entry(dp, group, cmd)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
return Response(status=200)
def mod_port_behavior(self, req, cmd, **_kwargs):
try:
port_config = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = port_config.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
port_no = port_config.get('port_no', 0)
if type(port_no) == str and not port_no.isdigit():
LOG.debug('invalid port_no %s', port_no)
return Response(status=400)
port_info = self.dpset.port_state[int(dpid)].get(port_no)
if port_info:
port_config.setdefault('hw_addr', port_info.hw_addr)
port_config.setdefault('advertise', port_info.advertised)
else:
return Response(status=404)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd != 'modify':
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_port_behavior(dp, port_config)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def send_experimenter(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
try:
exp = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'send_experimenter'):
_ofctl.send_experimenter(dp, exp)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
class RestStatsApi(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
'wsgi': WSGIApplication
}
def __init__(self, *args, **kwargs):
super(RestStatsApi, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {'dpset': self.dpset, 'waiters': self.waiters}
mapper = wsgi.mapper
wsgi.registory['StatsController'] = self.data
path = '/stats'
uri = path + '/switches'
mapper.connect('stats', uri,
controller=StatsController, action='get_dpids',
conditions=dict(method=['GET']))
uri = path + '/desc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_desc_stats',
conditions=dict(method=['GET']))
uri = path + '/flow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/aggregateflow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController,
action='get_aggregate_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/port/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_stats',
conditions=dict(method=['GET']))
uri = path + '/queue/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_queue_stats',
conditions=dict(method=['GET']))
uri = path + '/meterfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_features',
conditions=dict(method=['GET']))
uri = path + '/meterconfig/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_config',
conditions=dict(method=['GET']))
uri = path + '/meter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_stats',
conditions=dict(method=['GET']))
uri = path + '/groupfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_features',
conditions=dict(method=['GET']))
uri = path + '/groupdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_desc',
conditions=dict(method=['GET']))
uri = path + '/group/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_stats',
conditions=dict(method=['GET']))
uri = path + '/portdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_desc',
conditions=dict(method=['GET']))
uri = path + '/flowentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_flow_entry',
conditions=dict(method=['POST']))
uri = path + '/flowentry/clear/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='delete_flow_entry',
conditions=dict(method=['DELETE']))
uri = path + '/meterentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_meter_entry',
conditions=dict(method=['POST']))
uri = path + '/groupentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_group_entry',
conditions=dict(method=['POST']))
uri = path + '/portdesc/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_port_behavior',
conditions=dict(method=['POST']))
uri = path + '/experimenter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='send_experimenter',
conditions=dict(method=['POST']))
@set_ev_cls([ofp_event.EventOFPStatsReply,
ofp_event.EventOFPDescStatsReply,
ofp_event.EventOFPFlowStatsReply,
ofp_event.EventOFPAggregateStatsReply,
ofp_event.EventOFPPortStatsReply,
ofp_event.EventOFPQueueStatsReply,
ofp_event.EventOFPMeterStatsReply,
ofp_event.EventOFPMeterFeaturesStatsReply,
ofp_event.EventOFPMeterConfigStatsReply,
ofp_event.EventOFPGroupStatsReply,
ofp_event.EventOFPGroupFeaturesStatsReply,
ofp_event.EventOFPGroupDescStatsReply,
ofp_event.EventOFPPortDescStatsReply
], MAIN_DISPATCHER)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
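        # Stats replies can span multiple messages; keep collecting until the
        # REPLY_MORE flag is cleared, then wake the waiting request handler.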
flags = 0
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
@set_ev_cls([ofp_event.EventOFPSwitchFeatures], MAIN_DISPATCHER)
def features_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
del self.waiters[dp.id][msg.xid]
lock.set() | gpl-2.0 |
rmtew/MediaTek-HelioX10-Kernel | alps/kernel-3.10/Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
afaheem88/tempest | tempest/tests/test_list_tests.py | 34 | 1824 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import six
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_testr_list_tests_no_errors(self):
# Remove unit test discover path from env to test tempest tests
test_env = os.environ.copy()
test_env.pop('OS_TEST_PATH')
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
env=test_env)
ids, err = p.communicate()
self.assertEqual(0, p.returncode,
"test discovery failed, one or more files cause an "
"error on import %s" % ids)
ids = six.text_type(ids).split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
parts = test_id.partition('tempest')
fail_id = parts[1] + parts[2]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
| apache-2.0 |
digitaleric-google/GCG-3.3 | tools/perf/util/setup.py | 97 | 1405 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
perf = Extension('perf',
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
'util/util.c', 'util/xyarray.c', 'util/cgroup.c',
'util/debugfs.c'],
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
thaumos/ansible | lib/ansible/modules/cloud/rackspace/rax_files_objects.py | 102 | 18489 | #!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
type: bool
default: 'no'
container:
description:
- The container to use for file object operations.
required: true
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
type: bool
default: 'yes'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
import os
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
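# Note: EXIT_DICT is module-level state that each action fills in before
# calling module.exit_json(); META_PREFIX is the prefix Cloud Files uses on
# per-object metadata headers.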
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
""" Uploads a folder to Cloud Files.
"""
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
    """ Uploads a single object or a folder to Cloud Files. Optionally sets
    metadata, a TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
    Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
    """ Delete specific objects by providing a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = True
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
    """ Removes metadata keys and values specified in meta, if any. Deletes
    them on all objects specified by src or dest (but not both), if any;
    otherwise it deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
if __name__ == '__main__':
main()
| gpl-3.0 |
d3banjan/polyamide | webdev/lib/python2.7/site-packages/django/core/handlers/wsgi.py | 82 | 9759 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
import warnings
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
from django.utils import datastructures, six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
past specified amount of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
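# A rough illustration of the limiting behaviour (values traced from the code
# above for a 7-byte in-memory stream):
#
#   from io import BytesIO
#   stream = LimitedStream(BytesIO(b'abc\ndef'), limit=5)
#   stream.readline()   # -> b'abc\n'
#   stream.read()       # -> b'd' (only 5 of the 7 bytes are readable)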
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in http://www.ietf.org/rfc/rfc2396.txt
self.path = '%s/%s' % (script_name.rstrip('/'),
path_info.replace('/', '', 1))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', RemovedInDjango19Warning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
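        # If the response wraps a file and the server provides an optimized
        # file wrapper (e.g. sendfile), let the server stream it directly.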
if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
response = environ['wsgi.file_wrapper'](response.file_to_stream)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value.encode(ISO_8859_1) if six.PY3 else value
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(UTF_8, errors='replace') if six.PY3 else value
| bsd-2-clause |
ovresko/erpnext | erpnext/regional/doctype/gst_settings/gst_settings.py | 19 | 3485 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, os
from frappe import _
from frappe.utils import get_url, nowdate, date_diff
from frappe.model.document import Document
from frappe.contacts.doctype.contact.contact import get_default_contact
class EmailMissing(frappe.ValidationError): pass
class GSTSettings(Document):
def onload(self):
data = frappe._dict()
data.total_addresses = frappe.db.sql('''select count(*) from tabAddress where country = "India"''')
data.total_addresses_with_gstin = frappe.db.sql('''select distinct count(*)
from tabAddress where country = "India" and ifnull(gstin, '')!='' ''')
self.set_onload('data', data)
@frappe.whitelist()
def send_reminder():
frappe.has_permission('GST Settings', throw=True)
last_sent = frappe.db.get_single_value('GST Settings', 'gstin_email_sent_on')
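	# Throttle: allow at most one bulk GSTIN reminder every 3 days.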
if last_sent and date_diff(nowdate(), last_sent) < 3:
frappe.throw(_("Please wait 3 days before resending the reminder."))
frappe.db.set_value('GST Settings', 'GST Settings', 'gstin_email_sent_on', nowdate())
# enqueue if there are a large number of customers, suppliers
frappe.enqueue('erpnext.regional.doctype.gst_settings.gst_settings.send_gstin_reminder_to_all_parties')
frappe.msgprint(_('Email Reminders will be sent to all parties with email contacts'))
def send_gstin_reminder_to_all_parties():
parties = []
for address_name in frappe.db.sql('''select name
from tabAddress where country = "India" and ifnull(gstin, '')='' '''):
address = frappe.get_doc('Address', address_name[0])
for link in address.links:
party = frappe.get_doc(link.link_doctype, link.link_name)
if link.link_doctype in ('Customer', 'Supplier'):
t = (link.link_doctype, link.link_name, address.email_id)
if t not in parties:
parties.append(t)
sent_to = []
for party in parties:
# get email from default contact
try:
email_id = _send_gstin_reminder(party[0], party[1], party[2], sent_to)
sent_to.append(email_id)
except EmailMissing:
pass
@frappe.whitelist()
def send_gstin_reminder(party_type, party):
'''Send GSTIN reminder to one party (called from Customer, Supplier form)'''
frappe.has_permission(party_type, throw=True)
email = _send_gstin_reminder(party_type, party)
if email:
frappe.msgprint(_('Reminder to update GSTIN Sent'), title='Reminder sent', indicator='green')
def _send_gstin_reminder(party_type, party, default_email_id=None, sent_to=None):
'''Send GST Reminder email'''
email_id = frappe.db.get_value('Contact', get_default_contact(party_type, party), 'email_id')
if not email_id:
# get email from address
email_id = default_email_id
if not email_id:
frappe.throw(_('Email not found in default contact'), exc=EmailMissing)
if sent_to and email_id in sent_to:
return
frappe.sendmail(
subject='Please update your GSTIN',
recipients=email_id,
message='''
<p>Hello,</p>
<p>Please help us send you GST Ready Invoices.</p>
<p>
<a href="{0}?party={1}">
Click here to update your GSTIN Number in our system
</a>
</p>
<p style="color: #aaa; font-size: 11px; margin-top: 30px;">
Get your GST Ready ERP system at <a href="https://erpnext.com">https://erpnext.com</a>
<br>
ERPNext is a free and open source ERP system.
</p>
'''.format(os.path.join(get_url(), '/regional/india/update-gstin'), party)
)
return email_id
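def _demo_send_reminders():
	# Hedged usage sketch, not part of the module: how the whitelisted
	# endpoints above would typically be invoked from server-side code.
	# 'Acme Traders' is a hypothetical customer name.
	send_gstin_reminder('Customer', 'Acme Traders')  # remind one party
	send_reminder()  # remind all parties; throttled to once every 3 days, enqueued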
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/python-openid/admin/builddiscover.py | 66 | 2207 | #!/usr/bin/env python
import os.path
import urlparse
from openid.test import discoverdata
manifest_header = """\
# This file contains test cases for doing YADIS identity URL and
# service discovery. For each case, there are three URLs. The first
# URL is the user input. The second is the identity URL and the third
# is the URL from which the XRDS document should be read.
#
# The file format is as follows:
# User URL <tab> Identity URL <tab> XRDS URL <newline>
#
# blank lines and lines starting with # should be ignored.
#
# To use this test:
#
# 1. Run your discovery routine on the User URL.
#
# 2. Compare the identity URL returned by the discovery routine to the
# identity URL on that line of the file. It must be an EXACT match.
#
# 3. Do a regular HTTP GET on the XRDS URL. Compare the content that
# was returned by your discovery routine with the content returned
# from that URL. It should also be an exact match.
"""
def buildDiscover(base_url, out_dir):
"""Convert all files in a directory to apache mod_asis files in
another directory."""
test_data = discoverdata.readTests(discoverdata.default_test_file)
def writeTestFile(test_name):
template = test_data[test_name]
data = discoverdata.fillTemplate(
test_name, template, base_url, discoverdata.example_xrds)
out_file_name = os.path.join(out_dir, test_name)
out_file = file(out_file_name, 'w')
out_file.write(data)
manifest = [manifest_header]
for success, input_name, id_name, result_name in discoverdata.testlist:
if not success:
continue
writeTestFile(input_name)
input_url = urlparse.urljoin(base_url, input_name)
id_url = urlparse.urljoin(base_url, id_name)
result_url = urlparse.urljoin(base_url, result_name)
manifest.append('\t'.join((input_url, id_url, result_url)))
manifest.append('\n')
manifest_file_name = os.path.join(out_dir, 'manifest.txt')
manifest_file = file(manifest_file_name, 'w')
for chunk in manifest:
manifest_file.write(chunk)
manifest_file.close()
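def read_manifest(manifest_file_name):
    # Hedged companion sketch, not part of the original script: parses the
    # manifest.txt written above, following the documented format
    # "User URL <tab> Identity URL <tab> XRDS URL", ignoring blank lines
    # and '#' comments.
    cases = []
    for line in file(manifest_file_name):
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        cases.append(tuple(line.split('\t')))
    return cases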
if __name__ == '__main__':
import sys
buildDiscover(*sys.argv[1:])
| agpl-3.0 |
pkkid/python-plexapi | plexapi/mixins.py | 1 | 20849 | # -*- coding: utf-8 -*-
from urllib.parse import quote_plus, urlencode
from plexapi import media, settings, utils
from plexapi.exceptions import BadRequest, NotFound
class AdvancedSettingsMixin(object):
""" Mixin for Plex objects that can have advanced settings. """
def preferences(self):
""" Returns a list of :class:`~plexapi.settings.Preferences` objects. """
data = self._server.query(self._details_key)
return self.findItems(data, settings.Preferences, rtag='Preferences')
def preference(self, pref):
""" Returns a :class:`~plexapi.settings.Preferences` object for the specified pref.
Parameters:
pref (str): The id of the preference to return.
"""
prefs = self.preferences()
try:
return next(p for p in prefs if p.id == pref)
except StopIteration:
availablePrefs = [p.id for p in prefs]
raise NotFound('Unknown preference "%s" for %s. '
'Available preferences: %s'
% (pref, self.TYPE, availablePrefs)) from None
def editAdvanced(self, **kwargs):
""" Edit a Plex object's advanced settings. """
data = {}
key = '%s/prefs?' % self.key
preferences = {pref.id: pref for pref in self.preferences() if pref.enumValues}
for settingID, value in kwargs.items():
try:
pref = preferences[settingID]
except KeyError:
raise NotFound('%s not found in %s' % (value, list(preferences.keys())))
enumValues = pref.enumValues
if enumValues.get(value, enumValues.get(str(value))):
data[settingID] = value
else:
raise NotFound('%s not found in %s' % (value, list(enumValues)))
url = key + urlencode(data)
self._server.query(url, method=self._server._session.put)
def defaultAdvanced(self):
""" Edit all of a Plex object's advanced settings to default. """
data = {}
key = '%s/prefs?' % self.key
for preference in self.preferences():
data[preference.id] = preference.default
url = key + urlencode(data)
self._server.query(url, method=self._server._session.put)
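def _demo_advanced_settings(item):
    # Hedged usage sketch, not part of the library; `item` is any object
    # using AdvancedSettingsMixin. The setting id 'episodeSort' and its value
    # are hypothetical examples -- valid ids come from item.preferences().
    for pref in item.preferences():       # inspect the available settings
        print(pref.id, pref.default, pref.enumValues)
    item.editAdvanced(episodeSort=1)      # validated against pref.enumValues
    item.defaultAdvanced()                # reset everything to defaults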
class ArtUrlMixin(object):
""" Mixin for Plex objects that can have a background artwork url. """
@property
def artUrl(self):
""" Return the art url for the Plex object. """
art = self.firstAttr('art', 'grandparentArt')
return self._server.url(art, includeToken=True) if art else None
class ArtMixin(ArtUrlMixin):
""" Mixin for Plex objects that can have background artwork. """
def arts(self):
""" Returns list of available :class:`~plexapi.media.Art` objects. """
return self.fetchItems('/library/metadata/%s/arts' % self.ratingKey, cls=media.Art)
def uploadArt(self, url=None, filepath=None):
""" Upload a background artwork from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/arts?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/arts?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setArt(self, art):
""" Set the background artwork for a Plex object.
Parameters:
art (:class:`~plexapi.media.Art`): The art object to select.
"""
art.select()
class BannerUrlMixin(object):
""" Mixin for Plex objects that can have a banner url. """
@property
def bannerUrl(self):
""" Return the banner url for the Plex object. """
banner = self.firstAttr('banner')
return self._server.url(banner, includeToken=True) if banner else None
class BannerMixin(BannerUrlMixin):
""" Mixin for Plex objects that can have banners. """
def banners(self):
""" Returns list of available :class:`~plexapi.media.Banner` objects. """
return self.fetchItems('/library/metadata/%s/banners' % self.ratingKey, cls=media.Banner)
def uploadBanner(self, url=None, filepath=None):
""" Upload a banner from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/banners?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/banners?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setBanner(self, banner):
""" Set the banner for a Plex object.
Parameters:
banner (:class:`~plexapi.media.Banner`): The banner object to select.
"""
banner.select()
class PosterUrlMixin(object):
""" Mixin for Plex objects that can have a poster url. """
@property
def thumbUrl(self):
""" Return the thumb url for the Plex object. """
thumb = self.firstAttr('thumb', 'parentThumb', 'grandparentThumb')
return self._server.url(thumb, includeToken=True) if thumb else None
@property
def posterUrl(self):
""" Alias to self.thumbUrl. """
return self.thumbUrl
class PosterMixin(PosterUrlMixin):
""" Mixin for Plex objects that can have posters. """
def posters(self):
""" Returns list of available :class:`~plexapi.media.Poster` objects. """
return self.fetchItems('/library/metadata/%s/posters' % self.ratingKey, cls=media.Poster)
def uploadPoster(self, url=None, filepath=None):
""" Upload a poster from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/posters?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/posters?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setPoster(self, poster):
""" Set the poster for a Plex object.
Parameters:
poster (:class:`~plexapi.media.Poster`): The poster object to select.
"""
poster.select()
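def _demo_poster(item):
    # Hedged usage sketch, not part of the library: the Art, Banner and
    # Poster mixins above share this same list/upload/select pattern.
    # The URL is a hypothetical example.
    item.uploadPoster(url='https://example.com/poster.jpg')
    available = item.posters()      # list of media.Poster objects
    item.setPoster(available[0])    # equivalent to available[0].select()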
class RatingMixin(object):
""" Mixin for Plex objects that can have user star ratings. """
def rate(self, rating=None):
""" Rate the Plex object. Note: Plex ratings are displayed out of 5 stars (e.g. rating 7.0 = 3.5 stars).
Parameters:
rating (float, optional): Rating from 0 to 10. Exclude to reset the rating.
Raises:
:exc:`~plexapi.exceptions.BadRequest`: If the rating is invalid.
"""
if rating is None:
rating = -1
elif not isinstance(rating, (int, float)) or rating < 0 or rating > 10:
raise BadRequest('Rating must be between 0 and 10.')
key = '/:/rate?key=%s&identifier=com.plexapp.plugins.library&rating=%s' % (self.ratingKey, rating)
self._server.query(key, method=self._server._session.put)
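def _demo_rate(item):
    # Hedged usage sketch, not part of the library: ratings are stored on a
    # 0-10 scale but rendered as 5 stars in the Plex UI.
    item.rate(7.0)   # stored as 7/10, displayed as 3.5 stars
    item.rate()      # omit the rating to reset it (sends -1)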
class SplitMergeMixin(object):
""" Mixin for Plex objects that can be split and merged. """
def split(self):
""" Split duplicated Plex object into separate objects. """
key = '/library/metadata/%s/split' % self.ratingKey
return self._server.query(key, method=self._server._session.put)
def merge(self, ratingKeys):
""" Merge other Plex objects into the current object.
Parameters:
ratingKeys (list): A list of rating keys to merge.
"""
if not isinstance(ratingKeys, list):
ratingKeys = str(ratingKeys).split(',')
key = '%s/merge?ids=%s' % (self.key, ','.join([str(r) for r in ratingKeys]))
return self._server.query(key, method=self._server._session.put)
class UnmatchMatchMixin(object):
""" Mixin for Plex objects that can be unmatched and matched. """
def unmatch(self):
""" Unmatches metadata match from object. """
key = '/library/metadata/%s/unmatch' % self.ratingKey
self._server.query(key, method=self._server._session.put)
def matches(self, agent=None, title=None, year=None, language=None):
""" Return list of (:class:`~plexapi.media.SearchResult`) metadata matches.
Parameters:
agent (str): Agent name to be used (imdb, thetvdb, themoviedb, etc.)
title (str): Title of item to search for
year (str): Year of item to search in
language (str) : Language of item to search in
Examples:
1. video.matches()
2. video.matches(title="something", year=2020)
3. video.matches(title="something")
4. video.matches(year=2020)
5. video.matches(title="something", year="")
6. video.matches(title="", year=2020)
7. video.matches(title="", year="")
1. The default behaviour in Plex Web = no params in plexapi
2. Both title and year specified by user
3. Year automatically filled in
4. Title automatically filled in
5. Explicitly searches for title with blank year
6. Explicitly searches for blank title with year
7. I don't know what the user is thinking... return the same result as 1
For 2 to 7, the agent and language is automatically filled in
"""
key = '/library/metadata/%s/matches' % self.ratingKey
params = {'manual': 1}
if agent and not any([title, year, language]):
params['language'] = self.section().language
params['agent'] = utils.getAgentIdentifier(self.section(), agent)
else:
if any(x is not None for x in [agent, title, year, language]):
if title is None:
params['title'] = self.title
else:
params['title'] = title
if year is None:
params['year'] = self.year
else:
params['year'] = year
params['language'] = language or self.section().language
if agent is None:
params['agent'] = self.section().agent
else:
params['agent'] = utils.getAgentIdentifier(self.section(), agent)
key = key + '?' + urlencode(params)
data = self._server.query(key, method=self._server._session.get)
return self.findItems(data, initpath=key)
def fixMatch(self, searchResult=None, auto=False, agent=None):
""" Use match result to update show metadata.
Parameters:
auto (bool): True uses first match from matches
False allows user to provide the match
searchResult (:class:`~plexapi.media.SearchResult`): Search result from
~plexapi.base.matches()
agent (str): Agent name to be used (imdb, thetvdb, themoviedb, etc.)
"""
key = '/library/metadata/%s/match' % self.ratingKey
if auto:
autoMatch = self.matches(agent=agent)
if autoMatch:
searchResult = autoMatch[0]
else:
raise NotFound('No matches found using this agent: (%s:%s)' % (agent, autoMatch))
elif not searchResult:
raise NotFound('fixMatch() requires either auto=True or '
'searchResult=:class:`~plexapi.media.SearchResult`.')
params = {'guid': searchResult.guid,
'name': searchResult.name}
data = key + '?' + urlencode(params)
self._server.query(data, method=self._server._session.put)
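def _demo_fix_match(video, agent='tmdb'):
    # Hedged usage sketch, not part of the library: the unmatch -> matches ->
    # fixMatch flow described in the docstrings above. 'tmdb' is an assumed
    # agent name; use one configured for the item's library section.
    video.unmatch()                       # drop the current metadata match
    results = video.matches(agent=agent)  # manual search, params auto-filled
    if results:
        video.fixMatch(searchResult=results[0])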
class CollectionMixin(object):
""" Mixin for Plex objects that can have collections. """
def addCollection(self, collections, locked=True):
""" Add a collection tag(s).
Parameters:
collections (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('collection', collections, locked=locked)
def removeCollection(self, collections, locked=True):
""" Remove a collection tag(s).
Parameters:
collections (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('collection', collections, locked=locked, remove=True)
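def _demo_tags(item):
    # Hedged usage sketch, not part of the library: every tag mixin below
    # follows this same add/remove pattern via the object's _edit_tags()
    # helper. The tag values are hypothetical examples.
    item.addCollection(['Classics', 'Favourites'])       # locks the field
    item.removeCollection(['Favourites'], locked=False)  # and unlocks it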
class CountryMixin(object):
""" Mixin for Plex objects that can have countries. """
def addCountry(self, countries, locked=True):
""" Add a country tag(s).
Parameters:
countries (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('country', countries, locked=locked)
def removeCountry(self, countries, locked=True):
""" Remove a country tag(s).
Parameters:
countries (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('country', countries, locked=locked, remove=True)
class DirectorMixin(object):
""" Mixin for Plex objects that can have directors. """
def addDirector(self, directors, locked=True):
""" Add a director tag(s).
Parameters:
directors (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('director', directors, locked=locked)
def removeDirector(self, directors, locked=True):
""" Remove a director tag(s).
Parameters:
directors (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('director', directors, locked=locked, remove=True)
class GenreMixin(object):
""" Mixin for Plex objects that can have genres. """
def addGenre(self, genres, locked=True):
""" Add a genre tag(s).
Parameters:
genres (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('genre', genres, locked=locked)
def removeGenre(self, genres, locked=True):
""" Remove a genre tag(s).
Parameters:
genres (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('genre', genres, locked=locked, remove=True)
class LabelMixin(object):
""" Mixin for Plex objects that can have labels. """
def addLabel(self, labels, locked=True):
""" Add a label tag(s).
Parameters:
labels (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('label', labels, locked=locked)
def removeLabel(self, labels, locked=True):
""" Remove a label tag(s).
Parameters:
labels (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('label', labels, locked=locked, remove=True)
class MoodMixin(object):
""" Mixin for Plex objects that can have moods. """
def addMood(self, moods, locked=True):
""" Add a mood tag(s).
Parameters:
moods (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('mood', moods, locked=locked)
def removeMood(self, moods, locked=True):
""" Remove a mood tag(s).
Parameters:
moods (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('mood', moods, locked=locked, remove=True)
class ProducerMixin(object):
""" Mixin for Plex objects that can have producers. """
def addProducer(self, producers, locked=True):
""" Add a producer tag(s).
Parameters:
producers (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('producer', producers, locked=locked)
def removeProducer(self, producers, locked=True):
""" Remove a producer tag(s).
Parameters:
producers (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('producer', producers, locked=locked, remove=True)
class SimilarArtistMixin(object):
""" Mixin for Plex objects that can have similar artists. """
def addSimilarArtist(self, artists, locked=True):
""" Add a similar artist tag(s).
Parameters:
artists (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('similar', artists, locked=locked)
def removeSimilarArtist(self, artists, locked=True):
""" Remove a similar artist tag(s).
Parameters:
artists (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('similar', artists, locked=locked, remove=True)
class StyleMixin(object):
""" Mixin for Plex objects that can have styles. """
def addStyle(self, styles, locked=True):
""" Add a style tag(s).
Parameters:
styles (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('style', styles, locked=locked)
def removeStyle(self, styles, locked=True):
""" Remove a style tag(s).
Parameters:
styles (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('style', styles, locked=locked, remove=True)
class TagMixin(object):
""" Mixin for Plex objects that can have tags. """
def addTag(self, tags, locked=True):
""" Add a tag(s).
Parameters:
tags (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('tag', tags, locked=locked)
def removeTag(self, tags, locked=True):
""" Remove a tag(s).
Parameters:
tags (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('tag', tags, locked=locked, remove=True)
class WriterMixin(object):
""" Mixin for Plex objects that can have writers. """
def addWriter(self, writers, locked=True):
""" Add a writer tag(s).
Parameters:
writers (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('writer', writers, locked=locked)
def removeWriter(self, writers, locked=True):
""" Remove a writer tag(s).
Parameters:
writers (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('writer', writers, locked=locked, remove=True)
| bsd-3-clause |