code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import forms
from django import http
import mock
import six
from horizon import exceptions
from horizon.test import helpers as test
from horizon import workflows
# Opaque fixture identifiers fed into forms/contexts throughout the tests.
PROJECT_ID = "a23lkjre389fwenj"
INSTANCE_ID = "sdlkjhf9832roiw"
def local_callback_func(request, context):
    """Connection handler fixture; ignores its arguments, yields "one"."""
    del request, context  # unused -- signature imposed by Step connections
    return "one"
def other_callback_func(request, context):
    """Connection handler fixture; ignores its arguments, yields "two"."""
    del request, context  # unused -- signature imposed by Step connections
    return "two"
def extra_callback_func(request, context):
    """Connection handler fixture; ignores its arguments, yields "extra"."""
    del request, context  # unused -- signature imposed by Step connections
    return "extra"
class TestActionOne(workflows.Action):
    """Fixture action with two choice fields populated via populate_* hooks."""
    project_id = forms.ChoiceField(label="Project")
    user_id = forms.ChoiceField(label="User")

    class Meta(object):
        # The Action metaclass lifts name/slug out of Meta onto the class.
        name = "Test Action One"
        slug = "test_action_one"

    def populate_project_id_choices(self, request, context):
        """Return the (value, label) choices for the project_id field."""
        return [(PROJECT_ID, "test_project")]

    def populate_user_id_choices(self, request, context):
        """Return the (value, label) choices for the user_id field."""
        return [(request.user.id, request.user.username)]

    def handle(self, request, context):
        """Per-action handler; the returned dict is merged into the context."""
        return {"foo": "bar"}
class TestActionTwo(workflows.Action):
    """Fixture action with a single free-text field and no handlers."""
    instance_id = forms.CharField(label="Instance")

    class Meta(object):
        name = "Test Action Two"
        slug = "test_action_two"
class TestActionThree(workflows.Action):
    """Fixture action using a textarea widget; contributed as "extra"."""
    extra = forms.CharField(widget=forms.widgets.Textarea)

    class Meta(object):
        name = "Test Action Three"
        slug = "test_action_three"
class AdminAction(workflows.Action):
    """Fixture action gated behind the "horizon.test" permission."""
    admin_id = forms.CharField(label="Admin")

    class Meta(object):
        name = "Admin Action"
        slug = "admin_action"
        # Step is only shown to users holding this permission.
        permissions = ("horizon.test",)
class AdminForbiddenAction(workflows.Action):
    """Fixture action whose policy rule is denied in the policy tests."""
    admin_id = forms.CharField(label="Admin forbidden")

    class Meta(object):
        name = "Admin Action"
        slug = "admin_action"
        # Matched against the patched openstack_auth.policy.check().
        policy_rules = (('action', 'forbidden'),)
class TestStepOne(workflows.Step):
    """First default step; contributes project and user ids, no connections."""
    action_class = TestActionOne
    contributes = ("project_id", "user_id")
class TestStepTwo(workflows.Step):
    """Second default step; reacts to project_id changes via two handlers."""
    action_class = TestActionTwo
    depends_on = ("project_id",)
    contributes = ("instance_id",)
    # The second handler is deliberately a dotted-path string to exercise
    # string resolution of connection callbacks.
    connections = {"project_id":
                   (local_callback_func,
                    "horizon.test.tests.workflows.other_callback_func")}
class TestExtraStep(workflows.Step):
    """Optional step registered by tests; ordered between steps one and two."""
    action_class = TestActionThree
    depends_on = ("project_id",)
    contributes = ("extra_data",)
    connections = {"project_id": (extra_callback_func,)}
    # Ordering hints consumed by the workflow step sorter.
    after = TestStepOne
    before = TestStepTwo
class AdminStep(workflows.Step):
    """Permission-guarded step; only visible with the "horizon.test" perm."""
    action_class = AdminAction
    contributes = ("admin_id",)
    after = TestStepOne
    before = TestStepTwo
class AdminForbiddenStep(workflows.Step):
    """Step hidden by the policy check in test_step_is_hidden_on_policy."""
    action_class = AdminForbiddenAction
class TestWorkflow(workflows.Workflow):
    """Primary workflow fixture; extra steps are registered per-test and the
    class-level registry is reset in WorkflowsTests tearDown."""
    slug = "test_workflow"
    default_steps = (TestStepOne, TestStepTwo)
class TestWorkflowView(workflows.WorkflowView):
    """View fixture rendering TestWorkflow with a minimal template."""
    workflow_class = TestWorkflow
    template_name = "workflow.html"
class TestFullscreenWorkflow(workflows.Workflow):
    """Workflow fixture exercising the fullscreen rendering flag."""
    slug = 'test_fullscreen_workflow'
    default_steps = (TestStepOne, TestStepTwo)
    fullscreen = True
class TestFullscreenWorkflowView(workflows.WorkflowView):
    """View fixture for the fullscreen workflow variant."""
    workflow_class = TestFullscreenWorkflow
    template_name = "workflow.html"
class WorkflowsTests(test.TestCase):
    """Exercise workflow/step construction, validation, rendering,
    registration, permissions and policy-based step filtering."""

    def setUp(self):
        super(WorkflowsTests, self).setUp()
        # Default to an always-allow policy; tests needing a stricter
        # policy stop this patcher and install their own check.
        self.policy_patcher = mock.patch(
            'openstack_auth.policy.check', lambda action, request: True)
        self.policy_check = self.policy_patcher.start()
        self.addCleanup(mock.patch.stopall)

    def tearDown(self):
        super(WorkflowsTests, self).tearDown()
        self._reset_workflow()

    def _reset_workflow(self):
        # Step registration lives on the workflow class itself, so it must
        # be cleared to keep tests independent of registration order.
        TestWorkflow._cls_registry = set([])

    def test_workflow_construction(self):
        """Registered steps are ordered according to after/before hints."""
        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(self.request)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestExtraStep: test_action_three>',
                                  '<TestStepTwo: test_action_two>'])
        self.assertEqual(set(['project_id']), flow.depends_on)

    def test_step_construction(self):
        step_one = TestStepOne(TestWorkflow(self.request))
        # Action slug is moved from Meta by metaclass, and
        # Step inherits slug from action.
        self.assertEqual(TestActionOne.name, step_one.name)
        self.assertEqual(TestActionOne.slug, step_one.slug)
        # Handlers should be empty since there are no connections.
        self.assertEqual(step_one._handlers, {})

        step_two = TestStepTwo(TestWorkflow(self.request))
        # Handlers should be populated since we do have connections.
        self.assertEqual([local_callback_func, other_callback_func],
                         step_two._handlers["project_id"])

    def test_step_invalid_connections_handlers_not_list_or_tuple(self):
        """Connection values must be list/tuple sequences of handlers."""
        class InvalidStepA(TestStepTwo):
            connections = {'project_id': {}}

        class InvalidStepB(TestStepTwo):
            connections = {'project_id': ''}

        with self.assertRaises(TypeError):
            InvalidStepA(TestWorkflow(self.request))

        with self.assertRaises(TypeError):
            InvalidStepB(TestWorkflow(self.request))

    def test_step_invalid_connection_handler_not_string_or_callable(self):
        """Each handler must be a dotted-path string or a callable."""
        class InvalidStepA(TestStepTwo):
            connections = {'project_id': (None,)}

        class InvalidStepB(TestStepTwo):
            connections = {'project_id': (0,)}

        with self.assertRaises(TypeError):
            InvalidStepA(TestWorkflow(self.request))

        with self.assertRaises(TypeError):
            InvalidStepB(TestWorkflow(self.request))

    def test_step_invalid_callback(self):
        # This should raise an exception: the string is not resolvable
        # as a dotted import path.
        class InvalidStep(TestStepTwo):
            connections = {"project_id": ('local_callback_func',)}

        with self.assertRaises(ValueError):
            InvalidStep(TestWorkflow(self.request))

    def test_connection_handlers_called(self):
        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(self.request)

        # This should set the value without any errors, but trigger nothing
        flow.context['does_not_exist'] = False
        self.assertFalse(flow.context['does_not_exist'])

        # The order here is relevant. Note that we inserted "extra" between
        # steps one and two, and one has no handlers, so we should see
        # a response from extra, then one from each of step two's handlers.
        val = flow.context.set('project_id', PROJECT_ID)
        self.assertEqual([('test_action_three', 'extra'),
                          ('test_action_two', 'one'),
                          ('test_action_two', 'two')], val)

    def test_workflow_validation(self):
        flow = TestWorkflow(self.request)

        # Missing items fail validation.
        with self.assertRaises(exceptions.WorkflowValidationError):
            flow.is_valid()

        # All required items pass validation.
        seed = {"project_id": PROJECT_ID,
                "user_id": self.user.id,
                "instance_id": INSTANCE_ID}
        req = self.factory.post("/", seed)
        req.user = self.user
        flow = TestWorkflow(req, context_seed={"project_id": PROJECT_ID})
        for step in flow.steps:
            if not step.action.is_valid():
                self.fail("Step %s was unexpectedly invalid: %s"
                          % (step.slug, step.action.errors))
        self.assertTrue(flow.is_valid())

        # Additional items shouldn't affect validation
        flow.context.set("extra_data", "foo")
        self.assertTrue(flow.is_valid())

    def test_workflow_finalization(self):
        flow = TestWorkflow(self.request)
        self.assertTrue(flow.finalize())

    def test_workflow_view(self):
        view = TestWorkflowView.as_view()
        req = self.factory.get("/")
        res = view(req)
        self.assertEqual(200, res.status_code)

    def test_workflow_registration(self):
        """register() takes effect for workflows built afterwards."""
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestStepTwo: test_action_two>'])

        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(req)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestExtraStep: test_action_three>',
                                  '<TestStepTwo: test_action_two>'])

    def test_workflow_render(self):
        """Rendered output mentions the workflow and every action name."""
        TestWorkflow.register(TestExtraStep)
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        output = http.HttpResponse(flow.render())
        self.assertContains(output, six.text_type(flow.name))
        self.assertContains(output, six.text_type(TestActionOne.name))
        self.assertContains(output, six.text_type(TestActionTwo.name))
        self.assertContains(output, six.text_type(TestActionThree.name))

    def test_has_permissions(self):
        self.assertQuerysetEqual(TestWorkflow._cls_registry, [])
        TestWorkflow.register(AdminStep)
        flow = TestWorkflow(self.request)
        step = AdminStep(flow)

        self.assertItemsEqual(step.permissions,
                              ("horizon.test",))
        # Without the permission, the admin step is filtered out.
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestStepTwo: test_action_two>'])

        self.set_permissions(['test'])
        self.request.user = self.user
        flow = TestWorkflow(self.request)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<AdminStep: admin_action>',
                                  '<TestStepTwo: test_action_two>'])

    def test_step_is_hidden_on_policy(self):
        # Replace the permissive default policy from setUp.
        self.policy_patcher.stop()

        def policy_check(action, request):
            # Deny exactly the rule declared by AdminForbiddenAction.
            if action == (('action', 'forbidden'),):
                return False
            return True

        with mock.patch('openstack_auth.policy.check', policy_check):
            TestWorkflow.register(AdminForbiddenStep)
            flow = TestWorkflow(self.request)
            output = http.HttpResponse(flow.render())
            self.assertNotContains(output,
                                   six.text_type(AdminForbiddenAction.name))

    def test_entry_point(self):
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        self.assertEqual("test_action_one", flow.get_entry_point())

        flow = TestWorkflow(req, entry_point="test_action_two")
        self.assertEqual("test_action_two", flow.get_entry_point())
| wolverineav/horizon | horizon/test/tests/workflows.py | Python | apache-2.0 | 11,478 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-27 16:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11, 2018-04-27). Prefer a
    # follow-up migration over hand-editing these operations.

    dependencies = [
        ('legislative', '0020_auto_20180424_1009'),
    ]

    operations = [
        # New optional link from a person's vote to the body membership.
        migrations.AddField(
            model_name='personvote',
            name='membership',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='legislative.BodyMembership'),
        ),
        # NOTE(review): AlterField sets district nullable with a
        # related_name; the previous field state is not visible here.
        migrations.AlterField(
            model_name='bodymembership',
            name='district',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='body_memberships', to='geo.District'),
        ),
    ]
| access-missouri/am-django-project | am/legislative/migrations/0021_auto_20180427_1115.py | Python | bsd-2-clause | 848 |
# -*- coding: utf-8 -*-
"""
pybitcoin
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
# data structure constants
UINT_MAX = 2**32-1  # largest value of an unsigned 32-bit field

# protocol constants
SATOSHIS_PER_COIN = 10**8  # 1 BTC == 100,000,000 satoshis
MAX_BYTES_AFTER_OP_RETURN = 80  # max payload bytes allowed after OP_RETURN

# fee defaults
STANDARD_FEE = 1000  # 1000 satoshis = 10 bits = .01 mbits = .00001 BTC
OP_RETURN_FEE = 10000  # 10k satoshis = .0001 BTC
| blockstack/pybitcoin | pybitcoin/constants.py | Python | mit | 412 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    # Deferred imports, published through globals() so type maps can name
    # these classes -- presumably to avoid circular imports between
    # generated model modules (OpenAPI Generator pattern; confirm).
    from plaid.model.recurring_transaction_frequency import RecurringTransactionFrequency
    from plaid.model.transaction_stream_amount import TransactionStreamAmount
    globals()['RecurringTransactionFrequency'] = RecurringTransactionFrequency
    globals()['TransactionStreamAmount'] = TransactionStreamAmount
class TransactionStream(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model declares no enum-restricted values and no validations.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'account_id': (str,),  # noqa: E501
            'stream_id': (str,),  # noqa: E501
            'category_id': (str,),  # noqa: E501
            'category': ([str],),  # noqa: E501
            'description': (str,),  # noqa: E501
            'first_date': (date,),  # noqa: E501
            'last_date': (date,),  # noqa: E501
            'frequency': (RecurringTransactionFrequency,),  # noqa: E501
            'transaction_ids': ([str],),  # noqa: E501
            'average_amount': (TransactionStreamAmount,),  # noqa: E501
            'is_active': (bool,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps pythonic attribute names to their JSON wire names (identical here).
    attribute_map = {
        'account_id': 'account_id',  # noqa: E501
        'stream_id': 'stream_id',  # noqa: E501
        'category_id': 'category_id',  # noqa: E501
        'category': 'category',  # noqa: E501
        'description': 'description',  # noqa: E501
        'first_date': 'first_date',  # noqa: E501
        'last_date': 'last_date',  # noqa: E501
        'frequency': 'frequency',  # noqa: E501
        'transaction_ids': 'transaction_ids',  # noqa: E501
        'average_amount': 'average_amount',  # noqa: E501
        'is_active': 'is_active',  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes set in __init__; everything else goes
    # through _data_store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, account_id, stream_id, category_id, category, description, first_date, last_date, frequency, transaction_ids, average_amount, is_active, *args, **kwargs):  # noqa: E501
        """TransactionStream - a model defined in OpenAPI

        Args:
            account_id (str): The ID of the account to which the stream belongs
            stream_id (str): A unique id for the stream
            category_id (str): The ID of the category to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview).
            category ([str]): A hierarchical array of the categories to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview).
            description (str): A description of the transaction stream.
            first_date (date): The posted date of the earliest transaction in the stream.
            last_date (date): The posted date of the latest transaction in the stream.
            frequency (RecurringTransactionFrequency):
            transaction_ids ([str]): An array of Plaid transaction IDs belonging to the stream, sorted by posted date.
            average_amount (TransactionStreamAmount):
            is_active (bool): indicates whether the transaction stream is still live.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        # Pop framework-internal kwargs before treating the rest as data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.account_id = account_id
        self.stream_id = stream_id
        self.category_id = category_id
        self.category = category
        self.description = description
        self.first_date = first_date
        self.last_date = last_date
        self.frequency = frequency
        self.transaction_ids = transaction_ids
        self.average_amount = average_amount
        self.is_active = is_active
        # Remaining kwargs are either stored as extra properties or, when
        # the configuration says so, silently discarded.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| plaid/plaid-python | plaid/model/transaction_stream.py | Python | mit | 9,665 |
"""Provide dependency graph"""
import miasm2.expression.expression as m2_expr
from miasm2.core.graph import DiGraph
from miasm2.core.asmbloc import asm_label, expr_is_int_or_label, expr_is_label
from miasm2.expression.simplifications import expr_simp
from miasm2.ir.symbexec import symbexec
from miasm2.ir.ir import irbloc, AssignBlock
from miasm2.ir.translators import Translator
from miasm2.expression.expression_helper import possible_values
try:
import z3
except ImportError:
pass
class DependencyNode(object):

    """Node elements of a DependencyGraph

    A dependency node stands for the dependency on the @element at line number
    @line_nb in the IRblock named @label, *before* the evaluation of this
    line.
    """

    # NOTE(review): "_step" is reserved in __slots__ but never assigned in
    # the visible code -- possibly a leftover; confirm before removing.
    __slots__ = ["_label", "_element", "_line_nb",
                 "_step", "_nostep_repr", "_hash"]

    def __init__(self, label, element, line_nb):
        """Create a dependency node with:
        @label: asm_label instance
        @element: Expr instance
        @line_nb: int
        """
        self._label = label
        self._element = element
        self._line_nb = line_nb
        # Instances are treated as immutable, so the identity tuple and the
        # hash are precomputed once.
        self._nostep_repr = (self._label, self._line_nb, self._element)
        self._hash = hash(
            (self._label, self._element, self._line_nb))

    def __hash__(self):
        """Returns a hash of @self to uniquely identify @self"""
        return self._hash

    def __eq__(self, depnode):
        """Returns True if @self and @depnode are equals.
        The attribute 'step' is not considered in the comparison.
        """
        if not isinstance(depnode, self.__class__):
            return False
        return (self.label == depnode.label and
                self.element == depnode.element and
                self.line_nb == depnode.line_nb)

    def __cmp__(self, node):
        """Compares @self with @node. The step attribute is not taken into
        account in the comparison.

        Python 2 only: relies on the builtin cmp().
        """
        if not isinstance(node, self.__class__):
            raise ValueError("Compare error between %s, %s" % (self.__class__,
                                                               node.__class__))
        return cmp((self.label, self.element, self.line_nb),
                   (node.label, node.element, node.line_nb))

    def __str__(self):
        """Returns a string representation of DependencyNode"""
        return "<%s %s %s %s>" % (self.__class__.__name__,
                                  self.label.name, self.element,
                                  self.line_nb)

    def __repr__(self):
        """Returns a string representation of DependencyNode"""
        return self.__str__()

    @property
    def nostep_repr(self):
        """Returns a representation of @self ignoring the step attribute"""
        return self._nostep_repr

    @property
    def label(self):
        "Name of the current IRBlock"
        return self._label

    @property
    def element(self):
        "Current tracked Expr"
        return self._element

    @property
    def line_nb(self):
        "Line in the current IRBlock"
        return self._line_nb
class DependencyState(object):

    """
    Store intermediate depnodes states during dependencygraph analysis
    """

    def __init__(self, label, inputs, pending, line_nb=None):
        self.label = label
        self.inputs = inputs
        self.history = [label]
        # Copy each pending set so sibling states cannot share mutations.
        self.pending = {k: set(v) for k, v in pending.iteritems()}
        self.line_nb = line_nb
        self.links = set()

        # Init lazy elements
        self._graph = None

    def __repr__(self):
        return "<State: %r (%r) (%r)>" % (self.label,
                                          self.pending,
                                          self.links)

    def extend(self, label):
        """Return a copy of itself, with itself in history
        @label: asm_label instance for the new DependencyState's label
        """
        new_state = self.__class__(label, self.inputs, self.pending)
        new_state.links = set(self.links)
        new_state.history = self.history + [label]
        return new_state

    def get_done_state(self):
        """Returns immutable object representing current state"""
        return (self.label, frozenset(self.links))

    def as_graph(self):
        """Generates a Digraph of dependencies"""
        graph = DiGraph()
        for node_a, node_b in self.links:
            # A (node, None) link marks a dependency start node.
            if not node_b:
                graph.add_node(node_a)
            else:
                graph.add_edge(node_a, node_b)
        for parent, sons in self.pending.iteritems():
            for son in sons:
                graph.add_edge(parent, son)
        return graph

    @property
    def graph(self):
        """Returns a DiGraph instance representing the DependencyGraph"""
        # Built lazily and cached; callers must not mutate links afterwards.
        if self._graph is None:
            self._graph = self.as_graph()
        return self._graph

    def remove_pendings(self, nodes):
        """Remove resolved @nodes"""
        for node in nodes:
            del self.pending[node]

    def add_pendings(self, future_pending):
        """Add @future_pending to the state"""
        for node, depnodes in future_pending.iteritems():
            if node not in self.pending:
                self.pending[node] = depnodes
            else:
                self.pending[node].update(depnodes)

    def link_element(self, element, line_nb):
        """Link element to its dependencies
        @element: the element to link
        @line_nb: the element's line
        """
        depnode = DependencyNode(self.label, element, line_nb)
        if not self.pending[element]:
            # Create start node
            self.links.add((depnode, None))
        else:
            # Link element to its known dependencies
            for node_son in self.pending[element]:
                self.links.add((depnode, node_son))

    def link_dependencies(self, element, line_nb, dependencies,
                          future_pending):
        """Link unfollowed dependencies and create remaining pending elements.
        @element: the element to link
        @line_nb: the element's line
        @dependencies: the element's dependencies
        @future_pending: the future dependencies
        """
        depnode = DependencyNode(self.label, element, line_nb)

        # Update pending, add link to unfollowed nodes
        for dependency in dependencies:
            if not dependency.follow:
                # Add non followed dependencies to the dependency graph
                parent = DependencyNode(
                    self.label, dependency.element, line_nb)
                self.links.add((parent, depnode))
                continue
            # Create future pending between new dependency and the current
            # element
            future_pending.setdefault(dependency.element, set()).add(depnode)
class DependencyResult(DependencyState):

    """Container and methods for DependencyGraph results"""

    def __init__(self, state, ira):
        # Adopt the final state's attributes; @ira is kept for emulation.
        self.label = state.label
        self.inputs = state.inputs
        self.history = state.history
        self.pending = state.pending
        self.line_nb = state.line_nb
        self.links = state.links
        self._ira = ira

        # Init lazy elements
        self._graph = None
        self._has_loop = None

    @property
    def unresolved(self):
        """Set of nodes whose dependencies weren't found"""
        return set(element for element in self.pending
                   if element != self._ira.IRDst)

    @property
    def relevant_nodes(self):
        """Set of nodes directly and indirectly influencing inputs"""
        output = set()
        for node_a, node_b in self.links:
            output.add(node_a)
            if node_b is not None:
                output.add(node_b)
        return output

    @property
    def relevant_labels(self):
        """List of labels containing nodes influencing inputs.
        The history order is preserved."""
        # Get used labels
        used_labels = set(depnode.label for depnode in self.relevant_nodes)
        # Keep history order
        output = []
        for label in self.history:
            if label in used_labels:
                output.append(label)
        return output

    @property
    def has_loop(self):
        """True iff there is at least one data dependencies cycle (regarding
        the associated depgraph)"""
        if self._has_loop is None:
            self._has_loop = self.graph.has_loop()
        return self._has_loop

    def irblock_slice(self, irb):
        """Slice of the dependency nodes on the irblock @irb
        @irb: irbloc instance
        """
        assignblks = []
        line2elements = {}
        for depnode in self.relevant_nodes:
            if depnode.label != irb.label:
                continue
            line2elements.setdefault(depnode.line_nb,
                                     set()).add(depnode.element)

        for line_nb, elements in sorted(line2elements.iteritems()):
            assignblk = AssignBlock()
            for element in elements:
                if element in irb.irs[line_nb]:
                    # constants, label, ... are not in destination
                    assignblk[element] = irb.irs[line_nb][element]
            assignblks.append(assignblk)

        return irbloc(irb.label, assignblks)

    def emul(self, ctx=None, step=False):
        """Symbolic execution of relevant nodes according to the history
        Return the values of inputs nodes' elements
        @ctx: (optional) Initial context as dictionnary
        @step: (optional) Verbose execution

        Warning: The emulation is not sound if the inputs nodes depend on loop
        variant.
        """
        # Init on a *copy* of regs_init: the previous code updated the
        # shared architecture-level dict in place, leaking @ctx into every
        # later consumer of self._ira.arch.regs.regs_init.
        ctx_init = dict(self._ira.arch.regs.regs_init)
        if ctx is not None:
            ctx_init.update(ctx)
        assignblks = []

        # Build a single affectation block according to history
        for label in self.relevant_labels[::-1]:
            assignblks += self.irblock_slice(self._ira.blocs[label]).irs

        # Eval the block
        temp_label = asm_label("Temp")
        symb_exec = symbexec(self._ira, ctx_init)
        symb_exec.emulbloc(irbloc(temp_label, assignblks), step=step)

        # Return only inputs values (others could be wrongs)
        return {element: symb_exec.symbols[element]
                for element in self.inputs}
class DependencyResultImplicit(DependencyResult):

    """Stand for a result of a DependencyGraph with implicit option

    Provide path constraints using the z3 solver"""

    # Z3 Solver instance, set by emul()
    _solver = None

    # Deliberately unsatisfiable affectation, used to discard paths with no
    # consistent solution.
    unsat_expr = m2_expr.ExprAff(m2_expr.ExprInt(0, 1),
                                 m2_expr.ExprInt(1, 1))

    def _gen_path_constraints(self, translator, expr, expected):
        """Generate path constraint from @expr. Handle special case with
        generated labels
        """
        out = []
        expected_is_label = expr_is_label(expected)
        for consval in possible_values(expr):
            # Skip values which cannot match @expected (label vs non-label)
            if (expected_is_label and
                    consval.value != expected):
                continue
            if (not expected_is_label and
                    expr_is_label(consval.value)):
                continue

            conds = z3.And(*[translator.from_expr(cond.to_constraint())
                             for cond in consval.constraints])
            if expected != consval.value:
                conds = z3.And(conds,
                               translator.from_expr(
                                   m2_expr.ExprAff(consval.value,
                                                   expected)))
            out.append(conds)

        if out:
            conds = z3.Or(*out)
        else:
            # Ex: expr: lblgen1, expected: 0x1234
            # -> Avoid unconsistent solution lblgen1 = 0x1234
            conds = translator.from_expr(self.unsat_expr)
        return conds

    def emul(self, ctx=None, step=False):
        """Symbolic execution along the saved history, accumulating path
        constraints in a z3 Solver (see is_satisfiable / constraints).
        @ctx: (optional) Initial context as dictionnary
        @step: (optional) Verbose execution
        Return the values of inputs nodes' elements
        """
        # Init on a *copy* of regs_init: the previous code mutated the
        # shared architecture-level dict in place via update(ctx).
        ctx_init = dict(self._ira.arch.regs.regs_init)
        if ctx is not None:
            ctx_init.update(ctx)
        solver = z3.Solver()
        symb_exec = symbexec(self._ira, ctx_init)
        history = self.history[::-1]
        history_size = len(history)
        translator = Translator.to_language("z3")
        size = self._ira.IRDst.size

        for hist_nb, label in enumerate(history):
            irb = self.irblock_slice(self._ira.blocs[label])

            # Emul the block and get back destination
            dst = symb_exec.emulbloc(irb, step=step)

            # Add constraint
            if hist_nb + 1 < history_size:
                next_label = history[hist_nb + 1]
                expected = symb_exec.eval_expr(m2_expr.ExprId(next_label,
                                                              size))
                solver.add(
                    self._gen_path_constraints(translator, dst, expected))
        # Save the solver
        self._solver = solver

        # Return only inputs values (others could be wrongs)
        return {element: symb_exec.eval_expr(element)
                for element in self.inputs}

    @property
    def is_satisfiable(self):
        """Return True iff the solution path admits at least one solution
        PRE: 'emul'
        """
        return self._solver.check() == z3.sat

    @property
    def constraints(self):
        """If satisfiable, return a valid solution as a Z3 Model instance"""
        if not self.is_satisfiable:
            raise ValueError("Unsatisfiable")
        return self._solver.model()
class FollowExpr(object):

    "Stand for an element (expression, depnode, ...) to follow or not"

    __slots__ = ["follow", "element"]

    def __init__(self, follow, element):
        # follow: bool flag; element: the wrapped expression/depnode
        self.follow = follow
        self.element = element

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.follow, self.element)

    @staticmethod
    def to_depnodes(follow_exprs, label, line):
        """Build a set of FollowExpr(DependencyNode) from the @follow_exprs set
        of FollowExpr
        @follow_exprs: set of FollowExpr
        @label: asm_label instance
        @line: integer
        """
        return set(FollowExpr(fexpr.follow,
                              DependencyNode(label, fexpr.element, line))
                   for fexpr in follow_exprs)

    @staticmethod
    def extract_depnodes(follow_exprs, only_follow=False):
        """Extract depnodes from a set of FollowExpr(Depnodes)
        @only_follow: (optional) extract only elements to follow"""
        extracted = set()
        for fexpr in follow_exprs:
            if fexpr.follow or not only_follow:
                extracted.add(fexpr.element)
        return extracted
class DependencyGraph(object):
"""Implementation of a dependency graph
A dependency graph contains DependencyNode as nodes. The oriented edges
stand for a dependency.
The dependency graph is made of the lines of a group of IRblock
*explicitely* or *implicitely* involved in the equation of given element.
"""
    def __init__(self, ira, implicit=False, apply_simp=True, follow_mem=True,
                 follow_call=True):
        """Create a DependencyGraph linked to @ira
        The IRA graph must have been computed

        @ira: IRAnalysis instance
        @implicit: (optional) Track IRDst for each block in the resulting path

        Following arguments define filters used to generate dependencies
        @apply_simp: (optional) Apply expr_simp
        @follow_mem: (optional) Track memory syntactically
        @follow_call: (optional) Track through "call"
        """
        # Init
        self._ira = ira
        self._implicit = implicit

        # Create callback filters. The order is relevant: simplification
        # runs first (if enabled), then mem/call filtering, then the
        # label filter.
        self._cb_follow = []
        if apply_simp:
            self._cb_follow.append(self._follow_simp_expr)
        self._cb_follow.append(lambda exprs: self._follow_exprs(exprs,
                                                                follow_mem,
                                                                follow_call))
        self._cb_follow.append(self._follow_nolabel)
@staticmethod
def _follow_simp_expr(exprs):
"""Simplify expression so avoid tracking useless elements,
as: XOR EAX, EAX
"""
follow = set()
for expr in exprs:
follow.add(expr_simp(expr))
return follow, set()
    @staticmethod
    def get_expr(expr, follow, nofollow):
        """Update @follow/@nofollow according to insteresting nodes
        Returns same expression (non modifier visitor).

        Identifiers and memory accesses are tracked; integer constants are
        recorded as non-followed leaves. Other node kinds are left alone.

        @expr: expression to handle
        @follow: set of nodes to follow
        @nofollow: set of nodes not to follow
        """
        if isinstance(expr, m2_expr.ExprId):
            follow.add(expr)
        elif isinstance(expr, m2_expr.ExprInt):
            nofollow.add(expr)
        elif isinstance(expr, m2_expr.ExprMem):
            follow.add(expr)
        return expr
    @staticmethod
    def follow_expr(expr, _, nofollow, follow_mem=False, follow_call=False):
        """Returns True if we must visit sub expressions.
        @expr: expression to browse
        @_: unused (the "follow" set, kept for visitor-signature symmetry)
        @nofollow: set of nodes not to follow
        @follow_mem: force the visit of memory sub expressions
        @follow_call: force the visit of call sub expressions
        """
        # A memory access or a call boundary stops the descent (and is
        # recorded as non-followed) unless explicitly allowed.
        if not follow_mem and isinstance(expr, m2_expr.ExprMem):
            nofollow.add(expr)
            return False
        if not follow_call and expr.is_function_call():
            nofollow.add(expr)
            return False
        return True
    @classmethod
    def _follow_exprs(cls, exprs, follow_mem=False, follow_call=False):
        """Extracts subnodes from exprs and returns followed/non followed
        expressions according to @follow_mem/@follow_call
        """
        follow, nofollow = set(), set()
        for expr in exprs:
            # visit() walks each sub-expression: get_expr classifies nodes,
            # follow_expr decides whether recursion continues into them.
            expr.visit(lambda x: cls.get_expr(x, follow, nofollow),
                       lambda x: cls.follow_expr(x, follow, nofollow,
                                                 follow_mem, follow_call))
        return follow, nofollow
@staticmethod
def _follow_nolabel(exprs):
"""Do not follow labels"""
follow = set()
for expr in exprs:
if not expr_is_int_or_label(expr):
follow.add(expr)
return follow, set()
def _follow_apply_cb(self, expr):
"""Apply callback functions to @expr
@expr : FollowExpr instance"""
follow = set([expr])
nofollow = set()
for callback in self._cb_follow:
follow, nofollow_tmp = callback(follow)
nofollow.update(nofollow_tmp)
out = set(FollowExpr(True, expr) for expr in follow)
out.update(set(FollowExpr(False, expr) for expr in nofollow))
return out
    def _track_exprs(self, state, assignblk, line_nb):
        """Track pending expression in an assignblock
        @state: DependencyState updated in place
        @assignblk: dst -> src mapping of one IR instruction
        @line_nb: line number of @assignblk inside its block
        """
        future_pending = {}
        node_resolved = set()
        for dst, src in assignblk.iteritems():
            # Only track pending
            if dst not in state.pending:
                continue
            # Track IRDst in implicit mode only
            if dst == self._ira.IRDst and not self._implicit:
                continue
            # a destination can be resolved at most once per assignblock
            assert dst not in node_resolved
            node_resolved.add(dst)
            dependencies = self._follow_apply_cb(src)
            state.link_element(dst, line_nb)
            state.link_dependencies(dst, line_nb,
                                    dependencies, future_pending)
        # Update pending nodes
        state.remove_pendings(node_resolved)
        state.add_pendings(future_pending)
    def _compute_intrablock(self, state):
        """Follow dependencies tracked in @state in the current irbloc
        @state: instance of DependencyState"""
        irb = self._ira.blocs[state.label]
        # line_nb None means "start from the very end of the block"
        line_nb = len(irb.irs) if state.line_nb is None else state.line_nb
        # walk the assignblocks backwards, from line_nb - 1 down to 0
        for cur_line_nb, assignblk in reversed(list(enumerate(irb.irs[:line_nb]))):
            self._track_exprs(state, assignblk, cur_line_nb)
    def get(self, label, elements, line_nb, heads):
        """Compute the dependencies of @elements at line number @line_nb in
        the block named @label in the current IRA, before the execution of
        this line. Dependency check stop if one of @heads is reached
        @label: asm_label instance
        @elements: set of Expr instances
        @line_nb: int
        @heads: set of asm_label instances
        Return an iterator on DiGraph(DependencyNode)
        """
        # Init the algorithm
        pending = {element: set() for element in elements}
        state = DependencyState(label, elements, pending, line_nb)
        todo = set([state])
        done = set()
        dpResultcls = DependencyResultImplicit if self._implicit else DependencyResult
        # Worklist loop: explore states backwards through the CFG
        while todo:
            state = todo.pop()
            self._compute_intrablock(state)
            done_state = state.get_done_state()
            # skip states already fully explored
            if done_state in done:
                continue
            done.add(done_state)
            # Emit a result when everything is resolved, or when a stop
            # point (requested head / CFG entry block) is reached
            if (not state.pending or
                    state.label in heads or
                    not self._ira.graph.predecessors(state.label)):
                yield dpResultcls(state, self._ira)
                if not state.pending:
                    continue
            if self._implicit:
                # Force IRDst to be tracked, except in the input block
                state.pending[self._ira.IRDst] = set()
            # Propagate state to parents
            for pred in self._ira.graph.predecessors_iter(state.label):
                todo.add(state.extend(pred))
def get_from_depnodes(self, depnodes, heads):
"""Alias for the get() method. Use the attributes of @depnodes as
argument.
PRE: Labels and lines of depnodes have to be equals
@depnodes: set of DependencyNode instances
@heads: set of asm_label instances
"""
lead = list(depnodes)[0]
elements = set(depnode.element for depnode in depnodes)
return self.get(lead.label, elements, lead.line_nb, heads)
| rom1sqr/miasm | miasm2/analysis/depgraph.py | Python | gpl-2.0 | 22,510 |
import traceback
import string
import os
import sys
import shutil
from FDO import *
import unittest
class DeleteTest(unittest.TestCase):
    """Exercises FdoCommandType_Delete against the SDF, SHP and SQLite
    providers, using throw-away copies of the shared test data."""

    def setUp(self):
        # Fresh working copies so each run starts from pristine data.
        if not os.path.isdir("DeleteTest/SDF"):
            os.makedirs("DeleteTest/SDF")
        if not os.path.isdir("DeleteTest/SQLite"):
            os.makedirs("DeleteTest/SQLite")
        if os.path.isdir("DeleteTest/SHP"):
            # BUGFIX: os.rmdir() raises on a non-empty directory, which is
            # exactly what a leftover copytree() target is.  rmtree() removes
            # it recursively so the copytree() below cannot fail.
            shutil.rmtree("DeleteTest/SHP")
        shutil.copyfile("../../../../TestData/SDF/World_Countries.sdf", "DeleteTest/SDF/DeleteTest.sdf")
        shutil.copyfile("../../../../TestData/SQLite/World_Countries.sqlite", "DeleteTest/SQLite/DeleteTest.sqlite")
        shutil.copytree("../../../../TestData/SHP", "DeleteTest/SHP")

    def tearDown(self):
        shutil.rmtree("DeleteTest")

    def _doDelete(self, conn):
        """Delete every 'Canada' feature over @conn and verify none remain."""
        geomName = None
        desc = conn.CreateCommand(FdoCommandType_DescribeSchema)
        self.assertIsNotNone(desc)
        schemas = desc.Execute()
        self.assertIsNotNone(schemas)
        clsDef = schemas.GetClassDefinition(None, "World_Countries")
        self.assertIsNotNone(clsDef)
        geomProp = clsDef.GetGeometryProperty()
        self.assertIsNotNone(geomProp)
        geomName = geomProp.Name
        deleteCmd = conn.CreateCommand(FdoCommandType_Delete)
        self.assertIsNotNone(deleteCmd)
        deleteCmd.SetFeatureClassName("World_Countries")
        deleteCmd.SetFilter("NAME = 'Canada'")
        deleted = deleteCmd.Execute()
        # The test dataset contains exactly 66 'Canada' polygons.
        self.assertEqual(66, deleted)
        selectCmd = conn.CreateCommand(FdoCommandType_Select)
        self.assertIsNotNone(selectCmd)
        selectCmd.SetFeatureClassName("World_Countries")
        selectCmd.SetFilter("NAME = 'Canada'")
        fr = selectCmd.Execute()
        try:
            self.assertFalse(fr.ReadNext())
        finally:
            fr.Close()

    def testSDFDelete(self):
        connMgr = FdoFeatureAccessManager.GetConnectionManager()
        conn = connMgr.CreateConnection("OSGeo.SDF")
        conn.ConnectionString = "File=DeleteTest/SDF/DeleteTest.sdf"
        # BUGFIX: assert_(a, b) only checks that a is truthy and treats b as
        # the failure *message*; the intent was clearly an equality check.
        self.assertEqual(FdoConnectionState_Open, conn.Open())
        try:
            self._doDelete(conn)
        finally:
            conn.Close()

    def testSHPDelete(self):
        connMgr = FdoFeatureAccessManager.GetConnectionManager()
        conn = connMgr.CreateConnection("OSGeo.SHP")
        conn.ConnectionString = "DefaultFileLocation=DeleteTest/SHP"
        self.assertEqual(FdoConnectionState_Open, conn.Open())
        try:
            self._doDelete(conn)
        finally:
            conn.Close()

    def testSQLiteDelete(self):
        connMgr = FdoFeatureAccessManager.GetConnectionManager()
        conn = connMgr.CreateConnection("OSGeo.SQLite")
        conn.ConnectionString = "File=DeleteTest/SQLite/DeleteTest.sqlite"
        self.assertEqual(FdoConnectionState_Open, conn.Open())
        try:
            self._doDelete(conn)
        finally:
            conn.Close()
from django.db import models
from django.urls import reverse
class VoterData(models.Model):
    """Personal details of a registered voter."""
    name = models.CharField(max_length=36, verbose_name="Vardas")
    surname = models.CharField(max_length=36, verbose_name="Pavardė")
    # BUGFIX: IntegerField does not accept max_length (Django system check
    # warns and ignores it), and an 11-digit Lithuanian personal code exceeds
    # the 32-bit range of IntegerField on common backends.  BigIntegerField
    # keeps the integer semantics with enough capacity.
    id_code = models.BigIntegerField(verbose_name="Asmens Kodas")
class Voter(models.Model):
    """One cast ballot, deliberately decoupled from the voter's identity."""
    # Voter id (uuid4) comes from CEC server, identifies real user.
    voter_id = models.CharField(max_length=36, verbose_name="Autorizacijos numeris")
    # Voter ballot id (uuid4) will be published, should not be associated with voter id.
    ballot_id = models.CharField(max_length=36)
    # Voter candidate codes
    candidates = models.TextField()

    def get_absolute_url(self):
        """Return the public URL of this ballot (keyed by ballot_id only)."""
        return reverse('ballot', args=[self.ballot_id])
| 00riddle00/internet-voting-authorization | ivreg/models.py | Python | mit | 782 |
# -*- coding: utf-8 -*-
from WbtEvo import WbtEVO
from WbtEvo import WbtPaper
from WbtEvo import WbtOLDbutGOLD
from WbtEvo import WbtUbi
from storyLineObj_plus import Sl_text, Sl_State, Sl_TrigObj
from tool import Tool
import simplejson as json
import codecs
t = Tool()
wp = None
# Build the "LOADER" scene of a Storyline project: a resume page whose
# triggers jump to the slide recorded in the 'bookmark' variable.
if __name__ == "__main__" and True:
    wp = WbtUbi('progetti/Palestra RISK _UD3')
    wp.onScene(0); wp.onSlide(0)
    # 2e2b46db-dca6-4dac-a439-3d865377ba8b
    # Project-wide variables, scoped to the two player-frame GUIDs.
    wp.var('bookmark', 0, ('00000420-0000-4000-0000-111000000000', '00000420-0000-4099-0000-111000000000'))
    wp.var('firstPage', 1, ('00000420-0000-4000-0000-111000000000', '00000420-0000-4099-0000-111000000000'))
    wp.var('lastPage', int(wp.get_var('numPages').val), ('00000420-0000-4000-0000-111000000000', '00000420-0000-4099-0000-111000000000'))
    wp.scene('LOADER')
    wp.curSld = wp.curScn.new_slide('Loader', wp.layouts['blank'])
    wp.f_xml.attrib['pG'] = str(wp.curScn.g)
    # -=-=-=-=-=-=-=-=- #
    bkg = wp.curSld.insert_pic("C:/christian_lavoro/wbt/__development__/__SARA__/asset/loader-layout.png", nick="loooololool")
    bkg.cord(0, 0)
    # -=-=-=-=-=-=-=-=- #
    wp.curSld.navData(False, False, False)
    # Captions (course title / unit number / unit title) shown while loading.
    txt = wp.curSld.insert_text( 'TIT CORSO', 'Titolo Corso', ['Cabin', '32', '#FFFFFF', True, False])
    txt.margin(0, 0, 0, 0)
    txt.scale(691, 62); txt.cord(15, 94)
    txt = wp.curSld.insert_text( 'UD', 'UD 01', ['Cabin', '24', '#FFFFFF', True, False])
    txt.margin(0, 0, 0, 0)
    txt.scale(691, 48); txt.cord(15, 216)
    txt = wp.curSld.insert_text( 'TIT UNITA', 'Titolo Unita', ['Cabin', '32', '#FFFFFF', True, False])
    txt.margin(0, 0, 0, 0)
    txt.scale(691, 62); txt.cord(15, 264)
    # -=-=-=-=-=-=-=-=- #
    # Locate the MAIN scene: its slides are the jump targets below.
    scn_main = None
    for scn in wp.l_scene:
        if scn.nome == 'MAIN':
            scn_main = scn
    # One conditional jump trigger per content slide: resume where the
    # 'bookmark' variable points.  BANK/RES slides are not jump targets.
    i = 1
    for sld in scn_main.l_slide:
        if sld.nome == 'BANK' or sld.nome == 'RES':
            continue
        if sld.nome == 'JOBSTOP':
            # NOTE(review): indentation reconstructed — this trigger appears
            # to set 'maxQuiz' when the bookmark is past this slide; confirm
            # against the original file.
            trg = Sl_TrigObj( "", wp.curSld.g )
            trg.act_setVar( wp.get_var('maxQuiz').g, i)
            trg.evt_onTmLnEnd()
            trg.condition( [wp.get_var('bookmark').g], '>', i )
            wp.curSld.f_xml.find('trigLst').insert(0, trg.f_xml )
        trg = Sl_TrigObj( "", wp.curSld.g )
        trg.act_jumpToSlide( sld.g )
        trg.evt_onTmLnEnd()
        trg.condition( [wp.get_var('bookmark').g], '==', i )
        wp.curSld.f_xml.find('trigLst').append( trg.f_xml )
        i += 1
    # Run the JavaScript loader as soon as the timeline starts.
    trg = Sl_TrigObj( "", wp.curSld.g )
    trg.act_exeJS( "carica();" )
    trg.evt_onTmLnStart()
    wp.curSld.f_xml.find('trigLst').insert(0, trg.f_xml )
    # Fallback (no matching bookmark): jump to the first MAIN slide.
    i = 1
    for sld in scn_main.l_slide:
        if i > 1:
            break
        trg = Sl_TrigObj( "", wp.curSld.g )
        trg.act_jumpToSlide( sld.g )
        trg.evt_onTmLnEnd()
        wp.curSld.f_xml.find('trigLst').append( trg.f_xml )
        i += 1
    '''FIILEE.find('navigationsettings').find('browsersettings').attrib['playersize'] = 'Scale'
    FIILEE.find('navigationsettings').find('resumesettings').attrib['restartresume'] = 'Never'
    optGrup = None
    for o in FIILEE.find('control_options').find('optiongroups').iter():
        if o.attrib['name'] == 'controls':
            optGrup = o.find('options')
    for o in optGrup.find('optiongroups').iter():
        if o.attrib['name'] == 'controls':
            optGrup = o.find('options')'''
    wp.curSld.save()
    wp.save(justStory=True)
| christiansacchi/code-forest | storyline2_3_project_composer-Sara_Script-python/edo.py | Python | mit | 3,274 |
import itertools, sys

# Endless console "spinner": draws -, /, |, \ in place forever.
spinner = itertools.cycle(['-', '/', '|', '\\'])
while True:
    # BUGFIX: the next() builtin works on Python 2 and 3 alike; iterator
    # objects lost their .next() method in Python 3.
    sys.stdout.write(next(spinner))
    sys.stdout.flush()
    sys.stdout.write('\b')  # backspace so the next frame overwrites this one
| 2Dsharp/college | Fall-18/CG/PipeAnimation.py | Python | mit | 177 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
def FindModule(name):
  """Gets the path of the named module.

  This is useful for cases where we want to use subprocess.call on a module we
  have imported, and safer than using __file__ since that can point to .pyc
  files.

  Args:
    name: the string name of a module (e.g. 'dev_appserver')
  Returns:
    The path to the module.
  """
  # BUGFIX: imp.find_module returns (file, pathname, description).  For an
  # ordinary source module the file object in slot 0 is OPEN and the caller
  # must close it; indexing straight into [1] leaked the handle on each call.
  file_obj, pathname, _ = imp.find_module(name)
  if file_obj is not None:
    file_obj.close()
  return pathname
| modulexcite/catapult | catapult_build/module_finder.py | Python | bsd-3-clause | 551 |
# coding:utf8
import re
import requests
from bs4 import BeautifulSoup
# Site root and the paginated video-listing URL template ({p} = page number).
baseurl = 'http://www.102468.com'
url = baseurl + '/?m=vod-type-id-1-pg-{p}.html'
# Spoof a desktop Firefox user agent for every request.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
}
def make_request(url, session=None):
    """Download @url and return the raw response body.
    @session: optional requests.Session to reuse; when omitted a temporary
    one is created and always closed before returning.
    Raises whatever the underlying request raised."""
    owns_session = session is None
    if owns_session:
        session = requests.Session()
    try:
        response = session.get(url, headers=headers, timeout=10)
        return response.content
    finally:
        # BUGFIX: the original only closed the temporary session on the
        # success path, leaking it whenever the request raised; it also used
        # 'raise e', which truncates the traceback on Python 2.  try/finally
        # closes unconditionally and lets exceptions propagate untouched.
        if owns_session:
            session.close()
def make_soup(url, session=None):
    """Fetch @url and parse it with lxml.
    Return (True, soup) on success or (False, error message) on failure.
    A page whose <title> contains the site's "system notice" marker is
    treated as missing."""
    try:
        html = make_request(url, session)
        soup = BeautifulSoup(html, 'lxml')
        if u'系统提示' in soup.head.title:
            raise RuntimeError('Page {url} not found'.format(url=url))
        return True, soup
    except Exception, e:
        print e
        return False, str(e)
def parse_list(soup):
    """Yield (title, absolute detail URL) for each video on a listing page.
    Raises RuntimeError when the page holds no entries, which signals the
    end of the pagination to the caller."""
    lis = soup.find('div', class_='plist').findAll('a', href=re.compile(r'/\?m=vod-play-id-\d+-src-\d-num-\d.html'))
    if not lis:
        raise RuntimeError('Page empty! Maybe it is already done!')
    for li in lis:
        ftitle = li.find('label', class_='name').getText().strip().encode('utf8')
        furl = baseurl + li.get('href', None)
        yield ftitle, furl
def parse_url(soup):
    """Resolve the player iframe of a detail page to the real video URL.
    Return the URL string, or None when the page has no iframe or the
    player page cannot be fetched."""
    iframe = soup.find('iframe')
    if iframe:
        resource_url = baseurl + iframe.get('src')
        # Only proceed when the player page actually exists; the fetched
        # soup itself is not needed, parse_resource re-reads the raw HTML.
        ok, _ = make_soup(resource_url)
        if ok:
            # The original rebuilt the identical URL with a second
            # soup.find('iframe') call; use the one computed above.
            return parse_resource(resource_url)
def parse_resource(url, session=None):
    """Extract the final video URL from the player page at @url.
    The page embeds 'vid' and 'purl' JavaScript string variables; the
    playable resource lives under /ydisk/.  Raises IndexError when either
    variable is absent."""
    html = make_request(url, session)
    vid = re.findall(r'(?<=vid=\')\S*(?=\')', html)[0]
    purl = re.findall(r'(?<=purl=\')\S*(?=\')', html)[0]
    return baseurl + '/ydisk/' + purl + vid
def run(p, fobj):
    """Scrape listing page number @p and append 'title<TAB>url' lines to the
    open file object @fobj, one per resolvable video."""
    session = requests.Session()
    flag, soup = make_soup(url=url.format(p=p), session=session)
    for ftitle, furl in parse_list(soup):
        fflag, fsoup = make_soup(url=furl, session=session)
        if fflag:
            vurl = parse_url(fsoup)
            if vurl:
                print ftitle, vurl
                fobj.write(ftitle + '\t' + vurl + '\n')
                # flush per line so progress survives an aborted run
                fobj.flush()
            else:
                print ftitle, 'video url missing...'
    session.close()
if __name__ == '__main__':
    # Crawl listing pages in order until parse_list signals the end of the
    # pagination by raising RuntimeError.
    with open('videos.txt', 'a+') as fobj:
        try:
            i = 1
            while True:
                run(p=i, fobj=fobj)
                i += 1
        except RuntimeError, e:
            print e
| brunobell/PracticalPythonScripts | sucker_102468_com.py | Python | mit | 2,602 |
#!/usr/bin/env python
#coding: iso-8859-15
#
# Make AntennaField.conf and iHBADeltas.conf file for given station and date
#
import sys,pgdb, pg
from datetime import *
from copy import deepcopy
from math import *
import numpy as np
import MLab as mlab
from database import *
# get info from database.py
dbName=getDBname()
dbHost=getDBhost()
db1 = pgdb.connect(user="postgres", host=dbHost, database=dbName)
cursor = db1.cursor()
# calling stored procedures only works from the pg module for some reason.
db2 = pg.connect(user="postgres", host=dbHost, dbname=dbName)
##
def print_help():
    """Print command-line usage for this script to stdout."""
    print("Usage: make_all_station_file date")
    print(" <date> : yyyy.yy e.g. 2008.75 for Oct 1st 2008")
##
## write header to antennaField file
##
def writeAntennaFieldHeader(frame):
    """(Re)create All-AntennaFields.conf and write the descriptive header.
    @frame: coordinate-frame string taken from the first database record"""
    # add to All Station config file
    dataStr = ''
    fileName = '../StaticMetaData/AntennaFields/All-AntennaFields.conf'
    # 'w' truncates: the header is always the first thing in the file
    file = open(fileName, 'w')
    dataStr += '#\n'
    dataStr += '# AntennaPositions for all stations\n'
    dataStr += '# %s target_date = %s\n' %(str(frame), sys.argv[1])
    dataStr += '# Created: %s\n' %(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    dataStr += '#\n'
    dataStr += '# NORMAL_VECTOR(1x3 matrix), floats\n'
    dataStr += '# ROTATION_MATRIX(3x3 matrix), order -> 1e row,2e row,3e row, floats\n'
    dataStr += '# CENTER_POS(1x3 matrix), floats\n'
    dataStr += '# ANTENNA_OFFSET(..x2x3), ..=number of antennas, order -> ANT0(Xpol(X,Y,Z),Ypol(X,Y,Z)),ANT1(.., floats\n'
    dataStr += '#\n'
    file.write(dataStr)
    file.close()
    return
##
## write normal vector to antennaField file, in blitz format
##
def writeNormalVector(station, anttype):
    """Append the NORMAL_VECTOR line for (@station, @anttype) to
    All-AntennaFields.conf, reading the vector from the database."""
    try:
        cursor.execute("select * from get_normal_vector(%s, %s)", (station, anttype))
        # the stored procedure returns a '{x,y,z}' string in column 2
        vector = str(cursor.fetchone()[2]).replace('{','').replace('}','').split(',')
        vector = np.array([float(v) for v in vector])
        # write line to allStations file
        file = open('../StaticMetaData/AntennaFields/All-AntennaFields.conf', 'a')
        dataStr = '%s;%s;NORMAL_VECTOR;%6.6f;%6.6f;%6.6f\n' %\
            (str(station), str(anttype), vector[0], vector[1], vector[2])
        file.write(dataStr)
        file.close()
    # NOTE(review): bare except hides every possible error (including
    # typos); kept as-is since some stations legitimately lack this data.
    except:
        print 'ERR, no normal-vector for %s, %s' %(station, anttype)
    return
##
## write rotation matrix to antennaField file, in blitz format
##
def writeRotationMatrix(station, anttype):
    """Append the 3x3 ROTATION_MATRIX line for (@station, @anttype) to
    All-AntennaFields.conf, reading the matrix from the database."""
    try:
        cursor.execute("select * from get_rotation_matrix(%s, %s)", (station, anttype))
        # the stored procedure returns a '{...}' string of 9 floats
        matrix = str(cursor.fetchone()[2]).replace('{','').replace('}','').split(',')
        matrix = np.resize(np.array([float(m) for m in matrix]),(3,3))
        Shape = np.shape(matrix)
        # write line to allStations file
        file = open('../StaticMetaData/AntennaFields/All-AntennaFields.conf', 'a')
        dataStr = '%s;%s;ROTATION_MATRIX' %(str(station), str(anttype))
        # row-major order, as documented in the file header
        for row in range(Shape[0]):
            for col in range(Shape[1]):
                dataStr += ';%10.10f' %(matrix[row, col])
        dataStr += '\n'
        file.write(dataStr)
        file.close()
    # NOTE(review): bare except hides every possible error; kept as-is
    # since some station/antenna-type pairs legitimately lack a matrix.
    except:
        print 'ERR, no rotation-matrix for %s, %s' %(station, anttype)
    return
##
## write antenna positions to antennaField file, in blitz format
##
def writeAntennaField(station, anttype, aPos):
    """Append a position line for @station/@anttype to All-AntennaFields.conf.
    A 1-D @aPos becomes a CENTER_POS line; a 3-D @aPos (antennas x 2 pols
    x 3 coords) becomes an ANTENNA_OFFSET line."""
    Shape = np.shape(aPos)
    Dims = len(Shape)
    # write line to allStations file
    file = open('../StaticMetaData/AntennaFields/All-AntennaFields.conf', 'a')
    # NOTE(review): a 2-D aPos would leave dataStr undefined and raise
    # NameError at the write below; callers only pass 1-D or 3-D arrays.
    if Dims == 1:
        dataStr = '%s;%s;CENTER_POS;%9.9f;%9.9f;%3.3f\n' %(str(station), str(anttype), aPos[0], aPos[1], aPos[2])
    elif Dims == 3:
        dataStr = '%s;%s;ANTENNA_OFFSET' %(str(station), str(anttype))
        for ant in range(Shape[0]):
            for pol in range(Shape[1]):
                for pos in range(Shape[2]):
                    dataStr += ';%6.6f' %(aPos[ant, pol, pos])
        dataStr += '\n'
    file.write(dataStr)
    file.close()
    return
##
## MAIN
##
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print_help()
        sys.exit(1)
    # the header must be written exactly once, on the very first record
    first = True
    # one pass per station that has reference coordinates in the database
    for stationname in db2.query("select distinct o.stationname from object o inner join reference_coord r on r.id = o.id").getresult():
        station = stationname[0]
        date_years = float(sys.argv[1])
        frame = ''
        # from database select all antennas for given station and target-date
        cursor.execute("select * from get_gen_coord(%s, %f) order by objtype, number", (station, float(sys.argv[1])))
        # start with empty arrays
        aPosL = np.zeros((0,2,3))
        aPosH = np.zeros((0,2,3))
        aRefL = [1.0,2.0,3.0]
        aRefH0 = [1.0,2.0,3.0]
        aRefH1 = [1.0,2.0,3.0]
        aRefH = [1.0,2.0,3.0]
        # loop over all antennas
        while (1):
            record = cursor.fetchone()
            if record == None:
                break
            if first:
                first = False
                frame = str(record[3])
                ## write positions to *.conf file
                writeAntennaFieldHeader(frame)
            #print record
            # handle center coordinates
            if record[1] == 'CLBA':
                aRefL = [record[4],record[5],record[6]]
            elif record[1] == 'CHBA0':
                aRefH0 = [record[4],record[5],record[6]]
            elif record[1] == 'CHBA1':
                aRefH1 = [record[4],record[5],record[6]]
            elif record[1] == 'CHBA':
                aRefH = [record[4],record[5],record[6]]
            else:
                # antennas come in consecutive X/Y-polarisation pairs
                # get coordinates for even antenna(X)
                even = [record[4],record[5],record[6]]
                # get coordinates for odd antenna(Y)
                record = cursor.fetchone()
                if record == None:
                    break
                odd = [record[4],record[5],record[6]]
                # get used frame for translation
                if record[1] == 'LBA':
                    aPosL = np.concatenate((aPosL, [[even,odd]]), axis=0)
                elif record[1] == 'HBA' or record[1] == 'HBA0' or record[1] == 'HBA1':
                    aPosH = np.concatenate((aPosH, [[even,odd]]), axis=0)
        if int(np.shape(aPosL)[0]) == 0 or int(np.shape(aPosH)[0]) == 0:
            print 'ERR, no data found for %s' %(station)
            exit(1)
        # do something with the data
        print 'Making %s-AntennaField.conf with LBA shape=%s HBA shape=%s' %(station, np.shape(aPosL), np.shape(aPosH))
        aRef = None
        # write LBA information to AntennaPos.conf
        writeNormalVector(station, 'LBA')
        writeRotationMatrix(station, 'LBA')
        writeAntennaField(station, 'LBA', aRefL)
        # antenna offsets are written relative to the field centre
        aOffset = aPosL - [[aRefL,aRefL]]
        writeAntennaField(station, '', aOffset)
        # write HBA information to AntennaPos.conf
        # if not a core station
        if station[0] != 'C':
            writeNormalVector(station, 'HBA')
            writeRotationMatrix(station, 'HBA')
            writeAntennaField(station, 'HBA', aRefH)
            aOffset = aPosH - [[aRefH,aRefH]]
            writeAntennaField(station, '', aOffset)
        # if core station add also information for HBA0 and HBA1 fields
        if station[0] == 'C':
            # write information for HBA0
            writeNormalVector(station, 'HBA0')
            writeRotationMatrix(station, 'HBA0')
            writeAntennaField(station, 'HBA0', aRefH0)
            # write information for HBA1
            writeNormalVector(station, 'HBA1')
            writeRotationMatrix(station, 'HBA1')
            writeAntennaField(station, 'HBA1', aRefH1)
    db1.close()
    db2.close()
    sys.exit(0)
| jjdmol/LOFAR | MAC/Deployment/data/Coordinates/make_all_station_file.py | Python | gpl-3.0 | 7,992 |
# This query is designed to be run against
# "Larry's data with Longitude/Latitude | fulldata" at:
# http://mewert-langmap.appspot.com/map?1onlw3HTjsXX_1dSnJS9RscD1-6uW8jN_GsSQ6HCSje0/oa4lgf8
#
# It requires a 'continent' column, an idsurfacescale column, and sometimes a
# language column.
# Array of family names and the letter to use. Note that family names must
# match families that are on glottolog.org.
# The first entry whose family matches wins in assign_family(), so the broad
# Benue-Congo / Volta-Congo / Atlantic-Congo groupings are listed last.
VALID_FAMILIES = [
    ('Kru', {'letter': 'K' }),
    ('Gur', {'letter': 'G' }),
    ('Kwa', {'letter': 'W' }),
    ('Atlantic', {'letter': 'A' }),
    ('Mande', {'letter': 'M' }),
    ('Central Sudanic', {'letter': 'S' }),
    ('Nilotic', {'letter': 'N' }),
    ('Nubian', {'letter': 'X' }),
    ('Kainji', {'letter': 'J' }),
    ('Edoid', {'letter': 'E' }),
    ('Bantoid', {'letter': 'T' }),
    ('Igboid', {'letter': 'I' }),
    ('Chadic', {'letter': 'C'}),
    ('Cushitic', {'letter': 'H'}),
    ('Khoe-Kwadi', {'letter': 'O'}),
    ('Benue-Congo', {'letter': 'B' }),
    ('Volta-Congo', {'letter': 'V' }),
    ('Atlantic-Congo', {'letter': 'Z' }),
    ('Ijoid', {'letter': 'D'}),
]
# Utility to extract the default name from a glottolog "langoid" (language,
# language family, etc).
def get_name(struct):
    """Return the default name stored in a glottolog langoid record.

    Falls back to '' when either level of nesting is absent."""
    name_record = struct.get('name', {})
    return name_record.get('name', '')
# Determine if the passed in family name is an ancestor of parent_struct
# recursively.
def has_parent(family_name, parent_struct):
    """Report whether @family_name names @parent_struct itself or any of
    its ancestors, walking the 'parent' chain iteratively."""
    node = parent_struct
    while True:
        if get_name(node) == family_name:
            return True
        if 'parent' not in node:
            return False
        # a present-but-None parent counts as "no further ancestors"
        node = node.get('parent') or {}
# Assign the correct letter to a language for it's family based on the
# VALID_FAMILIES array at the top of the file.
def assign_family(output, glotto):
    """Set output['symbol'] to the letter of the first VALID_FAMILIES entry
    that is an ancestor of @glotto; leave @output untouched otherwise."""
    ancestry = glotto.get('parent', {})
    for family_name, attrs in VALID_FAMILIES:
        if has_parent(family_name, ancestry):
            output['symbol'] = attrs['letter']
            break
# Turns the idsurfacescale column into a proper python array.
def get_id_surface_scale(record):
    """Return the idsurfacescale column of @record as a list of tokens.

    An absent or blank column yields an empty list."""
    raw = record.get('idsurfacescale')
    return raw.split(' ') if raw else []
# A simple color array to use to color each of the letters.
# RGB triples indexed by the number of level surface tones (0-5).
COLORS = [[0, 0, 0],
          [0xF4, 0x43, 0x36],
          [0x9C, 0x27, 0xB0],
          [0x3F, 0x51, 0xB5],
          [0x03, 0xA9, 0xF4],
          [0x00, 0x96, 0x88]]
# The main process argument.
def process(record, glotto):
    """Map one survey @record plus its glottolog entry @glotto to a map-point
    dict, or None for records outside Africa."""
    # Remove non-African data:
    if record.get('continent') != 1:
        return None
    # Compute the number of level surface tones (single-character tokens).
    id_surface_scale = get_id_surface_scale(record)
    levels = sum([1 for x in id_surface_scale if len(x) == 1])
    # Base output info:
    output = {
        # Add the number of level surface tones in parenthesis for additional
        # info while mousing over the various data points.
        'name': '%s (%d)' %
        (get_name(glotto) or 'No Glotto: ' + record.get('language'),
         levels),
        'longitude': glotto.get('longitude'),
        'latitude': glotto.get('latitude'),
        'idsurfacescale': id_surface_scale,
        'level_surface_tones': levels,
        'color': COLORS[levels]
    }
    # Assign the family letter (output['symbol']) when one matches.
    assign_family(output, glotto)
    return output
# Generate the key algorithmically from the VALID_FAMILIES at the top of the
# file and label the different colors. Sorted alphabetically for the viewer's
# convenience.
def key():
    """Build the map legend: one (letter, family) pair per VALID_FAMILIES
    entry, sorted alphabetically by letter, followed by the six level-tone
    colour entries."""
    keys = []
    for name, attributes in VALID_FAMILIES:
        keys.append((attributes.get('letter'), name))
    keys = sorted(keys)
    for i in range(6):
        keys.append((COLORS[i], '%d number of level surface tones' % i))
    return {
        'name': 'African Languages by Family and Number of Level Surface Tones',
        'key': keys
    }
| sarum90/langmapper | static/snippets/africalevelsurfacetones.py | Python | mit | 4,100 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from uthportal.tasks.course import CourseTask
class ee353(CourseTask):
    """Course task for 'Electrical Machines' (ee353).

    Only the e-class announcement feed is configured; the course has no
    standalone site feed (link_site left empty)."""
    # Seed document inserted on first run; the base CourseTask consumes it.
    document_prototype = {
        "code": "ee353",
        "announcements": {
            "link_site": "",
            "link_eclass": "http://eclass.uth.gr/eclass/modules/announcements/rss.php?c=MHX321"
        },
        "info": {
            "name": u"Ηλεκτρικές Μηχανές",
            "code_site": u"HM353",
            "code_eclass": u"MHX321",
            "link_site": "",
            "link_eclass": "http://eclass.uth.gr/eclass/courses/MHX321/"
        }
    }
| kkanellis/uthportal-server | uthportal/library/inf/courses/ee353.py | Python | gpl-3.0 | 604 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing base class for logging transport."""
class Transport(object):
    """Base class for Google Cloud Logging handler transports.

    Subclasses of :class:`Transport` must have constructors that accept a
    client and name object, and must override :meth:`send`.
    """

    def send(
        self, record, message, resource=None, labels=None, trace=None, span_id=None
    ):
        """Transport send to be implemented by subclasses.

        :type record: :class:`logging.LogRecord`
        :param record: Python log record that the handler was called with.

        :type message: str
        :param message: The message from the ``LogRecord`` after being
                        formatted by the associated log formatters.

        :type resource: :class:`~google.cloud.logging.resource.Resource`
        :param resource: (Optional) Monitored resource of the entry.

        :type labels: dict
        :param labels: (Optional) Mapping of labels for the entry.

        :type trace: str
        :param trace: (Optional) Trace identifier to attach to the entry.

        :type span_id: str
        :param span_id: (Optional) Span identifier within the trace.
        """
        raise NotImplementedError

    def flush(self):
        """Submit any pending log records.

        For blocking/sync transports, this is a no-op.
        """
| tseaver/google-cloud-python | logging/google/cloud/logging/handlers/transports/base.py | Python | apache-2.0 | 1,740 |
from scipy import misc
from pathlib import Path
import image_utils as img
import hashlib
import os
from base64 import b64encode
# Build the training set: for each class directory under res/orig, resize
# every image to 100x100 and save it plus its three 90-degree rotations into
# res/dataset, under a salted, collision-resistant file name.
for dir in os.listdir(os.path.join('res', 'orig')):
    for file in os.listdir(os.path.join('res', 'orig', dir)):
        if os.path.isfile(os.path.join('res', 'orig', dir, file)):
            md5 = hashlib.md5()
            # random salt so re-running never overwrites earlier output
            r = b64encode(os.urandom(64)).decode('utf-8')
            md5.update((dir + file + r).encode('utf-8'))
            im = img.convert(os.path.join('res', 'orig', dir, file), (100,100))
            # suffixes _1.._4 mark the 0/90/180/270-degree variants
            img.save(os.path.join('res', 'dataset', dir + '_1' + md5.hexdigest() + '.jpg'), im)
            im = img.rot90(im)
            img.save(os.path.join('res', 'dataset', dir + '_2' + md5.hexdigest() + '.jpg'), im)
            im = img.rot90(im)
            img.save(os.path.join('res', 'dataset', dir + '_3' + md5.hexdigest() + '.jpg'), im)
            im = img.rot90(im)
            img.save(os.path.join('res', 'dataset', dir + '_4' + md5.hexdigest() + '.jpg'), im)
import copy
import numpy as np
import nengo
from nengo.utils.network import with_self
class EnsembleArray(nengo.Network):
    """A network of identical, independent ensembles, each representing one
    ens_dimensions-wide slice of a larger input vector."""

    def __init__(self, neurons, n_ensembles, ens_dimensions=1, label=None,
                 **ens_kwargs):
        # 'dimensions' would be ambiguous here, so refuse it outright.
        if "dimensions" in ens_kwargs:
            raise TypeError(
                "'dimensions' is not a valid argument to EnsembleArray. "
                "To set the number of ensembles, use 'n_ensembles'. To set "
                "the number of dimensions per ensemble, use 'ens_dimensions'.")
        label_prefix = "" if label is None else label + "_"
        self.n_ensembles = n_ensembles
        self.dimensions_per_ensemble = ens_dimensions
        # Identity transform, sliced below so each ensemble receives its own
        # contiguous chunk of the input vector.
        transform = np.eye(self.dimensions)
        self.input = nengo.Node(size_in=self.dimensions, label="input")
        for i in range(n_ensembles):
            e = nengo.Ensemble(
                copy.deepcopy(neurons), self.dimensions_per_ensemble,
                label=label_prefix + str(i), **ens_kwargs)
            trans = transform[i * self.dimensions_per_ensemble:
                              (i + 1) * self.dimensions_per_ensemble, :]
            nengo.Connection(self.input, e, transform=trans, synapse=None)
        self.add_output('output', function=None)

    @with_self
    def add_output(self, name, function, synapse=None, **conn_kwargs):
        """Add an output node called @name that collects @function applied to
        every ensemble; the node is returned and also set as an attribute."""
        if function is None:
            function_d = self.dimensions_per_ensemble
        else:
            # probe the function once to discover its output dimensionality
            func_output = function(np.zeros(self.dimensions_per_ensemble))
            function_d = np.asarray(func_output).size
        dim = self.n_ensembles * function_d
        output = nengo.Node(size_in=dim, label=name)
        setattr(self, name, output)
        for i, e in enumerate(self.ensembles):
            nengo.Connection(
                e, output[i*function_d:(i+1)*function_d], function=function,
                synapse=synapse, **conn_kwargs)
        return output

    @property
    def dimensions(self):
        # total dimensionality represented by the whole array
        return self.n_ensembles * self.dimensions_per_ensemble
| ZeitgeberH/nengo | nengo/networks/ensemblearray.py | Python | gpl-3.0 | 2,042 |
import cmdline

# Introspection demo: list the attributes of the local cmdline module and
# show the docstring of its parse_args() function.
print("\n----- cmdline module -----\n")
print('* ' + '\n* '.join(dir(cmdline)))
print("\n----- parse_args() document -----\n")
print(cmdline.parse_args.__doc__)
| skitazaki/python-school-ja | src/mylib-sample.py | Python | mit | 178 |
#!/usr/bin/env python
#
# This script is based on the one found at http://vim.wikia.com/wiki/VimTip280
# but has been generalised. It searches the current working directory for
# *_test.py (good) or test_*.py (bad) files and runs each of the unit-tests
# found within.
#
# When run from within Vim as its 'makeprg' with the correct 'errorformat' set
# (by setting ":compiler pyunit"), any failure will deliver your cursor to the
# line that breaks the unit tests.
#
# Place this file somewhere where it can be run such as ${HOME}/bin/alltests.py
import unittest
import sys
import os
import re
import traceback
def find_all_test_files():
    """Return module names (without '.py') in the current working directory
    whose file name matches test_*.py or *_test.py."""
    # BUGFIX: the pattern must be a raw string — '\.' inside a normal string
    # literal is an invalid escape sequence and raises a SyntaxWarning (an
    # error in future versions) on modern Pythons.
    t_py_re = re.compile(r'^(test_.*|.*_test)\.py$')
    # drop the trailing '.py' from every matching file name
    return [filename[:-3]
            for filename in os.listdir(os.getcwd())
            if t_py_re.match(filename)]
def suite():
    """Build a TestSuite containing every test case found in the CWD.

    Importing each discovered test module (and whatever that import executes)
    is a deliberate side effect of this call."""
    # make the CWD importable so the discovered modules resolve
    sys.path.append(os.curdir)
    modules_to_test = find_all_test_files()
    print('Testing', ', '.join(modules_to_test))
    alltests = unittest.TestSuite()
    for module in map(__import__, modules_to_test):
        alltests.addTest(unittest.findTestCases(module))
    return alltests
if __name__ == '__main__':
    try:
        unittest.main(defaultTest='suite')
    except SystemExit:
        # unittest.main always exits; swallow it so the reordering below
        # only applies to genuine errors.
        pass
    except:
        # we reverse the Exception/Traceback printout order so vim's
        # quickfix works properly
        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
        sys.stderr.write("Exception:\n")
        ex = traceback.format_exception_only(exceptionType, exceptionValue)
        for line in ex:
            sys.stderr.write(line)
        sys.stderr.write("\nTraceback (most recent call first):\n")
        tb = traceback.format_tb(exceptionTraceback)
        for line in reversed(tb):
            sys.stderr.write(line)
| omalsa04/dotfiles | bin/alltests.py | Python | apache-2.0 | 1,875 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'Punn.youtube_id' (nullable, so no default is needed
        # for existing rows)
        db.add_column('punns_punn', 'youtube_id',
                      self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Punn.youtube_id'
db.delete_column('punns_punn', 'youtube_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'punns.punn': {
'Meta': {'object_name': 'Punn'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'original_punn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']", 'null': 'True', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['punns'] | carquois/blobon | blobon/punns/migrations/0016_auto__add_field_punn_youtube_id.py | Python | mit | 5,502 |
from types import ModuleType
import logging
from KINCluster.core.cluster import Cluster
from KINCluster.core.extractor import Extractor
from KINCluster import settings as sets
class KINCluster:
    """Top-level driver: feeds pipeline items into a cluster, then extracts dumps."""

    # Class-wide logger handle (the logging module itself, configured at import).
    __log = logging
    __log.basicConfig(level=logging.INFO)

    def __init__(self, pipeline, cluster=Cluster, extractor=Extractor, settings=None):
        """Wire up the pipeline, cluster and extractor.

        `settings` may be None (package defaults only), a dict of overrides,
        or a module whose public attributes are treated as overrides.
        """
        def getattrs(module):
            # Map every public attribute name of `module` to its value.
            keys = [key for key in dir(module) if not key.startswith('__')]
            return {key: getattr(module, key) for key in keys}

        # BUG FIX: the default used to be the mutable `settings={}` (shared
        # across all calls); use None as the sentinel instead.
        if settings is None:
            settings = {}
        # Start from package defaults, then overlay caller-supplied settings.
        self.settings = getattrs(sets)
        if isinstance(settings, ModuleType):
            settings = getattrs(settings)
        if isinstance(settings, dict):
            for key, value in settings.items():
                self.settings[key] = value
        self.pipeline = pipeline
        self.cluster = cluster(settings=self.settings)
        self.extractor = extractor
        KINCluster.__log.info('KINCluster>> inited')

    def run(self):
        """Cluster every item the pipeline captures, then dress extracted dumps."""
        KINCluster.__log.info('KINCluster>> start running...')
        for item in self.pipeline.capture_item():
            self.cluster.put_item(item)
        KINCluster.__log.info('KINCluster>> start clustering...')
        self.cluster.cluster()
        KINCluster.__log.info('KINCluster>> done clustering.')
        KINCluster.__log.info('KINCluster>> start extracting...')
        extractor = self.extractor(self.cluster)
        for idx, _ in enumerate(self.cluster.dumps):
            self.pipeline.dress_item(extractor.dump(idx))
        KINCluster.__log.info('KINCluster>> done extracting...')
| memento7/KINCluster | KINCluster/KINCluster.py | Python | mit | 1,586 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.logger import logger
try:
from opus_core.opus_gdal import OpusGDAL
except:
raise ImportError, 'failed to import OpusGDAL from opus_core.opus_gdal'
import os, re, sys, time, traceback
from copy import copy
from opus_core.misc import directory_path_from_opus_path
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
class GeotiffMap(Visualization):
    """Visualization that writes a single-year indicator as a GeoTIFF image.

    A prototype GeoTIFF supplies the georeferencing; by default it is the
    `idgrid.tif` shipped in the indicator package's geotiff_files directory.
    """

    def __init__(self, source_data, dataset_name,
                 attribute = None,
                 years = None,
                 operation = None,
                 name = None,
                 package = None,
                 prototype_dataset = None,
                 storage_location = None):
        # BUG FIX: this called Visualizer.__init__, but no name `Visualizer`
        # is imported or defined -- the base class is Visualization -- so
        # construction raised NameError.
        Visualization.__init__(self, source_data,
                               dataset_name, [attribute],
                               years, operation, name,
                               storage_location)

        if prototype_dataset is None:
            dir = directory_path_from_opus_path('%s.indicators.geotiff_files'%package)
            #todo: check indicator package and find appropriate prototype image
            prototype_dataset = os.path.join(dir,'idgrid.tif')
        if not os.path.exists(prototype_dataset):
            # BUG FIX: `raise '<string>'` (a string exception) has been a
            # TypeError since Python 2.6; raise a real exception instead.
            raise IOError('Error: %s does not exist. Cannot compute GeotiffMap' % prototype_dataset)
        self.prototype_dataset = prototype_dataset

    def is_single_year_indicator_image_type(self):
        # Each generated image covers exactly one year.
        return True

    def get_file_extension(self):
        return 'tif'

    def get_visualization_shorthand(self):
        return 'geotiff'

    def get_additional_metadata(self):
        return {'prototype_dataset':self.prototype_dataset}

    def _create_indicator(self, year):
        """Create a geotiff image for the given indicator"""
        values = self._get_indicator(year, wrap = False)
        dataset = self._get_dataset(year = year)
        # Reshape the flat attribute values onto the dataset's 2-D grid.
        values_in_2d_array = dataset.get_2d_attribute(
            attribute=None,
            attribute_data=values,
        )
        file_name = self.get_file_name(year = year)
        indicator_directory = self.get_storage_location()
        OpusGDAL().input_numpy_array_output_geotiff(
            values_in_2d_array,
            prototype_dataset = self.prototype_dataset,
            output_directory = indicator_directory,
            output_file_name = file_name)
        return self.get_file_path(year)
from opus_core.tests import opus_unittest
from opus_gui.results_manager.run.indicator_framework.maker.source_data import SourceData
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
    # Disabled test (note the skip_ prefix) -- see the NOTE below for why.
    def skip_test_create_indicator(self):
        ####NOTE: THIS TEST FAILS BECAUSE THE OPUS_CORE DATASET DOES NOT HAVE 2D ATTRIBUTES, X/Y AXES
        indicator_path = os.path.join(self.temp_cache_path, 'indicators')
        self.assert_(not os.path.exists(indicator_path))
        map = GeotiffMap(
            source_data = self.source_data,
            attribute = 'opus_core.test.attribute',
            dataset_name = 'test',
            prototype_dataset = None,
            years = None
        )
        # create(False): generate the image(s) without displaying them.
        map.create(False)
        self.assert_(os.path.exists(indicator_path))
        self.assert_(os.path.exists(os.path.join(indicator_path, 'test__geotiff__attribute__1980.tif')))
if __name__ == '__main__':
    # Only run the tests when the optional GDAL binding is importable
    # (Python 2 print statement, matching the rest of this file).
    try: import gdal
    except: print "Could not import gdal library."
    else:
        opus_unittest.main()
| apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/geotiff_map.py | Python | agpl-3.0 | 3,968 |
"""Constants for the Hue component."""
import logging
LOGGER = logging.getLogger('.')
DOMAIN = "hue"
API_NUPNP = 'https://www.meethue.com/api/nupnp'
| MartinHjelmare/home-assistant | homeassistant/components/hue/const.py | Python | apache-2.0 | 150 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.cephalopod.controllers import RHCephalopod, RHCephalopodSync, RHSystemInfo
from indico.web.flask.wrappers import IndicoBlueprint
# Blueprint for the community-hub ("cephalopod") pages.
cephalopod_blueprint = _bp = IndicoBlueprint('cephalopod', __name__, template_folder='templates',
                                             virtual_template_folder='cephalopod')

# Admin pages for the community hub, plus a public system-info endpoint.
_bp.add_url_rule('/admin/community-hub/', 'index', RHCephalopod, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/community-hub/sync', 'sync', RHCephalopodSync, methods=('POST',))
_bp.add_url_rule('/system-info', 'system-info', RHSystemInfo)
| nop33/indico | indico/modules/cephalopod/blueprint.py | Python | gpl-3.0 | 1,345 |
# coding=utf-8
# Copyright 2021 The SLOE Logistic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run experiment to understand convergence of SLOE estimator of eta.
Tests the SLOE estimator empirically by computing it
over a range of sample sizes for a bunch of different seeds, and storing in
csv files to be analyzed in a colab.
"""
from absl import app
from absl import flags
import apache_beam as beam
from apache_beam.options import pipeline_options
import numpy as np
import sloe_logistic.sloe_experiments.experiment_helpers as exp_helper
import statsmodels.api as sm
FLAGS = flags.FLAGS
flags.DEFINE_string('output_path', '/tmp/counts.txt', 'The output file path')
flags.DEFINE_integer('num_sims', 100, 'number of simulations to run')
flags.DEFINE_string('img_path', '/tmp/counts.png', 'Path to save plots')
N_RANGE = [250, 500, 1000, 2000, 4000]


def multiple_sample_sizes(seed):
  """Pair the given seed with every sample size in N_RANGE."""
  for sample_size in N_RANGE:
    yield [sample_size, seed]
def run_sim(params):
  """Runs simulation and computes estimated eta_hat to compare to truth.

  Args:
    params: [n, seed] pair as produced by multiple_sample_sizes.

  Returns:
    np.array([n, seed, eta_hat, eta_hat_simp]) where eta_hat is the SLOE
    leave-one-out variance estimate and eta_hat_simp is ||beta_hat||^2.
  """
  n = params[0]
  seed = params[1]
  kappa = FLAGS.features_per_sample  # aspect ratio p/n of the design matrix
  p = int(n * kappa)
  gamma = np.sqrt(FLAGS.signal_strength)
  # Deterministic per (fixed offset + seed) so runs are reproducible.
  rand_state = np.random.RandomState(201216 + seed)
  # Coefficients: 1/8 positive, 1/8 negative, 3/4 exactly zero, scaled by gamma.
  p_positive = int(p / 8)
  p_negative = p_positive
  p_zero = p - p_positive - p_negative
  beta = 2 * np.concatenate(
      (np.ones(p_positive), -np.ones(p_negative), np.zeros(p_zero)))
  beta *= gamma
  # Gaussian design, columns scaled so each row has roughly unit norm.
  features = rand_state.randn(n, p) / np.sqrt(p)
  # Labels drawn from the logistic model P(y=1|x) = sigmoid(x . beta).
  labels = (rand_state.rand(n) <= 1.0 /
            (1.0 + np.exp(-features.dot(beta)))).astype(float)
  logit_model = sm.Logit(labels, features)
  logit_model_fit = logit_model.fit(disp=False)
  beta_hat = logit_model_fit.params
  hessian = logit_model.hessian(beta_hat)
  # Computes X_i^T H^{-1} X_i for all examples. Used in Sherman-Morrison formula
  # below.
  xi_hessian_inv_xi = np.diag(
      features.dot(np.linalg.solve(hessian, features.T)))
  pred = logit_model_fit.predict(features)
  # Sherman-Morrison formula for X_i^T H_{-i}^{-1} X_i, where H_{-i} is Hessian
  # without i-th example.
  mod = xi_hessian_inv_xi / (1.0 + xi_hessian_inv_xi * pred * (1 - pred))
  # Leave-one-out "influence" values; their variance is the SLOE estimate.
  infl = mod * (labels - pred) + features.dot(beta_hat)
  eta_hat = np.var(infl)
  # Naive alternative estimator for comparison.
  eta_hat_simp = np.linalg.norm(beta_hat)**2
  return np.array([n, seed, eta_hat, eta_hat_simp])
def main(unused_argv):
  """Fan the simulations out over a Beam pipeline and write CSV shards."""
  # If you have custom beam options add them here.
  beam_options = pipeline_options.PipelineOptions()
  with beam.Pipeline(options=beam_options) as pipe:
    _ = (
        pipe
        | beam.Create(range(FLAGS.num_sims))        # one element per seed
        | beam.FlatMap(multiple_sample_sizes)       # expand to (n, seed) pairs
        | 'PrepShuffle' >> beam.Reshuffle()         # spread work across workers
        | beam.Map(run_sim)
        | beam.Map(exp_helper.numpy_array_to_csv)
        | beam.Reshuffle()
        |
        'WriteToText' >> beam.io.WriteToText(FLAGS.output_path, num_shards=5))
if __name__ == '__main__':
  app.run(main)
| google-research/sloe-logistic | sloe_experiments/est_gamma.py | Python | apache-2.0 | 3,474 |
from __future__ import absolute_import
import sys
import pip
from pip.compat import stdlib_pkgs
from pip.basecommand import Command
from pip.operations.freeze import freeze
from pip.wheel import WheelCache
DEV_PKGS = ('pip', 'setuptools', 'distribute', 'wheel')
class FreezeCommand(Command):
    """
    Output installed packages in requirements format.

    packages are listed in a case-insensitive sorted order.
    """
    name = 'freeze'
    usage = """
      %prog [options]"""
    summary = 'Output installed packages in requirements format.'
    # Log to stderr on both streams so stdout carries only requirements text.
    log_streams = ("ext://sys.stderr", "ext://sys.stderr")

    def __init__(self, *args, **kw):
        # Register the command-line options understood by `pip freeze`.
        super(FreezeCommand, self).__init__(*args, **kw)

        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help="Use the order in the given requirements file and its "
                 "comments when generating output. This option can be "
                 "used multiple times.")
        self.cmd_opts.add_option(
            '-f', '--find-links',
            dest='find_links',
            action='append',
            default=[],
            metavar='URL',
            help='URL for finding packages, which will be added to the '
                 'output.')
        self.cmd_opts.add_option(
            '-l', '--local',
            dest='local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not output '
                 'globally-installed packages.')
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')
        self.cmd_opts.add_option(
            '--all',
            dest='freeze_all',
            action='store_true',
            help='Do not skip these packages in the output:'
                 ' %s' % ', '.join(DEV_PKGS))
        self.cmd_opts.add_option(
            '--exclude-editable',
            dest='exclude_editable',
            action='store_true',
            help='Exclude editable package from output.')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # Empty FormatControl: no per-package binary/source preferences.
        format_control = pip.index.FormatControl(set(), set())
        wheel_cache = WheelCache(options.cache_dir, format_control)
        # Always hide stdlib-vendored packages; also hide pip/setuptools/...
        # unless --all was given.
        skip = set(stdlib_pkgs)
        if not options.freeze_all:
            skip.update(DEV_PKGS)
        freeze_kwargs = dict(
            requirement=options.requirements,
            find_links=options.find_links,
            local_only=options.local,
            user_only=options.user,
            skip_regex=options.skip_requirements_regex,
            isolated=options.isolated_mode,
            wheel_cache=wheel_cache,
            skip=skip,
            exclude_editable=options.exclude_editable)

        # One requirements line per installed distribution, straight to stdout.
        for line in freeze(**freeze_kwargs):
            sys.stdout.write(line + '\n')
| sigmavirus24/pip | pip/commands/freeze.py | Python | mit | 3,086 |
import socket, requests, json, codecs
from flask import Flask, request, jsonify
from manager import AcquireManager, HeartBeatManager
app = Flask(__name__)
manager = AcquireManager()
heart = HeartBeatManager()
@app.route('/')
def index():
    """Liveness check: plain greeting at the root URL."""
    greeting = 'Hello World!'
    return greeting
@app.route('/get_my_ip')
def get_my_ip():
    """Echo the caller's IP address back as JSON."""
    payload = {'ip': request.remote_addr}
    return jsonify(payload), 200
@app.route('/acquire')
def acquire():
    """Try to take the shared lock for the calling client."""
    client_ip = request.remote_addr
    if not manager.acquire(client_ip):
        return jsonify({'mes': 'NG'}), 200
    # Start a heartbeat that frees the lock if the client goes silent.
    heart.born(callback=manager.free)
    return jsonify({'mes': 'OK'}), 200
@app.route('/free')
def free():
    """Release the shared lock."""
    released = manager.free()
    message = 'OK' if released else 'NG'
    return jsonify({'mes': message}), 200
def main():
    """Load setting.json, configure the managers and start the Flask app."""
    print("""\
Remote Sekisan Python-remix
Takumi Sueda, 2014
""")
    # BUG FIX: the settings file handle was opened and never closed; use a
    # context manager so it is released even if JSON parsing fails.
    with codecs.open('setting.json', 'r', 'utf-8') as f:
        setting = json.loads(f.read())
    user = setting['user']
    manager.set_user_list(user)
    heart.set_ttl(setting['ttl'])
    app.run(host='0.0.0.0', debug=True)
if __name__ == '__main__':
main() | puhitaku/Remote-Sekisan-PyRemix | main.py | Python | mit | 1,112 |
# -*- python -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gdb_test import AssertEquals
import gdb_test
def test(gdb):
    # Stop at main, then evaluate expressions in the inferior through gdb.
    gdb.Command('break main')
    AssertEquals(gdb.ResumeCommand('continue')['reason'], 'breakpoint-hit')
    # Calling a debuggee function from gdb should work and update its globals.
    AssertEquals(gdb.Eval('test_call_from_gdb(1)'), '3')
    AssertEquals(gdb.Eval('global_var'), '2')
if __name__ == '__main__':
    gdb_test.RunTest(test, 'call_from_gdb')
| leighpauls/k2cro4 | native_client/tests/gdb/call_from_gdb.py | Python | bsd-3-clause | 528 |
import os
from django import VERSION as DJANGO_VERSION
import oscar
from oscar.defaults import * # noqa
# Path helper: resolve a path relative to this settings file's directory.
def location(x):
    """Return `x` joined onto the absolute directory containing this file."""
    # PEP 8 E731: was a lambda assigned to a name; a def is equivalent and
    # gives the function a proper __name__ for tracebacks.
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), x)
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('DATABASE_NAME', ':memory:'),
},
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.staticfiles',
'widget_tweaks',
# contains models we need for testing
'tests._site.model_tests_app',
'tests._site.myauth',
# Use a custom partner app to test overriding models. I can't
# find a way of doing this on a per-test basis, so I'm using a
# global change.
] + oscar.get_core_apps(['tests._site.apps.partner', 'tests._site.apps.customer'])
AUTH_USER_MODEL = 'myauth.User'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
location('_site/templates'),
oscar.OSCAR_MAIN_TEMPLATE_DIR,
],
'OPTIONS': {
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
],
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
]
}
}
]
if DJANGO_VERSION < (1, 10):
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
]
else:
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
]
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
HAYSTACK_CONNECTIONS = {'default': {'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'}}
PASSWORD_HASHERS = ['django.contrib.auth.hashers.MD5PasswordHasher']
ROOT_URLCONF = 'tests._site.urls'
LOGIN_REDIRECT_URL = '/accounts/'
STATIC_URL = '/static/'
DEBUG = False
SITE_ID = 1
USE_TZ = 1
APPEND_SLASH = True
DDF_DEFAULT_DATA_FIXTURE = 'tests.dynamic_fixtures.OscarDynamicDataFixtureClass'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
LANGUAGE_CODE = 'en-gb'
# temporary workaround for issue in sorl-thumbnail in Python 3
# https://github.com/mariocesar/sorl-thumbnail/pull/254
# BUG FIX: a stray trailing comma made this the one-element tuple (False,),
# which is truthy -- i.e. thumbnail debug mode was accidentally *enabled*.
THUMBNAIL_DEBUG = False
OSCAR_INITIAL_ORDER_STATUS = 'A'
OSCAR_ORDER_STATUS_PIPELINE = {'A': ('B',), 'B': ()}
OSCAR_INITIAL_LINE_STATUS = 'a'
OSCAR_LINE_STATUS_PIPELINE = {'a': ('b', ), 'b': ()}
SECRET_KEY = 'notverysecret'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| vicky2135/lucious | tests/settings.py | Python | bsd-3-clause | 4,196 |
def multiples(x):
    """Classify x by divisibility, first match wins:
    147 -> 'Fang', 7 -> 'Fizz', 15 -> 'Foo', otherwise 'Far'.
    """
    for divisor, label in ((147, 'Fang'), (7, 'Fizz'), (15, 'Foo')):
        if x % divisor == 0:
            return label
    return 'Far'
| the-zebulan/CodeWars | katas/beta/multiples_2.py | Python | mit | 164 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-04 05:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djsend', '0002_auto_20160303_1712'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='setting',
),
migrations.RemoveField(
model_name='genericsinglestimuli',
name='block_type',
),
migrations.RemoveField(
model_name='genericstimulipair',
name='block_type',
),
migrations.RemoveField(
model_name='microcomponentpair',
name='setting',
),
migrations.AlterIndexTogether(
name='texttrial',
index_together=set([]),
),
migrations.RemoveField(
model_name='texttrial',
name='global_settings_type',
),
migrations.RemoveField(
model_name='texttrial',
name='save_with',
),
migrations.AlterModelOptions(
name='categorizationblock',
options={},
),
migrations.AlterModelOptions(
name='similarityblock',
options={},
),
migrations.AddField(
model_name='categorizationblock',
name='name',
field=models.CharField(default='block name', help_text='A short name to describe this block', max_length=24),
preserve_default=False,
),
migrations.AddField(
model_name='genericsettingblock',
name='name',
field=models.CharField(default='block name', help_text='A short name to describe this block', max_length=24),
preserve_default=False,
),
migrations.AddField(
model_name='similarityblock',
name='name',
field=models.CharField(default='block name', help_text='A short name to describe this block', max_length=24),
preserve_default=False,
),
migrations.AlterIndexTogether(
name='genericsettingblock',
index_together=set([]),
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='GenericSingleStimuli',
),
migrations.DeleteModel(
name='GenericStimuliPair',
),
migrations.DeleteModel(
name='MicroComponentPair',
),
migrations.DeleteModel(
name='TextTrial',
),
]
| rivasd/djPsych | djsend/migrations/0003_auto_20160304_0048.py | Python | gpl-3.0 | 2,647 |
## taken from http://www.djangosnippets.org/snippets/1289/
from django import template
from django.template import loader, Node, Variable
from django.utils.encoding import smart_str, smart_unicode
from django.template.defaulttags import url
from django.template import VariableDoesNotExist
register = template.Library()
@register.tag
def breadcrumb(parser, token):
    """
    Renders the breadcrumb.
    Examples:
        {% breadcrumb "Title of breadcrumb" url_var %}
        {% breadcrumb context_var url_var %}
        {% breadcrumb "Just the title" %}
        {% breadcrumb just_context_var %}
    Parameters:
    -First parameter is the title of the crumb,
    -Second (optional) parameter is the url variable to link to, produced by url tag, i.e.:
        {% url person_detail object.id as person_url %}
    then:
        {% breadcrumb person.name person_url %}
    @author Andriy Drozdyuk
    """
    # Drop the tag name itself; remaining bits are: title [, url_context_var].
    return BreadcrumbNode(token.split_contents()[1:])
@register.tag
def breadcrumb_url(parser, token):
    """
    Same as breadcrumb
    but instead of url context variable takes in all the
    arguments URL tag takes.
        {% breadcrumb "Title of breadcrumb" person_detail person.id %}
        {% breadcrumb person.name person_detail person.id %}
    """

    bits = token.split_contents()
    if len(bits)==2:
        # Only a title was given: behave exactly like the plain breadcrumb tag.
        return breadcrumb(parser, token)

    # Extract our extra title parameter
    title = bits.pop(1)
    # Rewrite the token in place so Django's own `url` tag parser consumes the
    # remaining arguments as if the title had never been there.
    token.contents = ' '.join(bits)

    url_node = url(parser, token)
    return UrlBreadcrumbNode(title, url_node)
class BreadcrumbNode(Node):
def __init__(self, vars):
"""
First var is title, second var is url context variable
"""
self.vars = map(Variable,vars)
def render(self, context):
title = self.vars[0].var
if title.find("'")==-1 and title.find('"')==-1:
try:
val = self.vars[0]
title = val.resolve(context)
except:
title = ''
else:
title=title.strip("'").strip('"')
title=smart_unicode(title)
url = None
if len(self.vars)>1:
val = self.vars[1]
try:
url = val.resolve(context)
except VariableDoesNotExist:
print 'URL does not exist', val
url = None
return create_crumb(title, url)
class UrlBreadcrumbNode(Node):
    """Breadcrumb node whose link is rendered by a wrapped `url` tag node."""

    def __init__(self, title, url_node):
        self.title = Variable(title)
        self.url_node = url_node

    def render(self, context):
        title = self.title.var
        if title.find("'")==-1 and title.find('"')==-1:
            # Unquoted argument: resolve it as a template context variable.
            try:
                val = self.title
                title = val.resolve(context)
            # BUG FIX: was a bare `except:` that silently swallowed every
            # exception; only a failed variable lookup should fall back to
            # an empty title.
            except VariableDoesNotExist:
                title = ''
        else:
            # Quoted literal: strip the surrounding quotes.
            title=title.strip("'").strip('"')
        title=smart_unicode(title)

        url = self.url_node.render(context)
        return create_crumb(title, url)
def create_crumb(title, url=None):
    """Render a single breadcrumb: an arrow image followed by the (optionally
    linked) title.

    NOTE(review): `title` is interpolated without HTML escaping -- callers must
    not pass untrusted input.
    """
    arrow = ('<span class="breadcrumbs-arrow">'
             '<img src="/media/images/arrow.gif" alt="Arrow">'
             '</span>')
    if url:
        return "%s<a href='%s'>%s</a>" % (arrow, url, title)
    return "%s %s" % (arrow, title)
| jantoniomartin/django-machinery | indumatic/templatetags/breadcrumbs.py | Python | agpl-3.0 | 3,000 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Ferreyra, Jonathan <jalejandroferreyra@gmail.com>
# Copyright 2011 Fernandez, Emiliano <emilianohfernandez@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
from sys import argv
#~ class PathTools :
def getPathProgramFolder():
    '''Return the folder containing the running script (argv[0]), with a trailing separator.'''
    program_folder = convertPath(os.path.abspath(os.path.dirname(argv[0])) + "/")
    return program_folder
def getPathDataFolder():
    '''Return the path of the application's data/ directory.'''
    program_folder = convertPath(os.path.abspath(os.path.dirname(argv[0])) + "/")
    # [:-4] presumably strips a 4-char tail such as "bin/" to reach the app
    # root before descending into data/ -- TODO confirm against the layout.
    data_folder = convertPath(os.path.dirname(program_folder[:-4])+'/data/')
    return data_folder
def getPathRootFolder():
    '''Return the application's root directory.'''
    program_folder = convertPath(os.path.abspath(os.path.dirname(argv[0])) + "/")
    # [:-4] presumably strips a 4-char tail such as "bin/" to reach the app
    # root -- TODO confirm against the layout.
    root_folder = convertPath(os.path.dirname(program_folder[:-4])+'/')
    return root_folder
def convertPath(path):
    """Convert a '/'-separated path to the platform-specific separator.

    On POSIX the result is forced absolute (a '/' is prepended); on Windows
    the segments are joined with the native separator. Other platforms
    (neither 'posix' nor 'nt') fall through and return None, as before.
    """
    # Modernized: `apply(f, args)` is deprecated in Python 2 and removed in
    # Python 3; argument unpacking behaves identically here.
    parts = tuple(path.split('/'))
    if os.name == 'posix':
        return "/" + os.path.join(*parts)
    elif os.name == 'nt':
        return os.path.join(*parts)
if __name__ == '__main__':
    # Quick manual check: print the resolved data directory (Python 2 print).
    data = getPathDataFolder()
    print data
| informaticameg/Posta | tools/pathtools.py | Python | gpl-3.0 | 2,123 |
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import Http404
from django.shortcuts import render
from .helpers import *
def listing(request):
    """Render a paginated list of published articles.

    A non-integer ?page falls back to page 1; an out-of-range page falls back
    to the last page. The first page additionally features a random article.
    """
    published = helper_get_published_article()
    paginator = Paginator(published, 10)
    requested_page = request.GET.get('page')
    try:
        articles = paginator.page(requested_page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        articles = paginator.page(1)
    except EmptyPage:
        articles = paginator.page(paginator.num_pages)
    next_articles = helper_get_random_articles()
    random_article = helper_get_random_article() if articles.number == 1 else None
    context = {
        'random_article': random_article,
        'articles': articles,
        'next_articles': next_articles,
    }
    return render(request, 'list.html', context)
def detail(request, slug):
    """Render a single published article looked up by slug; 404 if absent."""
    published = Article.objects.filter(
        publication_date__lte=datetime.date.today()
    )
    try:
        article = published.get(slug=slug)
    except Article.DoesNotExist:
        raise Http404("Article does not exist")
    next_articles = helper_get_random_articles()
    context = {
        'article': article,
        'next_articles': next_articles,
    }
    return render(request, 'detail.html', context)
| masood09/GaleArticles | article/views.py | Python | mit | 1,396 |
from mediadrop.forms.admin.storage.localfiles import *
| jobsafran/mediadrop | mediacore/forms/admin/storage/localfiles.py | Python | gpl-3.0 | 55 |
class TestStateLookup(object):
    """Checks that AWS_INSTANCE_STATES entries are [code, name] pairs."""

    def test_simple_lookup(self):
        from schedule.window import AWS_INSTANCE_STATES
        assert AWS_INSTANCE_STATES.STOPPED == [80, 'stopped']

    def test_state_value_matches_dict_values(self):
        from schedule.window import AWS_INSTANCE_STATES
        di = {u'Code': 16, u'Name': u'running'}
        # BUG FIX: comparing a list against dict.values() is always False on
        # Python 3 (a dict_values view never equals a list); wrap in list()
        # so the comparison checks contents on both Python 2 and 3.
        assert AWS_INSTANCE_STATES.RUNNING == list(di.values())
| LostProperty/schedule | tests.py | Python | mit | 399 |
# -*- coding: utf-8 -*-
# Copyright 2019 Joan Marín <Github@JoanMarin>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import models
| odoo-colombia/l10n-colombia | l10n_co_product_uom/__init__.py | Python | agpl-3.0 | 159 |
#!/usr/bin/env python
# Usage: ./regrtest.py
import sys
import os, os.path
import subprocess
from subprocess import Popen, PIPE, call
import csv
timeout="2"
def isexec (fpath):
    """Return True if fpath names an existing, executable regular file."""
    # BUG FIX (idiom): `fpath == None` -> `fpath is None`; None is a singleton
    # and identity comparison is both correct and conventional (PEP 8).
    if fpath is None: return False
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

def which(program):
    """Resolve `program` like the shell: return it if it is an executable
    path, otherwise search each $PATH entry; None if nothing matches."""
    # Only the directory part matters; the unused `fname` binding was removed.
    fpath = os.path.dirname(program)
    if fpath:
        if isexec (program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if isexec (exe_file):
                return exe_file
    return None
def getBrunch ():
    """Locate the brunch.py driver under $COVENANT/bin, or raise IOError."""
    brunch = None
    if 'COVENANT' in os.environ:
        brunch = os.path.join(os.environ['COVENANT'], "bin", "brunch.py")
    if not isexec (brunch):
        raise IOError ("Cannot find brunch. Set environment COVENANT variable.")
    return brunch
def getCovenant ():
    """Locate the covenant binary under $COVENANT/bin, or raise IOError."""
    covenant = None
    if 'COVENANT' in os.environ:
        covenant = os.path.join(os.environ['COVENANT'], "bin", "covenant")
    if not isexec (covenant):
        raise IOError ("Cannot find covenant. Set environment COVENANT variable.")
    return covenant
def printf(format, *args):
    """C-style printf: interpolate args into format and write to stdout
    without appending a newline."""
    text = format % args
    sys.stdout.write(text)
def run_brunch(files, timeout, outdir, tool_cmmd, tool_args):
    """Run the brunch benchmark driver over *files* and exit on failure.

    files     -- iterable of benchmark file paths
    timeout   -- per-benchmark CPU limit, as a string (concatenated below)
    outdir    -- directory brunch writes its results into
    tool_cmmd -- the tool executable to benchmark (e.g. covenant)
    tool_args -- extra arguments appended after the tool command

    Exits the process with status 2 if brunch returns non-zero.
    """
    args = []
    args.append(getBrunch ())
    args.append(" ")
    for f in files:
        args.append(f)
        args.append(" ")
    args.append(" --mem 4096")
    args.append(" --cpu " + timeout)
    args.append(" --out " + outdir)
    args.append(" --format base:Cpu:Result:Status")
    args.append(" -- " + tool_cmmd + " " + tool_args)
    # The pieces are joined into one command line and run through the shell.
    strcmmd = ''.join(str(e) for e in args)
    ret = call(strcmmd, stdout=None, stderr=None, shell=True)
    if ret != 0:
        printf("Error during %s\n",args)
        sys.exit(2)
    return
def show_results(_f, print_ko, print_err):
    """Parse a brunch stats CSV and print a summary of the benchmark run.

    _f        -- path to the CSV produced by brunch (columns base/Cpu/Result/Status)
    print_ko  -- if True, print each imprecise/unsound benchmark when found
    print_err -- if True, print each erroring benchmark (timeout, segfault, ...)

    Returns (bench_data, solved_safe, solved_unsafe): bench_data maps a
    benchmark base name to its CPU time (errors are charged a fixed '50'),
    and the two lists name the benchmarks answered UNSAT and SAT.
    """
    bench_data = {}
    with open(_f, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        # One counter per possible outcome of a benchmark run.
        err, unsafe, safe, total_bench, imprecise, incorrect = 0, 0, 0, 0, 0, 0
        timeout, segfault, various, memory_out = 0, 0, 0, 0
        solved_safe = []
        solved_unsafe = []
        unsolved = []  # NOTE(review): collected nowhere below -- appears unused.
        for row in reader:
            total_bench +=1
            # The expected answer is encoded in the benchmark's file name
            # (an 'unsat'/'sat' substring of the base name).
            if row['Result'] == "UNSAT":
                if 'unsat' in row['base']:
                    safe +=1
                    solved_safe.append(row["base"])
                    bench_data.update({row['base']: row['Cpu']})
                else:
                    incorrect +=1
                    if print_ko: print row['File'] + " " + row['Result'] + ' --> KO'
            if row['Result'] == "SAT":
                if 'sat' in row['base']:
                    # NOTE(review): 'sat' is a substring of 'unsat', so an
                    # expected-UNSAT benchmark answered SAT lands here as
                    # "solved" rather than as unsound -- confirm intent.
                    unsafe +=1
                    solved_unsafe.append(row["base"])
                    bench_data.update({row['base']: row['Cpu']})
                else:
                    imprecise +=1
                    if print_ko: print row['File'] + " " + row['Result'] + ' --> KO'
            if row['Result'] == "ERR":
                err +=1
                # Errors are charged a fixed 50 seconds in the collected data.
                bench_data.update({row['base']: '50'})
                # Negative statuses are -(signal number) reported by brunch.
                if row["Status"] == "-9":
                    if print_err: print row['File'] + ' --> TIMEOUT'
                    timeout += 1
                if row["Status"] == "-6":
                    if print_err: print row['File'] + ' --> SEGFAULT'
                    segfault += 1
                if row["Status"] == "-11":
                    if print_err: print row['File'] + ' --> MEMORY-OUT'
                    memory_out += 1
            # Errors with a "normal" exit status fall into a catch-all bucket.
            if row["Result"] == "ERR" and (row["Status"] == "0" or row["Status"] == "1"):
                if print_err: print row['File'] + ' --> VARIOUS'
                various += 1
    print "\n\n==========SUMMARY of " + _f + " =========="
    print "TOTAL N. BENCHMARK: " + str(total_bench)
    print "SOLVED UNSAT : " + str(safe)
    print "SOLVED SAT : " + str(unsafe)
    print "NOT SOLVED : " + str(err)
    print "IMPRECISE (UNSAT -> SAT): " + str(imprecise)
    print "UNSOUND (SAT -> UNSAT) : " + str(incorrect)
    print "ERROR SEGFAULT : " + str(segfault)
    print "ERROR TIMEOUT : " + str(timeout)
    print "ERROR MEMORY-OUT: " + str(memory_out)
    print "ERROR VARIOUS : " + str(various)
    print "==============================="
    return bench_data, solved_safe, solved_unsafe
def main (argv):
    """Collect every .cfg benchmark below the current directory and run it.

    Results are written by brunch into ./default-config and then summarised
    (without per-benchmark KO/error detail) via show_results().
    """
    infiles= []
    for root, dirs, files in os.walk("./"):
        for file in files:
            if file.endswith(".cfg"):
                fullname = os.path.join(root, file)
                infiles.append(os.path.abspath(fullname))
    run_brunch(infiles, timeout, "default-config", getCovenant (), "")
    show_results(os.path.join("default-config","stats"), False, False)
if __name__ == "__main__":
    try:
        main (sys.argv)
    except:
        # Bare except is deliberate: report the exception type, then re-raise
        # so the traceback and non-zero exit status are preserved.
        print "Unexpected error:", sys.exc_info()[0]
        raise
| sav-tools/covenant | tests/play/regrtest.py | Python | mit | 4,962 |
# stacker, Python module for stacking of interferometric data.
# Copyright (C) 2014 Lukas Lindroos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
from ctypes import c_double, POINTER, c_char_p, c_int, c_bool
import numpy as np
import stacker
import stacker.pb
# Bind the C entry points exported by libstacker.
c_stack = stacker.libstacker.stack
c_stack.restype = c_double
# NOTE(review): ctypes expects the attribute name `argtypes` (plural);
# `argtype` is silently ignored, so no argument conversion or checking is
# installed here. Callers below wrap every argument explicitly, so renaming
# would tighten checking -- confirm argument kinds before changing it.
c_stack.argtype = [c_int, c_char_p, c_int,
                   c_int, c_char_p, c_int,
                   c_int, c_char_p, POINTER(c_double), c_int,
                   POINTER(c_double), POINTER(c_double), POINTER(c_double),
                   c_int, c_bool]
c_stack_mc = stacker.libstacker.stack_mc
c_stack_mc.argtype = [c_int, c_char_p, c_int,
                      c_int, c_char_p, POINTER(c_double), c_int,
                      POINTER(c_double), POINTER(c_double), POINTER(c_double),
                      c_int, c_int, POINTER(c_char_p),
                      POINTER(c_double), POINTER(c_double), c_int, c_bool]
def stack(coords, vis, outvis='', imagename='', cell='1arcsec', stampsize=32,
          primarybeam='guess', datacolumn='corrected', use_cuda = False):
    """
    Performs stacking in the uv domain.

        coords      -- A coordList object of all target coordinates.
        vis         -- Input uv data file.
        outvis      -- Output uv data file. Can be set to '' to not save
                       stacked visibilities.
        datacolumn  -- Either 'corrected' or 'data'. Which column stacking is
                       applied to.
        primarybeam -- How to calculated primary beam. Currently only two
                       options, 'guess' (using casa builtin model) or
                       'constant' (i.e. no correction)
        imagename   -- Optional argument to image stacked data.
        cell        -- pixel size for target image
        stampsize   -- size of target image in pixels
        use_cuda    -- forwarded to the C stacking routine.

    returns: Estimate of stacked flux assuming point source.
    """
    import shutil
    import os
    # casalog only exists inside a CASA session; stand-alone runs log nothing.
    try:
        from taskinit import casalog
    except ImportError:
        casalog = None
    if casalog is not None:
        casalog.origin('stacker')
        casalog.post('#'*42, 'INFO')
        casalog.post('#'*5 + ' {0: <31}'.format("Begin Task: Stacker")+'#'*5,
                     'INFO')
        casalog.post('Number of stacking positions: {0}'.format(len(coords)),
                     'INFO')
    # The output data set starts as a copy of the input (created only once).
    if outvis != '':
        if not os.access(outvis, os.F_OK):
            shutil.copytree(vis, outvis)
    infiletype, infilename, infileoptions = stacker._checkfile(vis, datacolumn)
    if outvis != '':
        outfiletype, outfilename, outfileoptions =\
            stacker._checkfile(outvis, datacolumn)
    else:
        outfilename = ''
        outfiletype = stacker.FILE_TYPE_NONE
        outfileoptions = 0
    if casalog is not None:
        casalog.post('Input uv file: \'{0}\' of type {1}'.format(
            infilename, stacker.FILETYPENAME[infiletype]), 'INFO')
        if outvis != '':
            casalog.post('Output uv file: \'{0}\' of type {1}'.format(
                outfilename, stacker.FILETYPENAME[outfiletype]), 'INFO')
        else:
            _ = 'No output uv file given, will not write stacked visibility'
            casalog.post(_, 'INFO')
    # primary beam
    if primarybeam == 'guess':
        primarybeam = stacker.pb.guesspb(vis)
    elif primarybeam in ['constant', 'none'] or primarybeam is None:
        primarybeam = stacker.pb.PrimaryBeamModel()
    pbtype, pbfile, pbnpars, pbpars = primarybeam.cdata()
    # Flatten the coordinate list into parallel ctypes arrays for the C call.
    x = [p.x for p in coords]
    y = [p.y for p in coords]
    weight = [p.weight for p in coords]
    x = (c_double*len(x))(*x)
    y = (c_double*len(y))(*y)
    weight = (c_double*len(weight))(*weight)
    import time
    start = time.time()
    flux = c_stack(infiletype, c_char_p(infilename), infileoptions,
                   outfiletype, c_char_p(outfilename), outfileoptions,
                   pbtype, c_char_p(pbfile), pbpars, pbnpars,
                   x, y, weight, c_int(len(coords)), c_bool(use_cuda))
    stop = time.time()
    # print("Started stack at {}".format(start))
    # print("Finished stack at {}".format(stop))
    print("Time used to stack: {0}".format(stop-start))
    # Optionally image the stacked visibilities with CASA's clean task.
    if imagename != '':
        import clean
        import clearcal
        clearcal.clearcal(vis=outvis)
        clean.clean(vis=outvis, imagename=imagename, field='0', mode='mfs',
                    cell=cell, imsize=stampsize, weighting='natural')
    if casalog is not None:
        casalog.post('#'*5 + ' {0: <31}'.format("End Task: stacker")+'#'*5)
        casalog.post('#'*42)
    return flux
def noise(coords, vis, weighting='sigma2', imagenames=[], beam=None, nrand=50,
          stampsize=32, maskradius=None):
    """Calculate stacking noise with a Monte Carlo method; can be time consuming.

    Stacks *nrand* sets of randomized positions (offset by roughly one beam)
    and returns the standard deviation of the real part of the stacked fluxes.

    NOTE(review): `imagenames=[]` is a mutable default argument; it is only
    read here, but a None default would be the safer idiom.
    """
    import stacker
    import stacker.image
    from math import pi
    if beam is None:
        # Inside CASA, read the restoring beam from the first image;
        # stand-alone, fall back to a fixed 1 arcsec (in radians).
        try:
            from taskinit import ia, qa
            ia.open(imagenames[0])
            beam = qa.convert(ia.restoringbeam()['major'], 'rad')['value']
            ia.done()
        except ImportError:
            beam = 1/3600./180.*pi
    dist = []
    for i in range(nrand):
        random_coords = stacker.randomizeCoords(coords, beam=beam)
        if weighting == 'sigma2':
            random_coords = stacker.image.calculate_sigma2_weights(
                random_coords, imagenames, stampsize, maskradius)
        dist.append(stack(random_coords, vis))
    return np.std(np.real(np.array(dist)))
def noise_fast(coords, models, vis, datacolumn='corrected',
               primarybeam='guess', use_cuda=True,
               nbin=None, bins=None):
    """Monte-Carlo stacking noise computed in one batched (CUDA) C call.

    coords -- list of Monte-Carlo samples; each sample is a coordinate list
              and all samples must have the same length.
    models -- one model (file name) per sample; len(models) == len(coords).
    vis    -- input uv data file.
    nbin/bins -- histogram binning; bins must hold nbin+1 edges.

    Returns (flux, weight) as two flat numpy arrays of length nmc*nbin.

    Raises NotImplementedError when use_cuda is False, RuntimeError on
    mismatched sample sizes, and ValueError on missing/inconsistent bins.
    """
    import stacker
    if not use_cuda:
        raise NotImplementedError
    if len(coords) != len(models):
        raise RuntimeError('Number of coordinate objects does not match number of models.')
    nmc = len(coords)
    infiletype, infilename, infileoptions = stacker._checkfile(vis, datacolumn)
    # Primary-beam handling mirrors stack().
    if primarybeam == 'guess':
        primarybeam = stacker.pb.guesspb(vis)
    elif primarybeam in ['constant', 'none'] or primarybeam is None:
        primarybeam = stacker.pb.PrimaryBeamModel()
    pbtype, pbfile, pbnpars, pbpars = primarybeam.cdata()
    coordlistlen = len(coords[0])
    for coordlist in coords:
        if coordlistlen != len(coordlist):
            raise RuntimeError('Number of coordinates must be same in all samples.')
    # Flatten all samples into parallel arrays for the single C call.
    x = []
    y = []
    weight = []
    for coordlist in coords:
        x.extend([p.x for p in coordlist])
        y.extend([p.y for p in coordlist])
        weight.extend([p.weight for p in coordlist])
    print('len(x) = {}'.format(len(x)))
    x = (c_double*len(x))(*x)
    y = (c_double*len(y))(*y)
    weight = (c_double*len(weight))(*weight)
    c_models = (c_char_p*len(models))(*models)
    # FIX: these were string raises (`raise 'No bins given!'`), which is a
    # TypeError at raise time on any modern Python; raise real exceptions.
    if bins is None or nbin is None:
        raise ValueError('No bins given!')
    if len(bins) != nbin+1:
        raise ValueError('Number of bins must match nbin!')
    c_bins = (c_double*(nbin+1))(*bins)
    # Output buffers, filled in place by the C routine.
    res_flux = (c_double*(nmc*nbin))(*([0]*(nmc*nbin)))
    res_weight = (c_double*(nmc*nbin))(*([0]*(nmc*nbin)))
    c_stack_mc(infiletype, c_char_p(infilename), infileoptions,
               pbtype, c_char_p(pbfile), pbpars, pbnpars,
               x, y, weight, c_int(len(coords[0])), c_int(nmc),
               c_models,
               res_flux, res_weight, c_bins, c_int(nbin),
               use_cuda)
    return np.array(list(res_flux)), np.array(list(res_weight))
| centowen/stacker | uv/__init__.py | Python | gpl-2.0 | 8,256 |
#!/usr/bin/env python
###############################################################################
#
# Copyright 2010 Locomatix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import sys
import locomatix
from _utils import *
def delete_all_fences():
    """Connect to a Locomatix endpoint and delete every fence on the account.

    Connection parameters come from the command line (see ArgsParser); the
    process exits with status 1 on connection failure, API error, or Ctrl-C.
    """
    parser = locomatix.ArgsParser()
    parser.add_description("Deletes all the fences")
    args = parser.parse_args(sys.argv)
    try:
        lxclient = locomatix.Client(args['custid'], \
                                    args['key'], \
                                    args['secret-key'], \
                                    args['host'], \
                                    args['port'])
    except:
        # Bare except: any failure while constructing the client is reported
        # as a connection problem.
        print "Unable to connect to %s at port %d" % (args['host'],args['port'])
        sys.exit(1)
    try:
        # Delete fences one by one, echoing each response in debug mode.
        for fence in lxclient.list_fences():
            lxclient.delete_fence(fence.fenceid)
            dprint(args, lxclient.response_body(), None)
    except locomatix.LxException, e:
        dprint(args, lxclient.response_body(), "error: failed to delete all fences - %s" % str(e))
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(1)


if __name__ == '__main__':
    delete_all_fences()
| locomatix/locomatix-python | locomatix/cli/delete_all_fences.py | Python | apache-2.0 | 1,745 |
# Demonstration: anonymous lambdas vs. named functions stored in a list and
# called later; both halves print the same values (4, 8, 16 then 9).
L = [lambda x: x ** 2,  # Inline function definition
     lambda x: x ** 3,
     lambda x: x ** 4]  # A list of 3 callable functions

for f in L:
    print(f(2))  # Prints 4, 8, 16
print(L[0](3))  # Prints 9


def f1(x): return x ** 2
def f2(x): return x ** 3  # Define named functions
def f3(x): return x ** 4

L = [f1, f2, f3]  # Reference by name
for f in L:
    print(f(2))  # Prints 4, 8, 16
print(L[0](3))  # Prints 9
| simontakite/sysadmin | pythonscripts/learningPython/lambdas1.py | Python | gpl-2.0 | 587 |
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --conditions auto:run2_mc_HIon --scenario HeavyIons -n 2 --eventcontent AODSIM -s RAW2DIGI,L1Reco,RECO --datatier AODSIM --beamspot RealisticHI2011Collision --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1_HI --magField 38T_PostLS1 --no_exec
import FWCore.ParameterSet.Config as cms

# Auto-generated cmsDriver configuration (see header); manual edits normally
# belong in the customisation functions at the bottom.
process = cms.Process('AODSIM')

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')

# -1 == process every event in the input files
process.maxEvents = cms.untracked.PSet(
    output = cms.untracked.int32(-1)
)

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/user/mnguyen/PyquenUnquenched_Dijet_NcollFilt_pthat80_740pre8_MCHI1_74_V4_GEN-SIM_v3/PyquenUnquenched_Dijet_pthat80_740pre8_MCHI2_74_V3_DIGI-RAW_v2/ee815b27030c232e2e0a7be48a50a463/step2_DIGI_L1_DIGI2RAW_RAW2DIGI_L1Reco_PU_103_1_koO.root'
    ),
    secondaryFileNames = cms.untracked.vstring()
)

process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True)
)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('step2 nevts:2'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition
process.AODSIMoutput = cms.OutputModule("PoolOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(4),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('AODSIM'),
        filterName = cms.untracked.string('')
    ),
    eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
    fileName = cms.untracked.string('step2_RAW2DIGI_L1Reco_DIJET_AODSIM.root'),
    outputCommands = process.AODSIMEventContent.outputCommands
)
# Keep extra electron-related products on top of the standard AODSIM content.
process.AODSIMoutput.outputCommands += [ 'keep recoElectronSeeds_ecalDrivenElectronSeeds_*_*' ]
process.AODSIMoutput.outputCommands += [ 'keep recoTrackExtras_electronGsfTracks_*_*' ]

# Additional output definition

# Other statements
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_hi', '')

# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstructionHeavyIons + process.hiMergedConformalPixelTracking)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.AODSIMoutput_step = cms.EndPath(process.AODSIMoutput)

# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.AODSIMoutput_step)

# customisation of the process.

# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1_HI

#call to customisation function customisePostLS1_HI imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1_HI(process)

# End of customisation functions
| tuos/RECO2AOD | cfg/round3/v1/mc/step2_RAW2DIGI_L1Reco_DIJET_AODSIM.py | Python | mit | 3,961 |
# coding=utf-8
"""
© 2014 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
"""
API for Correlator Module
This module finds correlation between two time series.
"""
from luminol import exceptions, utils
from luminol.algorithms.correlator_algorithms.all import correlator_algorithms
from luminol.anomaly_detector import AnomalyDetector
from luminol.constants import *
from luminol.modules.time_series import TimeSeries
class Correlator(object):
    """Find the correlation between two time series.

    On construction the two series are loaded (optionally converted to
    anomaly scores and/or cropped to a time period), the chosen algorithm is
    resolved, and the correlation is computed immediately.
    """

    def __init__(self, time_series_a, time_series_b, time_period=None, use_anomaly_score=False, algorithm_name=None, algorithm_params=None):
        """
        Initializer
        :param time_series_a: a TimeSeries, a dictionary or a path to a csv file(str).
        :param time_series_b: a TimeSeries, a dictionary or a path to a csv file(str).
        :param time_period: a tuple (start, end) representing a data period for considering correlation.
        :param bool use_anomaly_score: correlate anomaly scores instead of raw values.
        :param str algorithm_name: name of the algorithm to use.
        :param dict algorithm_params: additional params for the specific algorithm.
        """
        self.time_series_a = self._load(time_series_a)
        self.time_series_b = self._load(time_series_b)
        if use_anomaly_score:
            self.time_series_a = self._get_anomaly_scores(self.time_series_a)
            self.time_series_b = self._get_anomaly_scores(self.time_series_b)
        if time_period:
            start_p, end_p = time_period
            try:
                self.time_series_a = self.time_series_a.crop(start_p, end_p)
                self.time_series_b = self.time_series_b.crop(start_p, end_p)
            # No data points fall into the specific time range.
            except ValueError:
                raise exceptions.NotEnoughDataPoints
        self._sanity_check()
        self.algorithm_params = {'time_series_a': self.time_series_a, 'time_series_b': self.time_series_b}
        self._get_algorithm_and_params(algorithm_name, algorithm_params)
        self._correlate()

    def _get_anomaly_scores(self, time_series):
        """
        Get anomaly scores of a time series.
        :param TimeSeries time_series: a time_series.
        """
        return AnomalyDetector(time_series, score_only=True).get_all_scores()

    def _load(self, time_series):
        """
        Load time series into a TimeSeries object.
        :param time_series: a TimeSeries, a dictionary or a path to a csv file(str).
        :return TimeSeries: a TimeSeries object.
        """
        if isinstance(time_series, TimeSeries):
            return time_series
        if isinstance(time_series, dict):
            return TimeSeries(time_series)
        return TimeSeries(utils.read_csv(time_series))

    def _get_algorithm_and_params(self, algorithm_name, algorithm_params):
        """
        Get the specific algorithm and merge the algorithm params.
        :param str algorithm_name: name of the algorithm to use.
        :param dict algorithm_params: additional params for the specific algorithm.
        """
        algorithm_name = algorithm_name or CORRELATOR_ALGORITHM
        try:
            self.algorithm = correlator_algorithms[algorithm_name]
        except KeyError:
            raise exceptions.AlgorithmNotFound('luminol.Correlator: ' + str(algorithm_name) + ' not found.')
        # Merge parameters.
        if algorithm_params:
            if not isinstance(algorithm_params, dict):
                raise exceptions.InvalidDataFormat('luminol.Correlator: algorithm_params passed is not a dictionary.')
            else:
                # FIX: the previous `dict(a.items() + b.items())` merge only
                # works on Python 2 (dict views cannot be concatenated on
                # Python 3). Build the merged dict portably, with the same
                # precedence: the two time series installed by __init__
                # (self.algorithm_params) override user-supplied keys.
                merged_params = dict(algorithm_params)
                merged_params.update(self.algorithm_params)
                self.algorithm_params = merged_params

    def _sanity_check(self):
        """
        Check if the time series have more than two data points.
        """
        if len(self.time_series_a) < 2 or len(self.time_series_b) < 2:
            raise exceptions.NotEnoughDataPoints('luminol.Correlator: Too few data points!')

    def _correlate(self):
        """
        Run correlation algorithm.
        """
        a = self.algorithm(**self.algorithm_params)
        self.correlation_result = a.run()

    def get_correlation_result(self):
        """
        Get correlation result.
        :return CorrelationResult: a CorrelationResult object.
        """
        return self.correlation_result

    def is_correlated(self, threshold=None):
        """
        Compare with a threshold to determine whether two timeseries correlate to each other.
        :return: a CorrelationResult object if two time series correlate otherwise false.

        NOTE(review): the default threshold of None makes the `>=` comparison
        fail on Python 3 -- callers should always pass a numeric threshold.
        """
        return self.correlation_result if self.correlation_result.coefficient >= threshold else False
| linkedin/naarad | lib/luminol/src/luminol/correlator.py | Python | apache-2.0 | 4,691 |
# -------------------------------------------------------------------
# Switch Class
# -------------------------------------------------------------------
# source: Brian Beck, PSF License, ActiveState Code
# http://code.activestate.com/recipes/410692/
class switch(object):
    """ Readable switch construction

        Example:
        c = 'z'
        for case in switch(c):
            if case('a'): pass # only necessary if the rest of the suite is empty
            if case('b'): pass
            # ...
            if case('y'): pass
            if case('z'):
                print "c is lowercase!"
                break
            if case('A'): pass
            # ...
            if case('Z'):
                print "c is uppercase!"
                break
            if case(): # default
                print "I dunno what c was!"

        source: Brian Beck, PSF License, ActiveState Code
        http://code.activestate.com/recipes/410692/
    """

    def __init__(self, value):
        # The value being switched on, and whether a case has already
        # matched (enables C-style fall-through).
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # FIX: the original explicitly raised StopIteration here; under
        # PEP 479 (Python 3.7+) that is converted to RuntimeError when the
        # for-loop body finishes without a break. Returning normally ends
        # the generator with identical iteration behaviour.

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            # Fall-through from a previous match, or the default case().
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
#: class switch()
| AmritaLonkar/trunk | SU2_PY/SU2/util/switch.py | Python | gpl-2.0 | 1,518 |
"""
Barplot timeseries
==================

_thumb: .6, .4
"""
import numpy as np
import seaborn as sns

# Plain white background for the example plot.
sns.set(style="white")

# Example dataset bundled with seaborn (downloaded on first use).
planets = sns.load_dataset("planets")
years = np.arange(2000, 2015)

# Count of discovered planets per year, restricted to 2000-2014.
# NOTE(review): factorplot/size/x_order is the pre-0.9 seaborn API
# (later renamed catplot/height/order).
g = sns.factorplot("year", data=planets, palette="BuPu",
                   size=6, aspect=1.5, x_order=years)
g.set_xticklabels(step=2)
| jakevdp/seaborn | examples/timeseries_of_barplots.py | Python | bsd-3-clause | 332 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import with_statement
import re
import os
import textwrap
import mock
import __builtin__
from zope.interface import implements
from twisted.trial import unittest
from twisted.application import service
from twisted.internet import defer
from buildbot import config, buildslave, interfaces, revlinks, locks
from buildbot.process import properties, factory
from buildbot.test.util import dirs, compat
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.changes import base as changes_base
from buildbot.schedulers import base as schedulers_base
from buildbot.status import base as status_base
# Expected value of every global configuration attribute when nothing in the
# config file overrides it; individual tests below compare against (copies
# of) this dict.
global_defaults = dict(
    title='Buildbot',
    titleURL='http://buildbot.net',
    buildbotURL='http://localhost:8080/',
    changeHorizon=None,
    eventHorizon=50,
    logHorizon=None,
    buildHorizon=None,
    logCompressionLimit=4096,
    logCompressionMethod='bz2',
    logMaxTailSize=None,
    logMaxSize=None,
    properties=properties.Properties(),
    mergeRequests=None,
    prioritizeBuilders=None,
    slavePortnum=None,
    multiMaster=False,
    debugPassword=None,
    manhole=None,
)
class FakeChangeSource(changes_base.ChangeSource):
    # Concrete but inert change source, used purely as a config fixture.
    pass
class FakeStatusReceiver(status_base.StatusReceiver):
    # Concrete but inert status receiver, used purely as a config fixture.
    pass
class FakeScheduler(object):
    # Declares IScheduler so config validation accepts it as a scheduler.
    implements(interfaces.IScheduler)

    def __init__(self, name):
        # Only the name is consulted by the configuration checks.
        self.name = name
class FakeBuilder(object):
    """Minimal builder stand-in: stores constructor kwargs as attributes."""

    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class ConfigErrors(unittest.TestCase):
    """Unit tests for buildbot.config.ConfigErrors and config.error()."""

    def test_constr(self):
        ex = config.ConfigErrors(['a', 'b'])
        self.assertEqual(ex.errors, ['a', 'b'])

    def test_addError(self):
        ex = config.ConfigErrors(['a'])
        ex.addError('c')
        self.assertEqual(ex.errors, ['a', 'c'])

    def test_nonempty(self):
        # Truthiness tracks whether any errors have accumulated.
        empty = config.ConfigErrors()
        full = config.ConfigErrors(['a'])
        self.failUnless(not empty)
        self.failIf(not full)

    def test_error_raises(self):
        # Without a patched accumulator, config.error raises immediately.
        e = self.assertRaises(config.ConfigErrors, config.error, "message")
        self.assertEqual(e.errors, ["message"])

    def test_error_no_raise(self):
        # With config._errors patched in, config.error only accumulates.
        e = config.ConfigErrors()
        self.patch(config, "_errors", e)
        config.error("message")
        self.assertEqual(e.errors, ["message"])

    def test_str(self):
        # str() joins the accumulated errors with newlines.
        ex = config.ConfigErrors()
        self.assertEqual(str(ex), "")
        ex = config.ConfigErrors(["a"])
        self.assertEqual(str(ex), "a")
        ex = config.ConfigErrors(["a", "b"])
        self.assertEqual(str(ex), "a\nb")
        ex = config.ConfigErrors(["a"])
        ex.addError('c')
        self.assertEqual(str(ex), "a\nc")
class MasterConfig(ConfigErrorsMixin, dirs.DirsMixin, unittest.TestCase):
    """Tests for MasterConfig defaults and MasterConfig.loadConfig().

    Each test works in a throwaway 'basedir' created by DirsMixin and writes
    a master.cfg-style file there via install_config_file().
    """

    def setUp(self):
        self.basedir = os.path.abspath('basedir')
        self.filename = os.path.join(self.basedir, 'test.cfg')
        return self.setUpDirs('basedir')

    def tearDown(self):
        return self.tearDownDirs()

    # utils

    def patch_load_helpers(self):
        # patch out all of the "helpers" for laodConfig with null functions
        for n in dir(config.MasterConfig):
            if n.startswith('load_'):
                typ = 'loader'
            elif n.startswith('check_'):
                typ = 'checker'
            else:
                continue
            v = getattr(config.MasterConfig, n)
            if callable(v):
                # Loaders take (filename, config_dict); checkers take nothing.
                if typ == 'loader':
                    self.patch(config.MasterConfig, n,
                               mock.Mock(side_effect=
                                   lambda filename, config_dict: None))
                else:
                    self.patch(config.MasterConfig, n,
                               mock.Mock(side_effect=
                                   lambda: None))

    def install_config_file(self, config_file, other_files={}):
        # NOTE(review): mutable default for other_files -- never mutated
        # here, but a None default would be the safer idiom.
        config_file = textwrap.dedent(config_file)
        with open(os.path.join(self.basedir, self.filename), "w") as f:
            f.write(config_file)
        for file, contents in other_files.items():
            with open(file, "w") as f:
                f.write(contents)

    # tests

    def test_defaults(self):
        cfg = config.MasterConfig()
        expected = dict(
            #validation,
            db=dict(
                db_url='sqlite:///state.sqlite',
                db_poll_interval=None),
            metrics = None,
            caches = dict(Changes=10, Builds=15),
            schedulers = {},
            builders = [],
            slaves = [],
            change_sources = [],
            status = [],
            user_managers = [],
            revlink = revlinks.default_revlink_matcher
        )
        expected.update(global_defaults)
        got = dict([
            (attr, getattr(cfg, attr))
            for attr, exp in expected.iteritems() ])
        self.assertEqual(got, expected)

    def test_defaults_validation(self):
        # re's aren't comparable, but we can make sure the keys match
        cfg = config.MasterConfig()
        self.assertEqual(sorted(cfg.validation.keys()),
            sorted([
                'branch', 'revision', 'property_name', 'property_value',
            ]))

    def test_loadConfig_missing_file(self):
        self.assertRaisesConfigError(
            re.compile("configuration file .* does not exist"),
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    def test_loadConfig_missing_basedir(self):
        self.assertRaisesConfigError(
            re.compile("basedir .* does not exist"),
            lambda : config.MasterConfig.loadConfig(
                os.path.join(self.basedir, 'NO'), 'test.cfg'))

    def test_loadConfig_open_error(self):
        """
        Check that loadConfig() raises correct ConfigError exception in cases
        when configure file is found, but we fail to open it.
        """
        def raise_IOError(*args):
            raise IOError("error_msg")
        self.install_config_file('#dummy')
        # override build-in open() function to always rise IOError
        self.patch(__builtin__, "open", raise_IOError)
        # check that we got the expected ConfigError exception
        self.assertRaisesConfigError(
            re.compile("unable to open configuration file .*: error_msg"),
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    @compat.usesFlushLoggedErrors
    def test_loadConfig_parse_error(self):
        # 'def x:\nbar' is deliberately invalid Python.
        self.install_config_file('def x:\nbar')
        self.assertRaisesConfigError(
            re.compile("error while parsing.*traceback in logfile"),
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))
        self.assertEqual(len(self.flushLoggedErrors(SyntaxError)), 1)

    def test_loadConfig_eval_ConfigError(self):
        self.install_config_file("""\
            from buildbot import config
            BuildmasterConfig = { 'multiMaster': True }
            config.error('oh noes!')""")
        self.assertRaisesConfigError("oh noes",
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    def test_loadConfig_eval_ConfigErrors(self):
        # We test a config that has embedded errors, as well
        # as semantic errors that get added later. If an exception is raised
        # prematurely, then the semantic errors wouldn't get reported.
        self.install_config_file("""\
            from buildbot import config
            BuildmasterConfig = {}
            config.error('oh noes!')
            config.error('noes too!')""")
        e = self.assertRaises(config.ConfigErrors,
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))
        self.assertEqual(e.errors, ['oh noes!', 'noes too!',
                                    'no slaves are configured',
                                    'no builders are configured'])

    def test_loadConfig_no_BuildmasterConfig(self):
        self.install_config_file('x=10')
        self.assertRaisesConfigError("does not define 'BuildmasterConfig'",
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    def test_loadConfig_unknown_key(self):
        self.patch_load_helpers()
        self.install_config_file("""\
            BuildmasterConfig = dict(foo=10)
            """)
        self.assertRaisesConfigError("Unknown BuildmasterConfig key foo",
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    def test_loadConfig_unknown_keys(self):
        self.patch_load_helpers()
        self.install_config_file("""\
            BuildmasterConfig = dict(foo=10, bar=20)
            """)
        self.assertRaisesConfigError("Unknown BuildmasterConfig keys bar, foo",
            lambda : config.MasterConfig.loadConfig(
                self.basedir, self.filename))

    def test_loadConfig_success(self):
        self.patch_load_helpers()
        self.install_config_file("""\
            BuildmasterConfig = dict()
            """)
        rv = config.MasterConfig.loadConfig(
            self.basedir, self.filename)
        self.assertIsInstance(rv, config.MasterConfig)
        # make sure all of the loaders and checkers are called
        self.failUnless(rv.load_global.called)
        self.failUnless(rv.load_validation.called)
        self.failUnless(rv.load_db.called)
        self.failUnless(rv.load_metrics.called)
        self.failUnless(rv.load_caches.called)
        self.failUnless(rv.load_schedulers.called)
        self.failUnless(rv.load_builders.called)
        self.failUnless(rv.load_slaves.called)
        self.failUnless(rv.load_change_sources.called)
        self.failUnless(rv.load_status.called)
        self.failUnless(rv.load_user_managers.called)
        self.failUnless(rv.check_single_master.called)
        self.failUnless(rv.check_schedulers.called)
        self.failUnless(rv.check_locks.called)
        self.failUnless(rv.check_builders.called)
        self.failUnless(rv.check_status.called)
        self.failUnless(rv.check_horizons.called)
        self.failUnless(rv.check_slavePortnum.called)

    def test_loadConfig_with_local_import(self):
        self.patch_load_helpers()
        self.install_config_file("""\
            from subsidiary_module import x
            BuildmasterConfig = dict()
            """,
            {'basedir/subsidiary_module.py' : "x = 10"})
        rv = config.MasterConfig.loadConfig(
            self.basedir, self.filename)
        self.assertIsInstance(rv, config.MasterConfig)
class MasterConfig_loaders(ConfigErrorsMixin, unittest.TestCase):
filename = 'test.cfg'
def setUp(self):
self.cfg = config.MasterConfig()
self.errors = config.ConfigErrors()
self.patch(config, '_errors', self.errors)
# utils
def assertResults(self, **expected):
self.failIf(self.errors, self.errors.errors)
got = dict([
(attr, getattr(self.cfg, attr))
for attr, exp in expected.iteritems() ])
self.assertEqual(got, expected)
# tests
def test_load_global_defaults(self):
self.cfg.load_global(self.filename, {})
self.assertResults(**global_defaults)
def test_load_global_string_param_not_string(self):
self.cfg.load_global(self.filename,
dict(title=10))
self.assertConfigError(self.errors, 'must be a string')
def test_load_global_int_param_not_int(self):
self.cfg.load_global(self.filename,
dict(changeHorizon='yes'))
self.assertConfigError(self.errors, 'must be an int')
def do_test_load_global(self, config_dict, **expected):
self.cfg.load_global(self.filename, config_dict)
self.assertResults(**expected)
def test_load_global_title(self):
self.do_test_load_global(dict(title='hi'), title='hi')
def test_load_global_projectURL(self):
self.do_test_load_global(dict(projectName='hey'), title='hey')
def test_load_global_titleURL(self):
self.do_test_load_global(dict(titleURL='hi'), titleURL='hi')
def test_load_global_buildbotURL(self):
self.do_test_load_global(dict(buildbotURL='hey'), buildbotURL='hey')
def test_load_global_changeHorizon(self):
self.do_test_load_global(dict(changeHorizon=10), changeHorizon=10)
def test_load_global_changeHorizon_none(self):
self.do_test_load_global(dict(changeHorizon=None), changeHorizon=None)
def test_load_global_eventHorizon(self):
self.do_test_load_global(dict(eventHorizon=10), eventHorizon=10)
def test_load_global_logHorizon(self):
self.do_test_load_global(dict(logHorizon=10), logHorizon=10)
def test_load_global_buildHorizon(self):
self.do_test_load_global(dict(buildHorizon=10), buildHorizon=10)
def test_load_global_logCompressionLimit(self):
self.do_test_load_global(dict(logCompressionLimit=10),
logCompressionLimit=10)
def test_load_global_logCompressionMethod(self):
self.do_test_load_global(dict(logCompressionMethod='gz'),
logCompressionMethod='gz')
def test_load_global_logCompressionMethod_invalid(self):
self.cfg.load_global(self.filename,
dict(logCompressionMethod='foo'))
self.assertConfigError(self.errors, "must be 'bz2' or 'gz'")
def test_load_global_codebaseGenerator(self):
func = lambda _: "dummy"
self.do_test_load_global(dict(codebaseGenerator=func),
codebaseGenerator=func)
def test_load_global_codebaseGenerator_invalid(self):
self.cfg.load_global(self.filename,
dict(codebaseGenerator='dummy'))
self.assertConfigError(self.errors,
"codebaseGenerator must be a callable "
"accepting a dict and returning a str")
def test_load_global_logMaxSize(self):
self.do_test_load_global(dict(logMaxSize=123), logMaxSize=123)
def test_load_global_logMaxTailSize(self):
self.do_test_load_global(dict(logMaxTailSize=123), logMaxTailSize=123)
def test_load_global_properties(self):
exp = properties.Properties()
exp.setProperty('x', 10, self.filename)
self.do_test_load_global(dict(properties=dict(x=10)), properties=exp)
def test_load_global_properties_invalid(self):
self.cfg.load_global(self.filename,
dict(properties='yes'))
self.assertConfigError(self.errors, "must be a dictionary")
def test_load_global_mergeRequests_bool(self):
self.do_test_load_global(dict(mergeRequests=False),
mergeRequests=False)
def test_load_global_mergeRequests_callable(self):
callable = lambda : None
self.do_test_load_global(dict(mergeRequests=callable),
mergeRequests=callable)
def test_load_global_mergeRequests_invalid(self):
self.cfg.load_global(self.filename,
dict(mergeRequests='yes'))
self.assertConfigError(self.errors,
"must be a callable, True, or False")
def test_load_global_prioritizeBuilders_callable(self):
callable = lambda : None
self.do_test_load_global(dict(prioritizeBuilders=callable),
prioritizeBuilders=callable)
def test_load_global_prioritizeBuilders_invalid(self):
self.cfg.load_global(self.filename,
dict(prioritizeBuilders='yes'))
self.assertConfigError(self.errors, "must be a callable")
def test_load_global_slavePortnum_int(self):
self.do_test_load_global(dict(slavePortnum=123),
slavePortnum='tcp:123')
def test_load_global_slavePortnum_str(self):
self.do_test_load_global(dict(slavePortnum='udp:123'),
slavePortnum='udp:123')
def test_load_global_multiMaster(self):
self.do_test_load_global(dict(multiMaster=1), multiMaster=1)
def test_load_global_debugPassword(self):
self.do_test_load_global(dict(debugPassword='xyz'),
debugPassword='xyz')
def test_load_global_manhole(self):
mh = mock.Mock(name='manhole')
self.do_test_load_global(dict(manhole=mh), manhole=mh)
def test_load_global_revlink_callable(self):
callable = lambda : None
self.do_test_load_global(dict(revlink=callable),
revlink=callable)
def test_load_global_revlink_invalid(self):
self.cfg.load_global(self.filename, dict(revlink=''))
self.assertConfigError(self.errors, "must be a callable")
def test_load_validation_defaults(self):
self.cfg.load_validation(self.filename, {})
self.assertEqual(sorted(self.cfg.validation.keys()),
sorted([
'branch', 'revision', 'property_name', 'property_value',
]))
def test_load_validation_invalid(self):
self.cfg.load_validation(self.filename,
dict(validation='plz'))
self.assertConfigError(self.errors, "must be a dictionary")
def test_load_validation_unk_keys(self):
self.cfg.load_validation(self.filename,
dict(validation=dict(users='.*')))
self.assertConfigError(self.errors, "unrecognized validation key(s)")
def test_load_validation(self):
r = re.compile('.*')
self.cfg.load_validation(self.filename,
dict(validation=dict(branch=r)))
self.assertEqual(self.cfg.validation['branch'], r)
# check that defaults are still around
self.assertIn('revision', self.cfg.validation)
def test_load_db_defaults(self):
self.cfg.load_db(self.filename, {})
self.assertResults(
db=dict(db_url='sqlite:///state.sqlite', db_poll_interval=None))
def test_load_db_db_url(self):
self.cfg.load_db(self.filename, dict(db_url='abcd'))
self.assertResults(db=dict(db_url='abcd', db_poll_interval=None))
def test_load_db_db_poll_interval(self):
self.cfg.load_db(self.filename, dict(db_poll_interval=2))
self.assertResults(
db=dict(db_url='sqlite:///state.sqlite', db_poll_interval=2))
def test_load_db_dict(self):
self.cfg.load_db(self.filename,
dict(db=dict(db_url='abcd', db_poll_interval=10)))
self.assertResults(db=dict(db_url='abcd', db_poll_interval=10))
def test_load_db_unk_keys(self):
self.cfg.load_db(self.filename,
dict(db=dict(db_url='abcd', db_poll_interval=10, bar='bar')))
self.assertConfigError(self.errors, "unrecognized keys in")
def test_load_db_not_int(self):
self.cfg.load_db(self.filename,
dict(db=dict(db_url='abcd', db_poll_interval='ten')))
self.assertConfigError(self.errors, "must be an int")
def test_load_metrics_defaults(self):
self.cfg.load_metrics(self.filename, {})
self.assertResults(metrics=None)
def test_load_metrics_invalid(self):
self.cfg.load_metrics(self.filename, dict(metrics=13))
self.assertConfigError(self.errors, "must be a dictionary")
def test_load_metrics(self):
self.cfg.load_metrics(self.filename,
dict(metrics=dict(foo=1)))
self.assertResults(metrics=dict(foo=1))
def test_load_caches_defaults(self):
self.cfg.load_caches(self.filename, {})
self.assertResults(caches=dict(Changes=10, Builds=15))
def test_load_caches_invalid(self):
self.cfg.load_caches(self.filename, dict(caches=13))
self.assertConfigError(self.errors, "must be a dictionary")
def test_load_caches_buildCacheSize(self):
self.cfg.load_caches(self.filename,
dict(buildCacheSize=13))
self.assertResults(caches=dict(Builds=13, Changes=10))
def test_load_caches_buildCacheSize_and_caches(self):
self.cfg.load_caches(self.filename,
dict(buildCacheSize=13, caches=dict(builds=11)))
self.assertConfigError(self.errors, "cannot specify")
def test_load_caches_changeCacheSize(self):
self.cfg.load_caches(self.filename,
dict(changeCacheSize=13))
self.assertResults(caches=dict(Changes=13, Builds=15))
def test_load_caches_changeCacheSize_and_caches(self):
self.cfg.load_caches(self.filename,
dict(changeCacheSize=13, caches=dict(changes=11)))
self.assertConfigError(self.errors, "cannot specify")
def test_load_caches(self):
self.cfg.load_caches(self.filename,
dict(caches=dict(foo=1)))
self.assertResults(caches=dict(Changes=10, Builds=15, foo=1))
def test_load_caches_entries_test(self):
self.cfg.load_caches(self.filename,
dict(caches=dict(foo="1")))
self.assertConfigError(self.errors,
"value for cache size 'foo' must be an integer")
def test_load_schedulers_defaults(self):
self.cfg.load_schedulers(self.filename, {})
self.assertResults(schedulers={})
def test_load_schedulers_not_list(self):
self.cfg.load_schedulers(self.filename,
dict(schedulers=dict()))
self.assertConfigError(self.errors, "must be a list of")
def test_load_schedulers_not_instance(self):
self.cfg.load_schedulers(self.filename,
dict(schedulers=[mock.Mock()]))
self.assertConfigError(self.errors, "must be a list of")
def test_load_schedulers_dupe(self):
sch1 = FakeScheduler(name='sch')
sch2 = FakeScheduler(name='sch')
self.cfg.load_schedulers(self.filename,
dict(schedulers=[ sch1, sch2 ]))
self.assertConfigError(self.errors,
"scheduler name 'sch' used multiple times")
def test_load_schedulers(self):
class Sch(schedulers_base.BaseScheduler):
def __init__(self, name):
self.name = name
sch = Sch('sch')
self.cfg.load_schedulers(self.filename,
dict(schedulers=[sch]))
self.assertResults(schedulers=dict(sch=sch))
def test_load_builders_defaults(self):
self.cfg.load_builders(self.filename, {})
self.assertResults(builders=[])
def test_load_builders_not_list(self):
self.cfg.load_builders(self.filename,
dict(builders=dict()))
self.assertConfigError(self.errors, "must be a list")
def test_load_builders_not_instance(self):
self.cfg.load_builders(self.filename,
dict(builders=[mock.Mock()]))
self.assertConfigError(self.errors, "is not a builder config (in c['builders']")
def test_load_builders(self):
bldr = config.BuilderConfig(name='x',
factory=factory.BuildFactory(), slavename='x')
self.cfg.load_builders(self.filename,
dict(builders=[bldr]))
self.assertResults(builders=[bldr])
def test_load_builders_dict(self):
bldr = dict(name='x', factory=factory.BuildFactory(), slavename='x')
self.cfg.load_builders(self.filename,
dict(builders=[bldr]))
self.assertIsInstance(self.cfg.builders[0], config.BuilderConfig)
self.assertEqual(self.cfg.builders[0].name, 'x')
@compat.usesFlushWarnings
def test_load_builders_abs_builddir(self):
bldr = dict(name='x', factory=factory.BuildFactory(), slavename='x',
builddir=os.path.abspath('.'))
self.cfg.load_builders(self.filename,
dict(builders=[bldr]))
self.assertEqual(
len(self.flushWarnings([self.cfg.load_builders])),
1)
def test_load_slaves_defaults(self):
self.cfg.load_slaves(self.filename, {})
self.assertResults(slaves=[])
def test_load_slaves_not_list(self):
self.cfg.load_slaves(self.filename,
dict(slaves=dict()))
self.assertConfigError(self.errors, "must be a list")
def test_load_slaves_not_instance(self):
self.cfg.load_slaves(self.filename,
dict(slaves=[mock.Mock()]))
self.assertConfigError(self.errors, "must be a list of")
def test_load_slaves_reserved_names(self):
for name in 'debug', 'change', 'status':
self.cfg.load_slaves(self.filename,
dict(slaves=[buildslave.BuildSlave(name, 'x')]))
self.assertConfigError(self.errors, "is reserved")
self.errors.errors[:] = [] # clear out the errors
def test_load_slaves(self):
sl = buildslave.BuildSlave('foo', 'x')
self.cfg.load_slaves(self.filename,
dict(slaves=[sl]))
self.assertResults(slaves=[sl])
def test_load_change_sources_defaults(self):
self.cfg.load_change_sources(self.filename, {})
self.assertResults(change_sources=[])
def test_load_change_sources_not_instance(self):
self.cfg.load_change_sources(self.filename,
dict(change_source=[mock.Mock()]))
self.assertConfigError(self.errors, "must be a list of")
def test_load_change_sources_single(self):
chsrc = FakeChangeSource()
self.cfg.load_change_sources(self.filename,
dict(change_source=chsrc))
self.assertResults(change_sources=[chsrc])
def test_load_change_sources_list(self):
chsrc = FakeChangeSource()
self.cfg.load_change_sources(self.filename,
dict(change_source=[chsrc]))
self.assertResults(change_sources=[chsrc])
def test_load_status_not_list(self):
self.cfg.load_status(self.filename, dict(status="not-list"))
self.assertConfigError(self.errors, "must be a list of")
def test_load_status_not_status_rec(self):
self.cfg.load_status(self.filename, dict(status=['fo']))
self.assertConfigError(self.errors, "must be a list of")
def test_load_user_managers_defaults(self):
self.cfg.load_user_managers(self.filename, {})
self.assertResults(user_managers=[])
def test_load_user_managers_not_list(self):
self.cfg.load_user_managers(self.filename,
dict(user_managers='foo'))
self.assertConfigError(self.errors, "must be a list")
def test_load_user_managers(self):
um = mock.Mock()
self.cfg.load_user_managers(self.filename,
dict(user_managers=[um]))
self.assertResults(user_managers=[um])
class MasterConfig_checkers(ConfigErrorsMixin, unittest.TestCase):
def setUp(self):
self.cfg = config.MasterConfig()
self.errors = config.ConfigErrors()
self.patch(config, '_errors', self.errors)
# utils
def setup_basic_attrs(self):
# set up a basic config for checking; this will be modified below
sch = mock.Mock()
sch.name = 'sch'
sch.listBuilderNames = lambda : [ 'b1', 'b2' ]
b1 = mock.Mock()
b1.name = 'b1'
b2 = mock.Mock()
b2.name = 'b2'
self.cfg.schedulers = dict(sch=sch)
self.cfg.slaves = [ mock.Mock() ]
self.cfg.builders = [ b1, b2 ]
def setup_builder_locks(self,
builder_lock=None,
dup_builder_lock=False,
bare_builder_lock=False):
"""Set-up two mocked builders with specified locks.
@type builder_lock: string or None
@param builder_lock: Name of the lock to add to first builder.
If None, no lock is added.
@type dup_builder_lock: boolean
@param dup_builder_lock: if True, add a lock with duplicate name
to the second builder
@type dup_builder_lock: boolean
@param bare_builder_lock: if True, add bare lock objects, don't wrap
them into locks.LockAccess object
"""
def bldr(name):
b = mock.Mock()
b.name = name
b.locks = []
b.factory.steps = [ ('cls', (), dict(locks=[])) ]
return b
def lock(name):
l = mock.Mock(spec=locks.MasterLock)
l.name = name
if bare_builder_lock:
return l
return locks.LockAccess(l, "counting", _skipChecks=True)
b1, b2 = bldr('b1'), bldr('b2')
self.cfg.builders = [ b1, b2 ]
if builder_lock:
b1.locks.append(lock(builder_lock))
if dup_builder_lock:
b2.locks.append(lock(builder_lock))
# tests
def test_check_single_master_multimaster(self):
self.cfg.multiMaster = True
self.cfg.check_single_master()
self.assertNoConfigErrors(self.errors)
def test_check_single_master_no_builders(self):
self.setup_basic_attrs()
self.cfg.builders = [ ]
self.cfg.check_single_master()
self.assertConfigError(self.errors, "no builders are configured")
def test_check_single_master_no_slaves(self):
self.setup_basic_attrs()
self.cfg.slaves = [ ]
self.cfg.check_single_master()
self.assertConfigError(self.errors, "no slaves are configured")
def test_check_single_master_unsch_builder(self):
self.setup_basic_attrs()
b3 = mock.Mock()
b3.name = 'b3'
self.cfg.builders.append(b3)
self.cfg.check_single_master()
self.assertConfigError(self.errors, "have no schedulers to drive them")
def test_check_schedulers_unknown_builder(self):
self.setup_basic_attrs()
del self.cfg.builders[1] # remove b2, leaving b1
self.cfg.check_schedulers()
self.assertConfigError(self.errors, "Unknown builder 'b2'")
def test_check_schedulers_ignored_in_multiMaster(self):
self.setup_basic_attrs()
del self.cfg.builders[1] # remove b2, leaving b1
self.cfg.multiMaster = True
self.cfg.check_schedulers()
self.assertNoConfigErrors(self.errors)
def test_check_schedulers(self):
self.setup_basic_attrs()
self.cfg.check_schedulers()
self.assertNoConfigErrors(self.errors)
def test_check_locks_dup_builder_lock(self):
self.setup_builder_locks(builder_lock='l', dup_builder_lock=True)
self.cfg.check_locks()
self.assertConfigError(self.errors, "Two locks share")
def test_check_locks(self):
self.setup_builder_locks(builder_lock='bl')
self.cfg.check_locks()
self.assertNoConfigErrors(self.errors)
def test_check_locks_none(self):
# no locks in the whole config, should be fine
self.setup_builder_locks()
self.cfg.check_locks()
self.assertNoConfigErrors(self.errors)
def test_check_locks_bare(self):
# check_locks() should be able to handle bare lock object,
# lock objects that are not wrapped into LockAccess() object
self.setup_builder_locks(builder_lock='oldlock',
bare_builder_lock=True)
self.cfg.check_locks()
self.assertNoConfigErrors(self.errors)
def test_check_builders_unknown_slave(self):
sl = mock.Mock()
sl.slavename = 'xyz'
self.cfg.slaves = [ sl ]
b1 = FakeBuilder(slavenames=[ 'xyz', 'abc' ], builddir='x', name='b1')
self.cfg.builders = [ b1 ]
self.cfg.check_builders()
self.assertConfigError(self.errors,
"builder 'b1' uses unknown slaves 'abc'")
def test_check_builders_duplicate_name(self):
b1 = FakeBuilder(slavenames=[], name='b1', builddir='1')
b2 = FakeBuilder(slavenames=[], name='b1', builddir='2')
self.cfg.builders = [ b1, b2 ]
self.cfg.check_builders()
self.assertConfigError(self.errors,
"duplicate builder name 'b1'")
def test_check_builders_duplicate_builddir(self):
b1 = FakeBuilder(slavenames=[], name='b1', builddir='dir')
b2 = FakeBuilder(slavenames=[], name='b2', builddir='dir')
self.cfg.builders = [ b1, b2 ]
self.cfg.check_builders()
self.assertConfigError(self.errors,
"duplicate builder builddir 'dir'")
def test_check_builders(self):
sl = mock.Mock()
sl.slavename = 'a'
self.cfg.slaves = [ sl ]
b1 = FakeBuilder(slavenames=[ 'a' ], name='b1', builddir='dir1')
b2 = FakeBuilder(slavenames=[ 'a' ], name='b2', builddir='dir2')
self.cfg.builders = [ b1, b2 ]
self.cfg.check_builders()
self.assertNoConfigErrors(self.errors)
def test_check_status_fails(self):
st = FakeStatusReceiver()
st.checkConfig = lambda status: config.error("oh noes")
self.cfg.status = [ st ]
self.cfg.check_status()
self.assertConfigError(self.errors, "oh noes")
def test_check_status(self):
st = FakeStatusReceiver()
st.checkConfig = mock.Mock()
self.cfg.status = [ st ]
self.cfg.check_status()
self.assertNoConfigErrors(self.errors)
st.checkConfig.assert_called_once_with(self.cfg.status)
def test_check_horizons(self):
self.cfg.logHorizon = 100
self.cfg.buildHorizon = 50
self.cfg.check_horizons()
self.assertConfigError(self.errors, "logHorizon must be less")
def test_check_slavePortnum_set(self):
self.cfg.slavePortnum = 10
self.cfg.check_slavePortnum()
self.assertNoConfigErrors(self.errors)
def test_check_slavePortnum_not_set_slaves(self):
self.cfg.slaves = [ mock.Mock() ]
self.cfg.check_slavePortnum()
self.assertConfigError(self.errors,
"slaves are configured, but no slavePortnum is set")
def test_check_slavePortnum_not_set_debug(self):
self.cfg.debugPassword = 'ssh'
self.cfg.check_slavePortnum()
self.assertConfigError(self.errors,
"debug client is configured, but no slavePortnum is set")
class BuilderConfig(ConfigErrorsMixin, unittest.TestCase):
factory = factory.BuildFactory()
# utils
def assertAttributes(self, cfg, **expected):
got = dict([
(attr, getattr(cfg, attr))
for attr, exp in expected.iteritems() ])
self.assertEqual(got, expected)
# tests
def test_no_name(self):
self.assertRaisesConfigError(
"builder's name is required",
lambda : config.BuilderConfig(
factory=self.factory, slavenames=['a']))
def test_reserved_name(self):
self.assertRaisesConfigError(
"builder names must not start with an underscore: '_a'",
lambda : config.BuilderConfig(name='_a',
factory=self.factory, slavenames=['a']))
def test_no_factory(self):
self.assertRaisesConfigError(
"builder 'a' has no factory",
lambda : config.BuilderConfig(
name='a', slavenames=['a']))
def test_wrong_type_factory(self):
self.assertRaisesConfigError(
"builder 'a's factory is not",
lambda : config.BuilderConfig(
factory=[], name='a', slavenames=['a']))
def test_no_slavenames(self):
self.assertRaisesConfigError(
"builder 'a': at least one slavename is required",
lambda : config.BuilderConfig(
name='a', factory=self.factory))
def test_bogus_slavenames(self):
self.assertRaisesConfigError(
"slavenames must be a list or a string",
lambda : config.BuilderConfig(
name='a', slavenames={1:2}, factory=self.factory))
def test_bogus_slavename(self):
self.assertRaisesConfigError(
"slavename must be a string",
lambda : config.BuilderConfig(
name='a', slavename=1, factory=self.factory))
def test_bogus_category(self):
self.assertRaisesConfigError(
"category must be a string",
lambda : config.BuilderConfig(category=13,
name='a', slavenames=['a'], factory=self.factory))
def test_inv_nextSlave(self):
self.assertRaisesConfigError(
"nextSlave must be a callable",
lambda : config.BuilderConfig(nextSlave="foo",
name="a", slavenames=['a'], factory=self.factory))
def test_inv_nextBuild(self):
self.assertRaisesConfigError(
"nextBuild must be a callable",
lambda : config.BuilderConfig(nextBuild="foo",
name="a", slavenames=['a'], factory=self.factory))
def test_inv_canStartBuild(self):
self.assertRaisesConfigError(
"canStartBuild must be a callable",
lambda : config.BuilderConfig(canStartBuild="foo",
name="a", slavenames=['a'], factory=self.factory))
def test_inv_env(self):
self.assertRaisesConfigError(
"builder's env must be a dictionary",
lambda : config.BuilderConfig(env="foo",
name="a", slavenames=['a'], factory=self.factory))
def test_defaults(self):
cfg = config.BuilderConfig(
name='a b c', slavename='a', factory=self.factory)
self.assertIdentical(cfg.factory, self.factory)
self.assertAttributes(cfg,
name='a b c',
slavenames=['a'],
builddir='a_b_c',
slavebuilddir='a_b_c',
category='',
nextSlave=None,
locks=[],
env={},
properties={},
mergeRequests=None,
description=None)
def test_args(self):
cfg = config.BuilderConfig(
name='b', slavename='s1', slavenames='s2', builddir='bd',
slavebuilddir='sbd', factory=self.factory, category='c',
nextSlave=lambda : 'ns', nextBuild=lambda : 'nb', locks=['l'],
env=dict(x=10), properties=dict(y=20), mergeRequests='mr',
description='buzz')
self.assertIdentical(cfg.factory, self.factory)
self.assertAttributes(cfg,
name='b',
slavenames=['s2', 's1'],
builddir='bd',
slavebuilddir='sbd',
category='c',
locks=['l'],
env={'x':10},
properties={'y':20},
mergeRequests='mr',
description='buzz')
def test_getConfigDict(self):
ns = lambda : 'ns'
nb = lambda : 'nb'
cfg = config.BuilderConfig(
name='b', slavename='s1', slavenames='s2', builddir='bd',
slavebuilddir='sbd', factory=self.factory, category='c',
nextSlave=ns, nextBuild=nb, locks=['l'],
env=dict(x=10), properties=dict(y=20), mergeRequests='mr',
description='buzz')
self.assertEqual(cfg.getConfigDict(), {'builddir': 'bd',
'category': 'c',
'description': 'buzz',
'env': {'x': 10},
'factory': self.factory,
'locks': ['l'],
'mergeRequests': 'mr',
'name': 'b',
'nextBuild': nb,
'nextSlave': ns,
'properties': {'y': 20},
'slavebuilddir': 'sbd',
'slavenames': ['s2', 's1'],
})
class FakeService(config.ReconfigurableServiceMixin,
service.Service):
succeed = True
call_index = 1
def reconfigService(self, new_config):
self.called = FakeService.call_index
FakeService.call_index += 1
d = config.ReconfigurableServiceMixin.reconfigService(self, new_config)
if not self.succeed:
@d.addCallback
def fail(_):
raise ValueError("oh noes")
return d
class FakeMultiService(config.ReconfigurableServiceMixin,
service.MultiService):
def reconfigService(self, new_config):
self.called = True
d = config.ReconfigurableServiceMixin.reconfigService(self, new_config)
return d
class ReconfigurableServiceMixin(unittest.TestCase):
def test_service(self):
svc = FakeService()
d = svc.reconfigService(mock.Mock())
@d.addCallback
def check(_):
self.assertTrue(svc.called)
return d
@defer.inlineCallbacks
def test_service_failure(self):
svc = FakeService()
svc.succeed = False
try:
yield svc.reconfigService(mock.Mock())
except ValueError:
pass
else:
self.fail("should have raised ValueError")
def test_multiservice(self):
svc = FakeMultiService()
ch1 = FakeService()
ch1.setServiceParent(svc)
ch2 = FakeMultiService()
ch2.setServiceParent(svc)
ch3 = FakeService()
ch3.setServiceParent(ch2)
d = svc.reconfigService(mock.Mock())
@d.addCallback
def check(_):
self.assertTrue(svc.called)
self.assertTrue(ch1.called)
self.assertTrue(ch2.called)
self.assertTrue(ch3.called)
return d
def test_multiservice_priority(self):
parent = FakeMultiService()
svc128 = FakeService()
svc128.setServiceParent(parent)
services = [ svc128 ]
for i in range(20, 1, -1):
svc = FakeService()
svc.reconfig_priority = i
svc.setServiceParent(parent)
services.append(svc)
d = parent.reconfigService(mock.Mock())
@d.addCallback
def check(_):
prio_order = [ svc.called for svc in services ]
called_order = sorted(prio_order)
self.assertEqual(prio_order, called_order)
return d
@compat.usesFlushLoggedErrors
@defer.inlineCallbacks
def test_multiservice_nested_failure(self):
svc = FakeMultiService()
ch1 = FakeService()
ch1.setServiceParent(svc)
ch1.succeed = False
try:
yield svc.reconfigService(mock.Mock())
except ValueError:
pass
else:
self.fail("should have raised ValueError")
| denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_config.py | Python | mit | 43,114 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent4000A import *
class agilentDSOX4022A(agilent4000A):
"Agilent InfiniiVision DSOX4022A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO-X 4022A')
super(agilentDSOX4022A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._init_channels()
| sephalon/python-ivi | ivi/agilent/agilentDSOX4022A.py | Python | mit | 1,689 |
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import converters
from neutron_lib import constants
from neutron_lib import exceptions
import six
from neutron._i18n import _
from neutron.api import extensions
DISTRIBUTED = 'distributed'
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
DISTRIBUTED: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': constants.ATTR_NOT_SPECIFIED,
'convert_to': converters.convert_to_boolean_if_not_none,
'enforce_policy': True},
}
}
class DVRMacAddressNotFound(exceptions.NotFound):
message = _("Distributed Virtual Router Mac Address for "
"host %(host)s does not exist.")
class MacAddressGenerationFailure(exceptions.ServiceUnavailable):
message = _("Unable to generate unique DVR mac for host %(host)s.")
class Dvr(extensions.ExtensionDescriptor):
"""Extension class supporting distributed virtual router."""
@classmethod
def get_name(cls):
return "Distributed Virtual Router"
@classmethod
def get_alias(cls):
return constants.L3_DISTRIBUTED_EXT_ALIAS
@classmethod
def get_description(cls):
return "Enables configuration of Distributed Virtual Routers."
@classmethod
def get_updated(cls):
return "2014-06-1T10:00:00-00:00"
def get_required_extensions(self):
return ["router"]
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class DVRMacAddressPluginBase(object):
@abc.abstractmethod
def get_dvr_mac_address_list(self, context):
pass
@abc.abstractmethod
def get_dvr_mac_address_by_host(self, context, host):
pass
| sebrandon1/neutron | neutron/extensions/dvr.py | Python | apache-2.0 | 2,605 |
"""Excitation lists base classes
"""
from math import sqrt
import numpy as np
import gpaw.mpi as mpi
from gpaw.output import initialize_text_stream
class ExcitationList(list):
"""General Excitation List class.
"""
def __init__(self, calculator=None, txt=None):
# initialise empty list
list.__init__(self)
self.calculator = calculator
if not txt and calculator:
txt = calculator.txt
self.txt, firsttime = initialize_text_stream(txt, mpi.rank)
def get_calculator(self):
return self.calculator
def get_energies(self):
"""Get excitation energies in Hartrees"""
el = []
for ex in self:
el.append(ex.get_energy())
return np.array(el)
def get_trk(self):
"""Evaluate the Thonmas Reiche Kuhn sum rule"""
trkm = np.zeros((3))
for ex in self:
trkm += ex.get_energy()*ex.get_dipol_me()**2
return 2.*trkm # scale to get the number of electrons
def get_polarizabilities(self, lmax=7):
"""Calculate the Polarisabilities
see Jamorski et al. J. Chem. Phys. 104 (1996) 5134"""
S = np.zeros((lmax+1))
for ex in self:
e = ex.get_energy()
f = ex.get_oscillator_strength()[0]
for l in range(lmax+1):
S[l] += e**(-2 * l) * f
return S
def set_calculator(self, calculator):
self.calculator = calculator
def __str__(self):
string = '# ' + str(type(self))
if len(self) != 0:
string += ', %d excitations:' % len(self)
string += '\n'
for ex in self:
string += '# '+ex.__str__()+"\n"
return string
class Excitation:
def get_energy(self):
"""Get the excitations energy relative to the ground state energy
in Hartrees.
"""
return self.energy
def get_dipol_me(self):
"""return the excitations dipole matrix element
including the occupation factor"""
return self.me / sqrt(self.energy)
def get_oscillator_strength(self, form='r'):
"""Return the excitations dipole oscillator strength.
self.me is assumed to be::
form='r': sqrt(f * E) * <I|r|J>,
form='v': sqrt(f / E) * <I|d/(dr)|J>
for f = multiplicity, E = transition energy and initial and
final states::
|I>, |J>
"""
if form == 'r':
# length form
me = self.me
elif form == 'v':
raise NotImplemented
# velocity form
me = self.muv
osz = [0.]
for c in range(3):
val = 2. * me[c]**2
osz.append(val)
osz[0] += val / 3.
return osz
def set_energy(self, E):
"""Set the excitations energy relative to the ground state energy"""
self.energy = E
| qsnake/gpaw | gpaw/lrtddft/excitation.py | Python | gpl-3.0 | 2,982 |
""" This module loads all the classes from the VTK IO library into its
namespace. This is a required module."""
import os
if os.name == 'posix':
from libvtkIOPython import *
else:
from vtkIOPython import *
| naucoin/VTKSlicerWidgets | Wrapping/Python/vtk/io.py | Python | bsd-3-clause | 217 |
# -*- coding: utf-8 -*-
import sys
from collections import namedtuple, OrderedDict
from itertools import chain
from contextlib import contextmanager
import numpy
from AnyQt.QtWidgets import (
QGraphicsWidget, QGraphicsObject, QGraphicsLinearLayout, QGraphicsPathItem,
QGraphicsScene, QGraphicsView, QGridLayout, QFormLayout, QSizePolicy,
QGraphicsSimpleTextItem,
QGraphicsLayoutItem, QAction,
)
from AnyQt.QtGui import (
QTransform, QPainterPath, QPainterPathStroker, QColor, QBrush, QPen,
QFont, QFontMetrics, QPolygonF, QKeySequence
)
from AnyQt.QtCore import Qt, QSize, QSizeF, QPointF, QRectF, QLineF, QEvent
from AnyQt.QtCore import pyqtSignal as Signal
import pyqtgraph as pg
import Orange.data
from Orange.data.domain import filter_visible
from Orange.data import Domain
import Orange.misc
from Orange.clustering.hierarchical import \
postorder, preorder, Tree, tree_from_linkage, dist_matrix_linkage, \
leaves, prune, top_clusters
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, itemmodels
from Orange.widgets.utils.annotated_data import (create_annotated_table,
ANNOTATED_DATA_SIGNAL_NAME)
from Orange.widgets.io import FileFormat
__all__ = ["OWHierarchicalClustering"]
LINKAGE = ["Single", "Average", "Weighted", "Complete", "Ward"]
def dendrogram_layout(tree, expand_leaves=False):
    """Compute the 1D layout of every cluster in *tree*.

    Returns a list of ``(node, (start, center, end))`` pairs in
    post-order. Leaves occupy consecutive half-unit-offset slots unless
    *expand_leaves* is set, in which case a leaf spans its cluster's
    ``first``..``last`` instance range. An inner node spans from its left
    child's center to its right child's center.
    """
    geometry = {}          # node -> (start, center, end)
    result = []
    next_leaf = 0
    for node in postorder(tree):
        if node.is_leaf:
            cluster = node.value
            if expand_leaves:
                first = float(cluster.first) + 0.5
                last = float(cluster.last - 1) + 0.5
            else:
                # each leaf gets the next unit slot, centered at +0.5
                first = last = next_leaf + 0.5
                next_leaf += 1
        else:
            # span between the children's centers (post-order guarantees
            # both children were laid out already)
            first = geometry[node.left][1]
            last = geometry[node.right][1]
        mid = (first + last) / 2.0
        span = (first, mid, last)
        geometry[node] = span
        result.append((node, span))
    return result
#: A 2D point in dendrogram drawing coordinates.
Point = namedtuple("Point", ["x", "y"])
#: A drawable dendrogram element: the node's anchor point and the
#: polyline (tuple of Points; empty for leaves) connecting its children.
Element = namedtuple("Element", ["anchor", "path"])
def Path_toQtPath(geom):
    """Convert an :class:`Element` geometry into a :class:`QPainterPath`.

    A polyline (more than one point) becomes a connected line path; a
    single point or an empty point sequence is rendered as a degenerate
    (near zero-size) rectangle centered on the point/anchor so the item
    still has a non-empty paintable path.
    """
    p = QPainterPath()
    anchor, points = geom
    if len(points) > 1:
        p.moveTo(*points[0])
        for (x, y) in points[1:]:
            p.lineTo(x, y)
    elif len(points) == 1:
        r = QRectF(0, 0, 1e-0, 1e-9)
        # QRectF.moveCenter takes a QPointF; the previous code unpacked
        # the bare (x, y) tuple into two arguments, which raises a
        # TypeError in PyQt (compare the len == 0 branch below).
        r.moveCenter(QPointF(*points[0]))
        p.addRect(r)
    elif len(points) == 0:
        r = QRectF(0, 0, 1e-16, 1e-16)
        r.moveCenter(QPointF(*anchor))
        p.addRect(r)
    return p
#: Dendrogram orientation flags (direction in which the root points;
#: the values are arbitrary but distinct).
Left, Top, Right, Bottom = 1, 2, 3, 4
def dendrogram_path(tree, orientation=Left):
    """Return a :class:`Tree` mirroring *tree* where every node's value
    is a ``(node, Element)`` pair — the drawable anchor/polyline geometry
    of that cluster in the requested *orientation*.

    Raises
    ------
    ValueError
        If *orientation* is not one of Left, Top, Right, Bottom.
    """
    layout = dendrogram_layout(tree)
    T = {}
    paths = {}
    rootdata = tree.value
    base = rootdata.height
    # Map (leaf-position, cluster-height) into drawing coordinates.
    # NOTE: the first two branches were previously two independent
    # ``if`` statements; an unrecognized orientation then fell through
    # and crashed with a NameError on ``transform`` below.
    if orientation == Bottom:
        transform = lambda x, y: (x, y)
    elif orientation == Top:
        transform = lambda x, y: (x, base - y)
    elif orientation == Left:
        transform = lambda x, y: (base - y, x)
    elif orientation == Right:
        transform = lambda x, y: (y, x)
    else:
        raise ValueError("invalid orientation: {!r}".format(orientation))
    for node, (start, center, end) in layout:
        if node.is_leaf:
            x, y = transform(center, 0)
            anchor = Point(x, y)
            paths[node] = Element(anchor, ())
        else:
            left, right = paths[node.left], paths[node.right]
            lines = (left.anchor,
                     Point(*transform(start, node.value.height)),
                     Point(*transform(end, node.value.height)),
                     right.anchor)
            anchor = Point(*transform(center, node.value.height))
            paths[node] = Element(anchor, lines)
        T[node] = Tree((node, paths[node]),
                       tuple(T[ch] for ch in node.branches))
    return T[tree]
def make_pen(brush=Qt.black, width=1, style=Qt.SolidLine,
             cap_style=Qt.SquareCap, join_style=Qt.BevelJoin,
             cosmetic=False):
    """Construct and return a :class:`QPen` with the given attributes."""
    pen = QPen(brush)
    # apply each attribute through its setter
    for setter, value in ((pen.setWidth, width),
                          (pen.setStyle, style),
                          (pen.setCapStyle, cap_style),
                          (pen.setJoinStyle, join_style),
                          (pen.setCosmetic, cosmetic)):
        setter(value)
    return pen
def update_pen(pen, brush=None, width=None, style=None,
               cap_style=None, join_style=None,
               cosmetic=None):
    """Return a copy of *pen* with every non-None attribute replaced."""
    updated = QPen(pen)
    if brush is not None:
        updated.setBrush(QBrush(brush))
    # remaining attributes share the same "only if given" pattern
    for setter, value in ((updated.setWidth, width),
                          (updated.setStyle, style),
                          (updated.setCapStyle, cap_style),
                          (updated.setJoinStyle, join_style),
                          (updated.setCosmetic, cosmetic)):
        if value is not None:
            setter(value)
    return updated
def path_stroke(path, width=1, join_style=Qt.MiterJoin):
    """Return the stroke (outline region) of *path* as a new path."""
    stroker = QPainterPathStroker()
    stroker.setWidth(width)
    stroker.setMiterLimit(1.0)
    stroker.setJoinStyle(join_style)
    return stroker.createStroke(path)
def path_outline(path, width=1, join_style=Qt.MiterJoin):
    """Return *path* united with its own stroke, i.e. the path grown
    outward by half of *width*."""
    return path_stroke(path, width, join_style).united(path)
@contextmanager
def blocked(obj):
    """Context manager that blocks *obj*'s Qt signals for the duration
    of the ``with`` block, restoring the previous blocked state on exit
    (even if an exception is raised)."""
    previous = obj.signalsBlocked()
    obj.blockSignals(True)
    try:
        yield obj
    finally:
        obj.blockSignals(previous)
class DendrogramWidget(QGraphicsWidget):
    """A Graphics Widget displaying a dendrogram.

    The widget builds one :class:`ClusterGraphicsItem` per cluster node,
    supports hover highlighting of subtrees and (single or extended)
    interactive selection of clusters, and rescales the fixed-unit
    layout to its contents rectangle on resize.
    """
    class ClusterGraphicsItem(QGraphicsPathItem):
        """Path item for one cluster; its shape/bounding rect can be
        expanded (via setRect) to cover the whole subtree area so that
        hover/click hits register anywhere inside it, not only on the
        thin painted lines."""
        # Expanded bounding rectangle; None means "use the path's own".
        _rect = None
        def shape(self):
            if self._rect is not None:
                p = QPainterPath()
                p.addRect(self.boundingRect())
                return p
            else:
                return super().shape()
        def setRect(self, rect):
            """Set the expanded bounding rectangle."""
            self.prepareGeometryChange()
            self._rect = QRectF(rect)
        def boundingRect(self):
            if self._rect is not None:
                return QRectF(self._rect)
            else:
                return super().boundingRect()
    #: Orientation
    Left, Top, Right, Bottom = 1, 2, 3, 4
    #: Selection flags
    NoSelection, SingleSelection, ExtendedSelection = 0, 1, 2
    #: Emitted when a user clicks on the cluster item.
    itemClicked = Signal(ClusterGraphicsItem)
    #: Emitted whenever the set of selected clusters changes.
    selectionChanged = Signal()
    #: Emitted when the user edits the selection interactively.
    selectionEdited = Signal()
    def __init__(self, parent=None, root=None, orientation=Left,
                 hoverHighlightEnabled=True, selectionMode=ExtendedSelection):
        QGraphicsWidget.__init__(self, parent)
        self.orientation = orientation
        self._root = None
        self._highlighted_item = None
        #: an ordered {cluster item: selection overlay item} mapping
        self._selection = OrderedDict()
        #: a {node: item} mapping
        self._items = {}
        #: container for all cluster items.
        self._itemgroup = QGraphicsWidget(self)
        self._itemgroup.setGeometry(self.contentsRect())
        #: a {child node: parent node} mapping
        self._cluster_parent = {}
        self.__hoverHighlightEnabled = hoverHighlightEnabled
        self.__selectionMode = selectionMode
        self.setContentsMargins(0, 0, 0, 0)
        self.set_root(root)
    def clear(self):
        """Remove all cluster/selection items and reset internal state."""
        for item in self._items.values():
            item.setParentItem(None)
            if item.scene() is self.scene() and self.scene() is not None:
                self.scene().removeItem(item)
        for item in self._selection.values():
            item.setParentItem(None)
            if item.scene():
                item.scene().removeItem(item)
        self._root = None
        self._items = {}
        self._selection = OrderedDict()
        self._highlighted_item = None
        self._cluster_parent = {}
    def set_root(self, root):
        """Set the root cluster.
        :param Tree root: Root tree.
        """
        self.clear()
        self._root = root
        if root:
            pen = make_pen(Qt.blue, width=1, cosmetic=True,
                           join_style=Qt.MiterJoin)
            # Build items bottom-up so children exist when the parent
            # records its child -> parent links.
            for node in postorder(root):
                item = DendrogramWidget.ClusterGraphicsItem(self._itemgroup)
                item.setAcceptHoverEvents(True)
                item.setPen(pen)
                item.node = node
                item.installSceneEventFilter(self)
                for branch in node.branches:
                    assert branch in self._items
                    self._cluster_parent[branch] = node
                self._items[node] = item
            self.updateGeometry()
            self._relayout()
            self._rescale()
    def item(self, node):
        """Return the DendrogramNode instance representing the cluster.
        :type cluster: :class:`Tree`
        """
        return self._items.get(node)
    def height_at(self, point):
        """Return the cluster height at the point in widget local coordinates.
        """
        if not self._root:
            return 0
        tpoint = self.mapToItem(self._itemgroup, point)
        if self.orientation in [self.Left, self.Right]:
            height = tpoint.x()
        else:
            height = tpoint.y()
        # The height axis is inverted for Left/Bottom orientations
        # (mirrors the transform in dendrogram_path).
        if self.orientation in [self.Left, self.Bottom]:
            base = self._root.value.height
            height = base - height
        return height
    def pos_at_height(self, height):
        """Return a point in local coordinates for `height` (in cluster
        height scale).
        """
        if not self._root:
            return QPointF()
        if self.orientation in [self.Left, self.Bottom]:
            base = self._root.value.height
            height = base - height
        if self.orientation in [self.Left, self.Right]:
            p = QPointF(height, 0)
        else:
            p = QPointF(0, height)
        return self.mapFromItem(self._itemgroup, p)
    def _set_hover_item(self, item):
        """Set the currently highlighted item (the whole subtree under
        it is drawn with a wider pen)."""
        if self._highlighted_item is item:
            return
        def branches(item):
            return [self._items[ch] for ch in item.node.branches]
        if self._highlighted_item:
            # restore the normal pen on the previously highlighted subtree
            pen = make_pen(Qt.blue, width=1, cosmetic=True)
            for it in postorder(self._highlighted_item, branches):
                it.setPen(pen)
        self._highlighted_item = item
        if item:
            hpen = make_pen(Qt.blue, width=2, cosmetic=True)
            for it in postorder(item, branches):
                it.setPen(hpen)
    def leaf_items(self):
        """Iterate over the dendrogram leaf items (:class:`QGraphicsItem`).
        """
        if self._root:
            return (self._items[leaf] for leaf in leaves(self._root))
        else:
            return iter(())
    def leaf_anchors(self):
        """Iterate over the dendrogram leaf anchor points (:class:`QPointF`).
        The points are in the widget local coordinates.
        """
        for item in self.leaf_items():
            anchor = QPointF(item.element.anchor)
            yield self.mapFromItem(item, anchor)
    def selected_nodes(self):
        """Return the selected clusters."""
        return [item.node for item in self._selection]
    def set_selected_items(self, items):
        """Set the item selection.
        :param items: List of `GraphicsItems`s to select.
        """
        to_remove = set(self._selection) - set(items)
        to_add = set(items) - set(self._selection)
        for sel in to_remove:
            self._remove_selection(sel)
        for sel in to_add:
            self._add_selection(sel)
        if to_add or to_remove:
            self._re_enumerate_selections()
            self.selectionChanged.emit()
    def set_selected_clusters(self, clusters):
        """Set the selected clusters.
        :param Tree items: List of cluster nodes to select .
        """
        self.set_selected_items(list(map(self.item, clusters)))
    def is_selected(self, item):
        """Return True if *item* is itself a selection root."""
        return item in self._selection
    def is_included(self, item):
        """Return True if *item* lies inside any selected subtree."""
        return self._selected_super_item(item) is not None
    def select_item(self, item, state):
        """Set the `item`s selection state to `select_state`
        :param item: QGraphicsItem.
        :param bool state: New selection state for item.
        """
        if state is False and item not in self._selection or \
                state == True and item in self._selection:
            return  # State unchanged
        if item in self._selection:
            if state == False:
                self._remove_selection(item)
                self.selectionChanged.emit()
        else:
            # If item is already inside another selected item,
            # remove that selection
            super_selection = self._selected_super_item(item)
            if super_selection:
                self._remove_selection(super_selection)
            # Remove selections this selection will override.
            sub_selections = self._selected_sub_items(item)
            for sub in sub_selections:
                self._remove_selection(sub)
            if state:
                self._add_selection(item)
                self._re_enumerate_selections()
            elif item in self._selection:
                self._remove_selection(item)
            self.selectionChanged.emit()
    def _add_selection(self, item):
        """Add selection rooted at item
        """
        outline = self._selection_poly(item)
        selection_item = QGraphicsPathItem(self)
        selection_item.setPos(self.contentsRect().topLeft())
        selection_item.setPen(make_pen(width=1, cosmetic=True))
        # map the unscaled outline into the current display scale
        transform = self._itemgroup.transform()
        path = transform.map(outline)
        margin = 4
        if item.node.is_leaf:
            # a leaf outline is a single point: grow it into a small rect
            ppath = QPainterPath()
            ppath.addRect(path.boundingRect()
                          .adjusted(-margin, -margin, margin, margin))
        else:
            ppath = QPainterPath()
            ppath.addPolygon(path)
            ppath = path_outline(ppath, width=margin * 2,)
        selection_item.setPath(ppath)
        # keep the unscaled outline for re-mapping on rescale
        selection_item.unscaled_path = outline
        self._selection[item] = selection_item
    def _remove_selection(self, item):
        """Remove selection rooted at item."""
        selection_item = self._selection[item]
        selection_item.hide()
        selection_item.setParentItem(None)
        if self.scene():
            self.scene().removeItem(selection_item)
        del self._selection[item]
        self._re_enumerate_selections()
    def _selected_sub_items(self, item):
        """Return all selected subclusters under item."""
        def branches(item):
            return [self._items[ch] for ch in item.node.branches]
        res = []
        # skip the first preorder element (item itself)
        for item in list(preorder(item, branches))[1:]:
            if item in self._selection:
                res.append(item)
        return res
    def _selected_super_item(self, item):
        """Return the selected super item if it exists."""
        def branches(item):
            return [self._items[ch] for ch in item.node.branches]
        for selected_item in self._selection:
            if item in set(preorder(selected_item, branches)):
                return selected_item
        return None
    def _re_enumerate_selections(self):
        """Re enumerate the selection items and update the colors."""
        # Order the clusters
        items = sorted(self._selection.items(),
                       key=lambda item: item[0].node.value.first)
        palette = colorpalette.ColorPaletteGenerator(len(items))
        for i, (item, selection_item) in enumerate(items):
            # delete and then reinsert to update the ordering
            del self._selection[item]
            self._selection[item] = selection_item
            color = palette[i]
            color.setAlpha(150)
            selection_item.setBrush(QColor(color))
    def _selection_poly(self, item):
        """Return an selection polygon covering the selection rooted at
        item (in unscaled layout coordinates).
        """
        def left(item):
            return [self._items[ch] for ch in item.node.branches[:1]]
        def right(item):
            return [self._items[ch] for ch in item.node.branches[-1:]]
        itemsleft = list(preorder(item, left))[::-1]
        itemsright = list(preorder(item, right))
        # itemsleft + itemsright walks from the leftmost leaf up to the root
        # and down to the rightmost leaf
        assert itemsleft[0].node.is_leaf
        assert itemsright[-1].node.is_leaf
        if item.node.is_leaf:
            # a single anchor point
            vert = [itemsleft[0].element.anchor]
        else:
            vert = []
            for it in itemsleft[1:]:
                vert.extend([it.element.path[0], it.element.path[1],
                             it.element.anchor])
            for it in itemsright[:-1]:
                vert.extend([it.element.anchor,
                             it.element.path[-2], it.element.path[-1]])
            # close the polygon
            vert.append(vert[0])
        def isclose(a, b, rel_tol=1e-6):
            return abs(a - b) < rel_tol * max(abs(a), abs(b))
        def isclose_p(p1, p2, rel_tol=1e-6):
            return isclose(p1.x, p2.x, rel_tol) and \
                   isclose(p1.y, p2.y, rel_tol)
        # merge consecutive vertices that are (too) close
        acc = [vert[0]]
        for v in vert[1:]:
            if not isclose_p(v, acc[-1]):
                acc.append(v)
        vert = acc
        return QPolygonF([QPointF(*p) for p in vert])
    def _update_selection_items(self):
        """Update the shapes of selection items after a scale change.
        """
        transform = self._itemgroup.transform()
        for item, selection in self._selection.items():
            path = transform.map(selection.unscaled_path)
            ppath = QPainterPath()
            margin = 4
            if item.node.is_leaf:
                ppath.addRect(path.boundingRect()
                              .adjusted(-margin, -margin, margin, margin))
            else:
                ppath.addPolygon(path)
                ppath = path_outline(ppath, width=margin * 2)
            selection.setPath(ppath)
    def _relayout(self):
        """Recompute each item's path and expanded hit rectangle from the
        dendrogram layout (in unscaled layout coordinates)."""
        if not self._root:
            return
        self._layout = dendrogram_path(self._root, self.orientation)
        for node_geom in postorder(self._layout):
            node, geom = node_geom.value
            item = self._items[node]
            item.element = geom
            item.setPath(Path_toQtPath(geom))
            # stack shallower (lower) clusters above deeper ones
            item.setZValue(-node.value.height)
            # extend the hit rect from the painted path down to the
            # leaf baseline so the whole subtree area is clickable
            r = item.path().boundingRect()
            base = self._root.value.height
            if self.orientation == Left:
                r.setRight(base)
            elif self.orientation == Right:
                r.setLeft(0)
            elif self.orientation == Top:
                r.setBottom(base)
            else:
                r.setTop(0)
            item.setRect(r)
    def _rescale(self):
        """Scale the unit layout to fill the current contents rect."""
        if self._root is None:
            return
        crect = self.contentsRect()
        leaf_count = len(list(leaves(self._root)))
        if self.orientation in [Left, Right]:
            drect = QSizeF(self._root.value.height, leaf_count)
        else:
            drect = QSizeF(leaf_count, self._root.value.height)
        # guard against zero-extent layouts (single leaf / zero height)
        eps = numpy.finfo(numpy.float64).eps
        if abs(drect.width()) < eps:
            sx = 1.0
        else:
            sx = crect.width() / drect.width()
        if abs(drect.height()) < eps:
            sy = 1.0
        else:
            sy = crect.height() / drect.height()
        transform = QTransform().scale(sx, sy)
        self._itemgroup.setPos(crect.topLeft())
        self._itemgroup.setTransform(transform)
        # NOTE(review): _selection_items is written here but never read
        # anywhere in this class -- looks vestigial; confirm before removing.
        self._selection_items = None
        self._update_selection_items()
    def sizeHint(self, which, constraint=QSizeF()):
        """Preferred size: one text line per leaf along the leaf axis,
        250px along the height axis; a small fixed minimum otherwise."""
        fm = QFontMetrics(self.font())
        spacing = fm.lineSpacing()
        mleft, mtop, mright, mbottom = self.getContentsMargins()
        if self._root and which == Qt.PreferredSize:
            nleaves = len([node for node in self._items.keys()
                           if not node.branches])
            if self.orientation in [self.Left, self.Right]:
                return QSizeF(250, spacing * nleaves + mleft + mright)
            else:
                return QSizeF(spacing * nleaves + mtop + mbottom, 250)
        elif which == Qt.MinimumSize:
            return QSizeF(mleft + mright + 10, mtop + mbottom + 10)
        else:
            return QSizeF()
    def sceneEventFilter(self, obj, event):
        """Handle hover/press events for the per-cluster child items
        (installed on every ClusterGraphicsItem in set_root)."""
        if isinstance(obj, DendrogramWidget.ClusterGraphicsItem):
            if event.type() == QEvent.GraphicsSceneHoverEnter and \
                    self.__hoverHighlightEnabled:
                self._set_hover_item(obj)
                event.accept()
                return True
            elif event.type() == QEvent.GraphicsSceneMousePress and \
                    event.button() == Qt.LeftButton:
                is_selected = self.is_selected(obj)
                is_included = self.is_included(obj)
                current_selection = list(self._selection)
                if self.__selectionMode == DendrogramWidget.SingleSelection:
                    if event.modifiers() & Qt.ControlModifier:
                        self.set_selected_items(
                            [obj] if not is_selected else [])
                    elif event.modifiers() & Qt.AltModifier:
                        self.set_selected_items([])
                    elif event.modifiers() & Qt.ShiftModifier:
                        if not is_included:
                            self.set_selected_items([obj])
                    elif current_selection != [obj]:
                        self.set_selected_items([obj])
                elif self.__selectionMode == DendrogramWidget.ExtendedSelection:
                    if event.modifiers() & Qt.ControlModifier:
                        self.select_item(obj, not is_selected)
                    elif event.modifiers() & Qt.AltModifier:
                        self.select_item(self._selected_super_item(obj), False)
                    elif event.modifiers() & Qt.ShiftModifier:
                        if not is_included:
                            self.select_item(obj, True)
                    elif current_selection != [obj]:
                        self.set_selected_items([obj])
                # NOTE(review): this compares a list against the
                # OrderedDict itself, so it is always True and
                # selectionEdited fires on every click -- probably meant
                # ``current_selection != list(self._selection)``; confirm.
                if current_selection != self._selection:
                    self.selectionEdited.emit()
                self.itemClicked.emit(obj)
                event.accept()
                return True
        if event.type() == QEvent.GraphicsSceneHoverLeave:
            self._set_hover_item(None)
        return super().sceneEventFilter(obj, event)
    def changeEvent(self, event):
        super().changeEvent(event)
        if event.type() == QEvent.FontChange:
            self.updateGeometry()
        # QEvent.ContentsRectChange is missing in PyQt4 <= 4.11.3,
        # hence the raw enum value.
        if event.type() == 178:  # QEvent.ContentsRectChange
            self._rescale()
    def resizeEvent(self, event):
        super().resizeEvent(event)
        self._rescale()
    def mousePressEvent(self, event):
        QGraphicsWidget.mousePressEvent(self, event)
        # A mouse press on an empty widget part clears the selection
        if event.modifiers() == Qt.NoModifier and self._selection:
            self.set_selected_clusters([])
class OWHierarchicalClustering(widget.OWWidget):
name = "Hierarchical Clustering"
description = "Display a dendrogram of a hierarchical clustering " \
"constructed from the input distance matrix."
icon = "icons/HierarchicalClustering.svg"
priority = 2100
inputs = [("Distances", Orange.misc.DistMatrix, "set_distances")]
outputs = [("Selected Data", Orange.data.Table, widget.Default),
(ANNOTATED_DATA_SIGNAL_NAME, Orange.data.Table)]
settingsHandler = settings.DomainContextHandler()
#: Selected linkage
linkage = settings.Setting(1)
#: Index of the selected annotation item (variable, ...)
annotation = settings.ContextSetting("Enumeration")
#: Out-of-context setting for the case when the "Name" option is available
annotation_if_names = settings.Setting("Name")
#: Out-of-context setting for the case with just "Enumerate" and "None"
annotation_if_enumerate = settings.Setting("Enumerate")
#: Selected tree pruning (none/max depth)
pruning = settings.Setting(0)
#: Maximum depth when max depth pruning is selected
max_depth = settings.Setting(10)
#: Selected cluster selection method (none, cut distance, top n)
selection_method = settings.Setting(0)
#: Cut height ratio wrt root height
cut_ratio = settings.Setting(75.0)
#: Number of top clusters to select
top_n = settings.Setting(3)
#: Dendrogram zoom factor
zoom_factor = settings.Setting(0)
append_clusters = settings.Setting(True)
cluster_role = settings.Setting(2)
cluster_name = settings.Setting("Cluster")
autocommit = settings.Setting(True)
graph_name = "scene"
#: Cluster variable domain role
AttributeRole, ClassRole, MetaRole = 0, 1, 2
cluster_roles = ["Attribute", "Class variable", "Meta variable"]
basic_annotations = ["None", "Enumeration"]
def __init__(self):
super().__init__()
self.matrix = None
self.items = None
self.linkmatrix = None
self.root = None
self._displayed_root = None
self.cutoff_height = 0.0
gui.comboBox(
self.controlArea, self, "linkage", items=LINKAGE, box="Linkage",
callback=self._invalidate_clustering)
model = itemmodels.VariableListModel()
model[:] = self.basic_annotations
self.label_cb = gui.comboBox(
self.controlArea, self, "annotation", box="Annotation",
model=model, callback=self._update_labels, contentsLength=12)
box = gui.radioButtons(
self.controlArea, self, "pruning", box="Pruning",
callback=self._invalidate_pruning)
grid = QGridLayout()
box.layout().addLayout(grid)
grid.addWidget(
gui.appendRadioButton(box, "None", addToLayout=False),
0, 0
)
self.max_depth_spin = gui.spin(
box, self, "max_depth", minv=1, maxv=100,
callback=self._invalidate_pruning,
keyboardTracking=False
)
grid.addWidget(
gui.appendRadioButton(box, "Max depth:", addToLayout=False),
1, 0)
grid.addWidget(self.max_depth_spin, 1, 1)
self.selection_box = gui.radioButtons(
self.controlArea, self, "selection_method",
box="Selection",
callback=self._selection_method_changed)
grid = QGridLayout()
self.selection_box.layout().addLayout(grid)
grid.addWidget(
gui.appendRadioButton(
self.selection_box, "Manual", addToLayout=False),
0, 0
)
grid.addWidget(
gui.appendRadioButton(
self.selection_box, "Height ratio:", addToLayout=False),
1, 0
)
self.cut_ratio_spin = gui.spin(
self.selection_box, self, "cut_ratio", 0, 100, step=1e-1,
spinType=float, callback=self._selection_method_changed
)
self.cut_ratio_spin.setSuffix("%")
grid.addWidget(self.cut_ratio_spin, 1, 1)
grid.addWidget(
gui.appendRadioButton(
self.selection_box, "Top N:", addToLayout=False),
2, 0
)
self.top_n_spin = gui.spin(self.selection_box, self, "top_n", 1, 20,
callback=self._selection_method_changed)
grid.addWidget(self.top_n_spin, 2, 1)
self.zoom_slider = gui.hSlider(
self.controlArea, self, "zoom_factor", box="Zoom",
minValue=-6, maxValue=3, step=1, ticks=True, createLabel=False,
callback=self.__zoom_factor_changed)
zoom_in = QAction(
"Zoom in", self, shortcut=QKeySequence.ZoomIn,
triggered=self.__zoom_in
)
zoom_out = QAction(
"Zoom out", self, shortcut=QKeySequence.ZoomOut,
triggered=self.__zoom_out
)
zoom_reset = QAction(
"Reset zoom", self,
shortcut=QKeySequence(Qt.ControlModifier | Qt.Key_0),
triggered=self.__zoom_reset
)
self.addActions([zoom_in, zoom_out, zoom_reset])
self.controlArea.layout().addStretch()
box = gui.vBox(self.controlArea, "Output")
gui.checkBox(box, self, "append_clusters", "Append cluster IDs",
callback=self._invalidate_output)
ibox = gui.indentedBox(box)
name_edit = gui.lineEdit(ibox, self, "cluster_name")
name_edit.editingFinished.connect(self._invalidate_output)
cb = gui.comboBox(
ibox, self, "cluster_role", callback=self._invalidate_output,
items=self.cluster_roles
)
form = QFormLayout(
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,
labelAlignment=Qt.AlignLeft,
spacing=8
)
form.addRow("Name:", name_edit)
form.addRow("Place:", cb)
ibox.layout().addSpacing(5)
ibox.layout().addLayout(form)
ibox.layout().addSpacing(5)
gui.auto_commit(box, self, "autocommit", "Send Selected", "Send Automatically",
box=False)
self.scene = QGraphicsScene()
self.view = QGraphicsView(
self.scene,
horizontalScrollBarPolicy=Qt.ScrollBarAlwaysOff,
verticalScrollBarPolicy=Qt.ScrollBarAlwaysOn,
alignment=Qt.AlignLeft | Qt.AlignVCenter
)
def axis_view(orientation):
ax = pg.AxisItem(orientation=orientation, maxTickLength=7)
scene = QGraphicsScene()
scene.addItem(ax)
view = QGraphicsView(
scene,
horizontalScrollBarPolicy=Qt.ScrollBarAlwaysOff,
verticalScrollBarPolicy=Qt.ScrollBarAlwaysOn,
alignment=Qt.AlignLeft | Qt.AlignVCenter
)
view.setFixedHeight(ax.size().height())
ax.line = SliderLine(orientation=Qt.Horizontal,
length=ax.size().height())
scene.addItem(ax.line)
return view, ax
self.top_axis_view, self.top_axis = axis_view("top")
self.mainArea.layout().setSpacing(1)
self.mainArea.layout().addWidget(self.top_axis_view)
self.mainArea.layout().addWidget(self.view)
self.bottom_axis_view, self.bottom_axis = axis_view("bottom")
self.mainArea.layout().addWidget(self.bottom_axis_view)
self._main_graphics = QGraphicsWidget()
self._main_layout = QGraphicsLinearLayout(Qt.Horizontal)
self._main_layout.setSpacing(10)
self._main_graphics.setLayout(self._main_layout)
self.scene.addItem(self._main_graphics)
self.dendrogram = DendrogramWidget()
self.dendrogram.setSizePolicy(QSizePolicy.MinimumExpanding,
QSizePolicy.MinimumExpanding)
self.dendrogram.selectionChanged.connect(self._invalidate_output)
self.dendrogram.selectionEdited.connect(self._selection_edited)
self.labels = GraphicsSimpleTextList()
self.labels.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.labels.setAlignment(Qt.AlignLeft)
self.labels.setMaximumWidth(200)
self.labels.layout().setSpacing(0)
self._main_layout.addItem(self.dendrogram)
self._main_layout.addItem(self.labels)
self._main_layout.setAlignment(
self.dendrogram, Qt.AlignLeft | Qt.AlignVCenter)
self._main_layout.setAlignment(
self.labels, Qt.AlignLeft | Qt.AlignVCenter)
self.view.viewport().installEventFilter(self)
self.top_axis_view.viewport().installEventFilter(self)
self.bottom_axis_view.viewport().installEventFilter(self)
self._main_graphics.installEventFilter(self)
self.cut_line = SliderLine(self.dendrogram,
orientation=Qt.Horizontal)
self.cut_line.valueChanged.connect(self._dendrogram_slider_changed)
self.cut_line.hide()
self.bottom_axis.line.valueChanged.connect(self._axis_slider_changed)
self.top_axis.line.valueChanged.connect(self._axis_slider_changed)
self.dendrogram.geometryChanged.connect(self._dendrogram_geom_changed)
self._set_cut_line_visible(self.selection_method == 1)
def set_distances(self, matrix):
self.error()
if matrix is not None:
N, _ = matrix.shape
if N < 2:
self.error("Empty distance matrix")
matrix = None
self.matrix = matrix
if matrix is not None:
self._set_items(matrix.row_items, matrix.axis)
else:
self._set_items(None)
self._invalidate_clustering()
self.unconditional_commit()
def _set_items(self, items, axis=1):
self.closeContext()
self.items = items
model = self.label_cb.model()
if len(model) == 3:
self.annotation_if_names = self.annotation
elif len(model) == 2:
self.annotation_if_enumerate = self.annotation
if isinstance(items, Orange.data.Table) and axis:
model[:] = chain(
self.basic_annotations,
[model.Separator],
items.domain.class_vars,
items.domain.metas,
[model.Separator] if (items.domain.class_vars or items.domain.metas) and
next(filter_visible(items.domain.attributes), False) else [],
filter_visible(items.domain.attributes)
)
if items.domain.class_vars:
self.annotation = items.domain.class_vars[0]
else:
self.annotation = "Enumeration"
self.openContext(items.domain)
else:
name_option = bool(
items is not None and (
not axis or
isinstance(items, list) and
all(isinstance(var, Orange.data.Variable) for var in items)))
model[:] = self.basic_annotations + ["Name"] * name_option
self.annotation = self.annotation_if_names if name_option \
else self.annotation_if_enumerate
def _clear_plot(self):
self.labels.set_labels([])
self.dendrogram.set_root(None)
def _set_displayed_root(self, root):
self._clear_plot()
self._displayed_root = root
self.dendrogram.set_root(root)
self._update_labels()
self._main_graphics.resize(
self._main_graphics.size().width(),
self._main_graphics.sizeHint(Qt.PreferredSize).height()
)
self._main_graphics.layout().activate()
def _update(self):
self._clear_plot()
distances = self.matrix
if distances is not None:
method = LINKAGE[self.linkage].lower()
Z = dist_matrix_linkage(distances, linkage=method)
tree = tree_from_linkage(Z)
self.linkmatrix = Z
self.root = tree
self.top_axis.setRange(tree.value.height, 0.0)
self.bottom_axis.setRange(tree.value.height, 0.0)
if self.pruning:
self._set_displayed_root(prune(tree, level=self.max_depth))
else:
self._set_displayed_root(tree)
else:
self.linkmatrix = None
self.root = None
self._set_displayed_root(None)
self._apply_selection()
def _update_labels(self):
labels = []
if self.root and self._displayed_root:
indices = [leaf.value.index for leaf in leaves(self.root)]
if self.annotation == "None":
labels = []
elif self.annotation == "Enumeration":
labels = [str(i+1) for i in indices]
elif self.annotation == "Name":
attr = self.matrix.row_items.domain.attributes
labels = [str(attr[i]) for i in indices]
elif isinstance(self.annotation, Orange.data.Variable):
col_data, _ = self.items.get_column_view(self.annotation)
labels = [self.annotation.str_val(val) for val in col_data]
labels = [labels[idx] for idx in indices]
else:
labels = []
if labels and self._displayed_root is not self.root:
joined = leaves(self._displayed_root)
labels = [", ".join(labels[leaf.value.first: leaf.value.last])
for leaf in joined]
self.labels.set_labels(labels)
self.labels.setMinimumWidth(1 if labels else -1)
def _invalidate_clustering(self):
self._update()
self._update_labels()
self._invalidate_output()
def _invalidate_output(self):
self.commit()
def _invalidate_pruning(self):
if self.root:
selection = self.dendrogram.selected_nodes()
ranges = [node.value.range for node in selection]
if self.pruning:
self._set_displayed_root(
prune(self.root, level=self.max_depth))
else:
self._set_displayed_root(self.root)
selected = [node for node in preorder(self._displayed_root)
if node.value.range in ranges]
self.dendrogram.set_selected_clusters(selected)
self._apply_selection()
def commit(self):
items = getattr(self.matrix, "items", self.items)
if not items:
self.send("Selected Data", None)
self.send(ANNOTATED_DATA_SIGNAL_NAME, None)
return
selection = self.dendrogram.selected_nodes()
selection = sorted(selection, key=lambda c: c.value.first)
indices = [leaf.value.index for leaf in leaves(self.root)]
maps = [indices[node.value.first:node.value.last]
for node in selection]
selected_indices = list(chain(*maps))
unselected_indices = sorted(set(range(self.root.value.last)) -
set(selected_indices))
if not selected_indices:
self.send("Selected Data", None)
annotated_data = create_annotated_table(items, []) \
if self.selection_method == 0 and self.matrix.axis else None
self.send(ANNOTATED_DATA_SIGNAL_NAME, annotated_data)
return
selected_data = None
if isinstance(items, Orange.data.Table) and self.matrix.axis == 1:
# Select rows
c = numpy.zeros(self.matrix.shape[0])
for i, indices in enumerate(maps):
c[indices] = i
c[unselected_indices] = len(maps)
mask = c != len(maps)
if self.append_clusters:
clust_var = Orange.data.DiscreteVariable(
str(self.cluster_name),
values=["C{}".format(i + 1)
for i in range(len(maps))] +
["Other"]
)
data, domain = items, items.domain
attrs = domain.attributes
class_ = domain.class_vars
metas = domain.metas
if self.cluster_role == self.AttributeRole:
attrs = attrs + (clust_var,)
elif self.cluster_role == self.ClassRole:
class_ = class_ + (clust_var,)
elif self.cluster_role == self.MetaRole:
metas = metas + (clust_var,)
domain = Orange.data.Domain(attrs, class_, metas)
data = Orange.data.Table.from_table(domain, items)
data.get_column_view(clust_var)[0][:] = c
else:
data = items
if selected_indices:
selected_data = data[mask]
if self.append_clusters:
def remove_other_value(vars_):
vars_ = list(vars_)
clust_var = vars_[-1].copy()
clust_var.values.pop()
vars_[-1] = clust_var
return vars_
if self.cluster_role == self.AttributeRole:
attrs = remove_other_value(attrs)
elif self.cluster_role == self.ClassRole:
class_ = remove_other_value(class_)
elif self.cluster_role == self.MetaRole:
metas = remove_other_value(metas)
selected_data.domain = Domain(attrs, class_, metas)
elif isinstance(items, Orange.data.Table) and self.matrix.axis == 0:
# Select columns
domain = Orange.data.Domain(
[items.domain[i] for i in selected_indices],
items.domain.class_vars, items.domain.metas)
selected_data = items.from_table(domain, items)
data = None
self.send("Selected Data", selected_data)
annotated_data = create_annotated_table(data, selected_indices) if \
self.selection_method == 0 else None
self.send(ANNOTATED_DATA_SIGNAL_NAME, annotated_data)
def sizeHint(self):
return QSize(800, 500)
def eventFilter(self, obj, event):
if obj is self.view.viewport() and event.type() == QEvent.Resize:
width = self.view.viewport().width() - 2
self._main_graphics.setMaximumWidth(width)
self._main_graphics.setMinimumWidth(width)
self._main_graphics.layout().activate()
elif event.type() == QEvent.MouseButtonPress and \
(obj is self.top_axis_view.viewport() or
obj is self.bottom_axis_view.viewport()):
self.selection_method = 1
# Map click point to cut line local coordinates
pos = self.top_axis_view.mapToScene(event.pos())
cut = self.top_axis.line.mapFromScene(pos)
self.top_axis.line.setValue(cut.x())
# update the line visibility, output, ...
self._selection_method_changed()
elif obj is self._main_graphics and \
event.type() == QEvent.LayoutRequest:
self.__update_size_constraints()
return super().eventFilter(obj, event)
def onDeleteWidget(self):
super().onDeleteWidget()
self._clear_plot()
self.dendrogram.clear()
self.dendrogram.deleteLater()
    def _dendrogram_geom_changed(self):
        """Re-layout the axes, cut lines and scene rects after the
        dendrogram's geometry changed, preserving the cut height.
        """
        pos = self.dendrogram.pos_at_height(self.cutoff_height)
        geom = self.dendrogram.geometry()
        crect = self.dendrogram.contentsRect()
        # Keep all three sliders at the same (new) x position.
        self._set_slider_value(pos.x(), geom.width())
        self.cut_line.setLength(geom.height())
        # Both axes span the dendrogram's content rect horizontally.
        self.top_axis.resize(crect.width(), self.top_axis.height())
        self.top_axis.setPos(geom.left() + crect.left(), 0)
        self.top_axis.line.setPos(self.cut_line.scenePos().x(), 0)
        self.bottom_axis.resize(crect.width(), self.bottom_axis.height())
        self.bottom_axis.setPos(geom.left() + crect.left(), 0)
        self.bottom_axis.line.setPos(self.cut_line.scenePos().x(), 0)
        geom = self._main_graphics.geometry()
        assert geom.topLeft() == QPointF(0, 0)
        self.scene.setSceneRect(geom)
        # Axis scenes are as wide as the main scene but only axis-tall.
        geom.setHeight(self.top_axis.size().height())
        self.top_axis.scene().setSceneRect(geom)
        self.bottom_axis.scene().setSceneRect(geom)
    def _axis_slider_changed(self, value):
        """Forward a cut-line drag on an axis to the dendrogram's cut line."""
        self.cut_line.setValue(value)
    def _dendrogram_slider_changed(self, value):
        """Update the cutoff height when the dendrogram cut line is dragged."""
        p = QPointF(value, 0)
        cl_height = self.dendrogram.height_at(p)
        self.set_cutoff_height(cl_height)
        # Sync the cut positions between the dendrogram and the axis.
        self._set_slider_value(value, self.dendrogram.size().width())
    def _set_slider_value(self, value, span):
        """Move all three cut lines to `value` with range [0, span], with
        their signals blocked to avoid re-entrant change handlers.
        """
        with blocked(self.cut_line):
            self.cut_line.setValue(value)
            self.cut_line.setRange(0, span)
        with blocked(self.top_axis.line):
            self.top_axis.line.setValue(value)
            self.top_axis.line.setRange(0, span)
        with blocked(self.bottom_axis.line):
            self.bottom_axis.line.setValue(value)
            self.bottom_axis.line.setRange(0, span)
    def set_cutoff_height(self, height):
        """Set the absolute cut height and select the matching clusters."""
        self.cutoff_height = height
        if self.root:
            # Keep the percentage setting in sync with the absolute height.
            self.cut_ratio = 100 * height / self.root.value.height
        self.select_max_height(height)
    def _set_cut_line_visible(self, visible):
        """Show or hide the cut line in the dendrogram and both axes."""
        self.cut_line.setVisible(visible)
        self.top_axis.line.setVisible(visible)
        self.bottom_axis.line.setVisible(visible)
    def select_top_n(self, n):
        """Select the top `n` clusters of the displayed tree."""
        root = self._displayed_root
        if root:
            clusters = top_clusters(root, n)
            self.dendrogram.set_selected_clusters(clusters)
    def select_max_height(self, height):
        """Select the clusters obtained by cutting the tree at `height`."""
        root = self._displayed_root
        if root:
            clusters = clusters_at_height(root, height)
            self.dendrogram.set_selected_clusters(clusters)
    def _selection_method_changed(self):
        """React to a change of selection mode (manual / height / top-n)."""
        # The cut line is only meaningful in 'cut at height' mode (== 1).
        self._set_cut_line_visible(self.selection_method == 1)
        if self.root:
            self._apply_selection()
    def _apply_selection(self):
        """Recompute the cluster selection from the current method settings."""
        if not self.root:
            return
        if self.selection_method == 0:
            # Manual selection: leave the user's clicked clusters untouched.
            pass
        elif self.selection_method == 1:
            # Cut at a fixed ratio of the total tree height.
            height = self.cut_ratio * self.root.value.height / 100
            self.set_cutoff_height(height)
            pos = self.dendrogram.pos_at_height(height)
            self._set_slider_value(pos.x(), self.dendrogram.size().width())
        elif self.selection_method == 2:
            self.select_top_n(self.top_n)
    def _selection_edited(self):
        # Selection was edited by clicking on a cluster in the
        # dendrogram view; switch back to manual selection mode and
        # push the new selection downstream.
        self.selection_method = 0
        self._selection_method_changed()
        self._invalidate_output()
def __zoom_in(self):
def clip(minval, maxval, val):
return min(max(val, minval), maxval)
self.zoom_factor = clip(self.zoom_slider.minimum(),
self.zoom_slider.maximum(),
self.zoom_factor + 1)
self.__zoom_factor_changed()
def __zoom_out(self):
def clip(minval, maxval, val):
return min(max(val, minval), maxval)
self.zoom_factor = clip(self.zoom_slider.minimum(),
self.zoom_slider.maximum(),
self.zoom_factor - 1)
self.__zoom_factor_changed()
    def __zoom_reset(self):
        """Reset the zoom to the default level (factor 0)."""
        self.zoom_factor = 0
        self.__zoom_factor_changed()
    def __update_size_constraints(self):
        """Resize the main graphics widget to its preferred height at the
        current (fixed) width.
        """
        size = self._main_graphics.size()
        preferred = self._main_graphics.sizeHint(
            Qt.PreferredSize, constraint=QSizeF(size.width(), -1))
        self._main_graphics.resize(QSizeF(size.width(), preferred.height()))
        self._main_graphics.layout().activate()
    def __zoom_factor_changed(self):
        """Scale the label and dendrogram fonts by 1.25 ** zoom_factor."""
        font = self.scene.font()
        factor = (1.25 ** self.zoom_factor)
        font = qfont_scaled(font, factor)
        self.labels.setFont(font)
        self.dendrogram.setFont(font)
        self.__update_size_constraints()
def send_report(self):
annot = self.label_cb.currentText()
if isinstance(self.annotation, str):
annot = annot.lower()
if self.selection_method == 0:
sel = "manual"
elif self.selection_method == 1:
sel = "at {:.1f} of height".format(self.cut_ratio)
else:
sel = "top {} clusters".format(self.top_n)
self.report_items((
("Linkage", LINKAGE[self.linkage].lower()),
("Annotation", annot),
("Prunning",
self.pruning != 0 and "{} levels".format(self.max_depth)),
("Selection", sel),
("Cluster ID in output",
self.append_clusters and
"{} (as {})".format(
self.cluster_name,
self.cluster_roles[self.cluster_role].lower()))
))
self.report_plot()
def qfont_scaled(font, factor):
    """Return a copy of `font` with its size multiplied by `factor`.

    Whichever of point size or pixel size is set on the input font is
    scaled; fonts with neither set are returned as an unchanged copy.
    """
    resized = QFont(font)
    point_size = font.pointSizeF()
    if point_size != -1:
        resized.setPointSizeF(point_size * factor)
    else:
        pixel_size = font.pixelSize()
        if pixel_size != -1:
            resized.setPixelSize(int(pixel_size * factor))
    return resized
class GraphicsSimpleTextList(QGraphicsWidget):
    """A simple text list widget.

    Displays a list of single-line text labels in a linear layout, either
    vertically or horizontally (labels are rotated for the perpendicular
    orientation by WrapperLayoutItem).
    """
    # NOTE: the `labels` default was a mutable list (`labels=[]`); changed
    # to an (immutable) empty tuple. `labels` is only iterated, so any
    # iterable is accepted and behaviour is unchanged.
    def __init__(self, labels=(), orientation=Qt.Vertical,
                 alignment=Qt.AlignCenter, parent=None):
        QGraphicsWidget.__init__(self, parent)
        layout = QGraphicsLinearLayout(orientation)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.setLayout(layout)
        self.orientation = orientation
        self.alignment = alignment
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.label_items = []
        self.set_labels(labels)

    def clear(self):
        """Remove all text items."""
        layout = self.layout()
        # Remove from the end so remaining indices stay valid.
        for i in reversed(range(layout.count())):
            witem = layout.itemAt(i)
            witem.item.setParentItem(None)
            if self.scene():
                self.scene().removeItem(witem.item)
            layout.removeAt(i)
        self.label_items = []
        self.updateGeometry()

    def set_labels(self, labels):
        """Set the text labels (any iterable of strings)."""
        self.clear()
        # Items are laid out perpendicular to the widget's orientation.
        orientation = Qt.Horizontal if self.orientation == Qt.Vertical else Qt.Vertical
        for text in labels:
            item = QGraphicsSimpleTextItem(text, self)
            item.setFont(self.font())
            item.setToolTip(text)
            witem = WrapperLayoutItem(item, orientation, parent=self)
            self.layout().addItem(witem)
            self.layout().setAlignment(witem, self.alignment)
            self.label_items.append(item)
        self.layout().activate()
        self.updateGeometry()

    def setAlignment(self, alignment):
        """Set alignment of text items in the widget
        """
        self.alignment = alignment
        layout = self.layout()
        for i in range(layout.count()):
            layout.setAlignment(layout.itemAt(i), alignment)

    def setVisible(self, visible):
        QGraphicsWidget.setVisible(self, visible)
        self.updateGeometry()

    def changeEvent(self, event):
        # Propagate widget font changes to the individual text items.
        if event.type() == QEvent.FontChange:
            self.__update_font()
        return super().changeEvent(event)

    def __iter__(self):
        return iter(self.label_items)

    def __update_font(self):
        for item in self.label_items:
            item.setFont(self.font())
        layout = self.layout()
        for i in range(layout.count()):
            layout.itemAt(i).updateGeometry()
        self.layout().invalidate()
        self.updateGeometry()
class WrapperLayoutItem(QGraphicsLayoutItem):
    """A Graphics layout item wrapping a QGraphicsItem allowing it
    to be managed by a layout.
    """
    def __init__(self, item, orientation=Qt.Horizontal, parent=None):
        QGraphicsLayoutItem.__init__(self, parent)
        self.orientation = orientation
        self.item = item
        if orientation == Qt.Vertical:
            # Vertical items are drawn rotated 90 degrees counter-clockwise.
            self.item.setRotation(-90)
            self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        else:
            self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
    def setGeometry(self, rect):
        QGraphicsLayoutItem.setGeometry(self, rect)
        if self.orientation == Qt.Horizontal:
            self.item.setPos(rect.topLeft())
        else:
            # The rotated item is anchored bottom-left so it extends upward.
            self.item.setPos(rect.bottomLeft())
    def sizeHint(self, which, constraint=QSizeF()):
        if which == Qt.PreferredSize:
            size = self.item.boundingRect().size()
            if self.orientation == Qt.Horizontal:
                return size
            else:
                # Width and height swap for the rotated orientation.
                return QSizeF(size.height(), size.width())
        else:
            return QSizeF()
    def setFont(self, font):
        # Forward to the wrapped item and notify the layout of a size change.
        self.item.setFont(font)
        self.updateGeometry()
    def setText(self, text):
        self.item.setText(text)
        self.updateGeometry()
    def setToolTip(self, tip):
        self.item.setToolTip(tip)
class SliderLine(QGraphicsObject):
    """A movable slider line.

    The line is drawn across `length` at position `value` (clamped to
    [min, max]) and can be dragged with the left mouse button.
    """
    # Emitted whenever the slider value changes (including during drags).
    valueChanged = Signal(float)
    linePressed = Signal()
    lineMoved = Signal()
    lineReleased = Signal()
    rangeChanged = Signal(float, float)
    def __init__(self, parent=None, orientation=Qt.Vertical, value=0.0,
                 length=10.0, **kwargs):
        self._orientation = orientation
        self._value = value
        self._length = length
        self._min = 0.0
        self._max = 1.0
        # _line caches the drawn geometry; it is set to None whenever the
        # geometry becomes stale and is rebuilt lazily in boundingRect().
        self._line = QLineF()
        self._pen = QPen()
        super().__init__(parent, **kwargs)
        self.setAcceptedMouseButtons(Qt.LeftButton)
        self.setPen(make_pen(brush=QColor(50, 50, 50), width=1, cosmetic=True))
        if self._orientation == Qt.Vertical:
            self.setCursor(Qt.SizeVerCursor)
        else:
            self.setCursor(Qt.SizeHorCursor)
    def setPen(self, pen):
        pen = QPen(pen)
        if self._pen != pen:
            self.prepareGeometryChange()
            self._pen = pen
            self._line = None
            self.update()
    def pen(self):
        # Return a copy so callers cannot mutate the internal pen.
        return QPen(self._pen)
    def setValue(self, value):
        # Clamp to the current [min, max] range before comparing/storing.
        value = min(max(value, self._min), self._max)
        if self._value != value:
            self.prepareGeometryChange()
            self._value = value
            self._line = None
            self.valueChanged.emit(value)
    def value(self):
        return self._value
    def setRange(self, minval, maxval):
        maxval = max(minval, maxval)
        if minval != self._min or maxval != self._max:
            self._min = minval
            self._max = maxval
            self.rangeChanged.emit(minval, maxval)
            # Re-clamp the current value to the new range.
            self.setValue(self._value)
    def setLength(self, length):
        if self._length != length:
            self.prepareGeometryChange()
            self._length = length
            self._line = None
    def length(self):
        return self._length
    def setOrientation(self, orientation):
        if self._orientation != orientation:
            self.prepareGeometryChange()
            self._orientation = orientation
            self._line = None
            if self._orientation == Qt.Vertical:
                self.setCursor(Qt.SizeVerCursor)
            else:
                self.setCursor(Qt.SizeHorCursor)
    def mousePressEvent(self, event):
        event.accept()
        self.linePressed.emit()
    def mouseMoveEvent(self, event):
        pos = event.pos()
        if self._orientation == Qt.Vertical:
            self.setValue(pos.y())
        else:
            self.setValue(pos.x())
        self.lineMoved.emit()
        event.accept()
    def mouseReleaseEvent(self, event):
        if self._orientation == Qt.Vertical:
            self.setValue(event.pos().y())
        else:
            self.setValue(event.pos().x())
        self.lineReleased.emit()
        event.accept()
    def boundingRect(self):
        # Rebuild the cached line geometry if it was invalidated.
        if self._line is None:
            if self._orientation == Qt.Vertical:
                self._line = QLineF(0, self._value, self._length, self._value)
            else:
                self._line = QLineF(self._value, 0, self._value, self._length)
        r = QRectF(self._line.p1(), self._line.p2())
        penw = self.pen().width()
        return r.adjusted(-penw, -penw, penw, penw)
    def paint(self, painter, *args):
        if self._line is None:
            # Side effect of boundingRect(): rebuilds self._line.
            self.boundingRect()
        painter.save()
        painter.setPen(self.pen())
        painter.drawLine(self._line)
        painter.restore()
def clusters_at_height(root, height):
    """Return a list of clusters by cutting the clustering at `height`.

    A node is taken as soon as its merge height drops below `height`;
    its whole subtree is then excluded from further consideration.
    """
    cut = []
    consumed = set()
    for node in preorder(root):
        if node in consumed:
            continue
        if node.value.height < height:
            cut.append(node)
            consumed.update(preorder(node))
    return cut
def main(argv=None):
    """Run the widget as a standalone application (manual testing entry).

    Loads a data table (default: iris.tab), computes Euclidean distances
    and feeds them to OWHierarchicalClustering.
    """
    from AnyQt.QtWidgets import QApplication
    import sip
    import Orange.distance as distance
    if argv is None:
        argv = sys.argv
    argv = list(argv)
    app = QApplication(argv)
    if len(argv) > 1:
        filename = argv[1]
    else:
        filename = "iris.tab"
    w = OWHierarchicalClustering()
    data = Orange.data.Table(filename)
    matrix = distance.Euclidean(distance._preprocess(data))
    w.set_distances(matrix)
    w.handleNewSignals()
    w.show()
    w.raise_()
    rval = app.exec_()
    # Explicit teardown so widget/C++ resources are released before exit.
    w.set_distances(None)
    w.handleNewSignals()
    w.onDeleteWidget()
    sip.delete(w)
    del w
    app.processEvents()
    return rval
if __name__ == "__main__":
    sys.exit(main())
| cheral/orange3 | Orange/widgets/unsupervised/owhierarchicalclustering.py | Python | bsd-2-clause | 57,860 |
# encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
from .QAnimationGroup import QAnimationGroup
# Auto-generated API stub: method bodies only document the signatures and
# return types of the real (compiled) PyQt4 implementation.
class QSequentialAnimationGroup(QAnimationGroup):
    """ QSequentialAnimationGroup(QObject parent=None) """
    def addPause(self, p_int): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.addPause(int) -> QPauseAnimation """
        return QPauseAnimation
    def currentAnimation(self): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.currentAnimation() -> QAbstractAnimation """
        return QAbstractAnimation
    def currentAnimationChanged(self, *args, **kwargs): # real signature unknown
        """ QSequentialAnimationGroup.currentAnimationChanged[QAbstractAnimation] [signal] """
        pass
    def duration(self): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.duration() -> int """
        return 0
    def event(self, QEvent): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.event(QEvent) -> bool """
        return False
    def insertPause(self, p_int, p_int_1): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.insertPause(int, int) -> QPauseAnimation """
        return QPauseAnimation
    def updateCurrentTime(self, p_int): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.updateCurrentTime(int) """
        pass
    def updateDirection(self, QAbstractAnimation_Direction): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.updateDirection(QAbstractAnimation.Direction) """
        pass
    def updateState(self, QAbstractAnimation_State, QAbstractAnimation_State_1): # real signature unknown; restored from __doc__
        """ QSequentialAnimationGroup.updateState(QAbstractAnimation.State, QAbstractAnimation.State) """
        pass
    def __init__(self, QObject_parent=None): # real signature unknown; restored from __doc__
        pass
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QSequentialAnimationGroup.py | Python | gpl-2.0 | 2,169 |
"""In-memory representation of interfaces and other data structures.
The objects in this module are used to build a representation of an XML interface
file in memory.
@see: L{reader} constructs these data-structures
@see: U{http://0install.net/interface-spec.html} description of the domain model
@var defaults: Default values for the 'default' attribute for <environment> bindings of
well-known variables.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os, re, locale, sys
from zeroinstall import SafeException, version
from zeroinstall.injector.namespaces import XMLNS_IFACE
from zeroinstall.injector.versions import parse_version
from zeroinstall import support
# Network-use policy levels, ordered from most to least restrictive.
network_offline = 'off-line'
network_minimal = 'minimal'
network_full = 'full'
network_levels = (network_offline, network_minimal, network_full)
class InvalidInterface(SafeException):
    """Raised when parsing an invalid feed.

    @ivar feed_url: URL of the offending feed, if known (set by callers)
    """
    feed_url = None

    def __init__(self, message, ex = None):
        """@type message: str
        @param ex: the underlying exception to include in the message, if any
        """
        if ex:
            try:
                message += "\n\n(exact error: %s)" % ex
            except Exception:
                # Some Python messages have type str but contain UTF-8 sequences.
                # (e.g. IOException). Adding these to a Unicode 'message' (e.g.
                # after gettext translation) will cause an error.
                # Narrowed from a bare ``except:`` so that KeyboardInterrupt
                # and SystemExit are no longer swallowed here.
                import codecs
                decoder = codecs.lookup('utf-8')
                decex = decoder.decode(str(ex), errors = 'replace')[0]
                message += "\n\n(exact error: %s)" % decex
        SafeException.__init__(self, message)

    def __unicode__(self):
        """@rtype: str"""
        if hasattr(SafeException, '__unicode__'):
            # Python >= 2.6
            if self.feed_url:
                return _('%s [%s]') % (SafeException.__unicode__(self), self.feed_url)
            return SafeException.__unicode__(self)
        else:
            return support.unicode(SafeException.__str__(self))
def _best_language_match(options):
"""@type options: {str: str}
@rtype: str"""
(language, encoding) = locale.getlocale()
if language:
# xml:lang uses '-', while LANG uses '_'
language = language.replace('_', '-')
else:
language = 'en-US'
return (options.get(language, None) or # Exact match (language+region)
options.get(language.split('-', 1)[0], None) or # Matching language
options.get('en', None)) # English
class Interface(object):
    """An Interface represents some contract of behaviour.
    @ivar uri: the URI for this interface.
    @ivar stability_policy: user's configured policy.
    Implementations at this level or higher are preferred.
    Lower levels are used only if there is no other choice.
    """
    __slots__ = ['uri']

    def __init__(self, uri):
        """@type uri: str"""
        assert uri
        # Accept http/https URLs and absolute local paths; reject anything else.
        valid = (uri.startswith('http:') or uri.startswith('https:')
                 or os.path.isabs(uri))
        if not valid:
            raise SafeException(_("Interface name '%s' doesn't start "
                        "with 'http:' or 'https:'") % uri)
        self.uri = uri
        self.reset()

    def reset(self):
        pass

    def get_name(self):
        """@rtype: str"""
        return '({})'.format(os.path.basename(self.uri))

    def __repr__(self):
        """@rtype: str"""
        return _("<Interface %s>") % self.uri
class ZeroInstallFeed(object):
    """A feed lists available implementations of an interface.
    @ivar url: the URL for this feed
    @ivar implementations: Implementations in this feed, indexed by ID
    @type implementations: {str: L{Implementation}}
    @ivar name: human-friendly name
    @ivar summaries: short textual description (in various languages, since 0.49)
    @type summaries: {str: str}
    @ivar descriptions: long textual description (in various languages, since 0.49)
    @type descriptions: {str: str}
    @ivar last_modified: timestamp on signature
    @ivar local_path: the path of this local feed, or None if remote (since 1.7)
    @type local_path: str | None
    @ivar feeds: list of <feed> elements in this feed
    @type feeds: [L{Feed}]
    @ivar feed_for: interfaces for which this could be a feed
    @type feed_for: set(str)
    @ivar metadata: extra elements we didn't understand
    """
    # _main is deprecated
    __slots__ = ['url', 'implementations', 'name', 'descriptions', 'first_description', 'summaries', 'first_summary',
             'last_modified', 'feeds', 'feed_for', 'metadata', 'local_path', 'feed_element']
    def __init__(self, feed_element, local_path = None):
        """Create a feed object from a DOM.
        @param feed_element: the root element of a feed file
        @type feed_element: L{qdom.Element}
        @param local_path: the pathname of this local feed, or None for remote feeds
        @type local_path: str | None
        @raise SafeException: if the root element is not <interface>/<feed>
        @raise InvalidInterface: on missing uri/name/summary or version mismatch"""
        self.local_path = local_path
        self.implementations = {}
        self.name = None
        self.summaries = {}        # { lang: str }
        self.first_summary = None
        self.last_modified = None
        self.feeds = []
        self.metadata = []
        self.feed_element = feed_element
        if feed_element is None:
            return        # XXX subclass?
        if feed_element.name not in ('interface', 'feed'):
            raise SafeException("Root element should be <interface>, not <%s>" % feed_element.name)
        assert feed_element.uri == XMLNS_IFACE, "Wrong namespace on root element: %s" % feed_element.uri
        if local_path:
            self.url = local_path
        else:
            assert local_path is None
            self.url = feed_element.getAttribute('uri')
            if not self.url:
                raise InvalidInterface(_("<interface> uri attribute missing"))
        # Refuse feeds that declare they need a newer Zero Install.
        min_injector_version = feed_element.getAttribute('min-injector-version')
        if min_injector_version:
            if parse_version(min_injector_version) > parse_version(version):
                raise InvalidInterface(_("This feed requires version %(min_version)s or later of "
                            "Zero Install, but I am only version %(version)s. "
                            "You can get a newer version from http://0install.net") %
                            {'min_version': min_injector_version, 'version': version})
        for x in feed_element.childNodes:
            # Elements in foreign namespaces are preserved as opaque metadata.
            if x.uri != XMLNS_IFACE:
                self.metadata.append(x)
                continue
            if x.name == 'name':
                self.name = x.content
            elif x.name == 'description':
                pass
            elif x.name == 'summary':
                # The first <summary> is the fallback when no language matches.
                if self.first_summary == None:
                    self.first_summary = x.content
                self.summaries[x.attrs.get("http://www.w3.org/XML/1998/namespace lang", 'en')] = x.content
            elif x.name == 'feed-for':
                pass
            elif x.name == 'feed':
                pass
            else:
                self.metadata.append(x)
        if not self.name:
            raise InvalidInterface(_("Missing <name> in feed"))
        if not self.summary:
            raise InvalidInterface(_("Missing <summary> in feed"))
    def get_name(self):
        """@rtype: str"""
        return self.name or '(' + os.path.basename(self.url) + ')'
    def __repr__(self):
        return _("<Feed %s>") % self.url
    def get_metadata(self, uri, name):
        """Return a list of interface metadata elements with this name and namespace URI.
        @type uri: str
        @type name: str"""
        return [m for m in self.metadata if m.name == name and m.uri == uri]
    @property
    def summary(self):
        # Best locale match, falling back to the first summary seen.
        return _best_language_match(self.summaries) or self.first_summary
# URI-escaping helpers; the two branches provide byte/str variants for
# Python 3 and Python 2 respectively (same escaping rules in both).
if sys.version_info[0] > 2:
    # Python 3
    def escape(uri):
        """Convert each space to %20, etc
        @type uri: str
        @rtype: str"""
        # Operate on the UTF-8 encoding so multi-byte characters are
        # escaped byte-by-byte.
        return re.sub(b'[^-_.a-zA-Z0-9]',
            lambda match: ('%%%02x' % ord(match.group(0))).encode('ascii'),
            uri.encode('utf-8')).decode('ascii')
    def _pretty_escape(uri):
        """Convert each space to %20, etc
        : is preserved and / becomes #. This makes for nicer strings,
        and may replace L{escape} everywhere in future.
        @type uri: str
        @rtype: str"""
        if os.name == "posix":
            # Only preserve : on Posix systems
            preserveRegex = b'[^-_.a-zA-Z0-9:/]'
        else:
            # Other OSes may not allow the : character in file names
            preserveRegex = b'[^-_.a-zA-Z0-9/]'
        return re.sub(preserveRegex,
            lambda match: ('%%%02x' % ord(match.group(0))).encode('ascii'),
            uri.encode('utf-8')).decode('ascii').replace('/', '#')
else:
    # Python 2
    def escape(uri):
        """Convert each space to %20, etc
        @type uri: str
        @rtype: str"""
        return re.sub('[^-_.a-zA-Z0-9]',
            lambda match: '%%%02x' % ord(match.group(0)),
            uri.encode('utf-8'))
    def _pretty_escape(uri):
        """Convert each space to %20, etc
        : is preserved and / becomes #. This makes for nicer strings,
        and may replace L{escape} everywhere in future.
        @type uri: str
        @rtype: str"""
        if os.name == "posix":
            # Only preserve : on Posix systems
            preserveRegex = '[^-_.a-zA-Z0-9:/]'
        else:
            # Other OSes may not allow the : character in file names
            preserveRegex = '[^-_.a-zA-Z0-9/]'
        return re.sub(preserveRegex,
            lambda match: '%%%02x' % ord(match.group(0)),
            uri.encode('utf-8')).replace('/', '#')
| turtledb/0install | zeroinstall/injector/model.py | Python | lgpl-2.1 | 8,460 |
# These imports are for python3 compatibility inside python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Package metadata, exposed as module attributes for packaging tooling.
__author__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__maintainer__ = 'Jeffrey Phillips Freeman (WI2ARD)'
__email__ = 'jeffrey.freeman@syncleus.com'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2016, Syncleus, Inc. and contributors'
__credits__ = []
# from click.testing import CliRunner
# from apex.cli import main
#
# def test_main():
# runner = CliRunner()
# result = runner.invoke(main, [])
#
# assert result.output == '()\n'
# assert result.exit_code == 0
| Syncleus/apex | tests/test_apex.py | Python | apache-2.0 | 671 |
# -*- coding:utf-8 -*-
import tornado.web
import turbo.app
from turbo import app_config
from turbo.core.exceptions import ResponseError, ResponseMsg
# start use session from here
# from lib.session import SessionStore, SessionObject
from turbo.template import turbo_jinja2
class MixinHandler(turbo.app.BaseHandler):
    # Placeholder for behaviour shared by all handlers (e.g. future session
    # support); currently inherits everything from turbo's BaseHandler.
    pass
class BaseHandler(MixinHandler):
    """Project-wide base request handler: parameter alias, jinja2 rendering
    and structured error/message responses."""
    # session_initializer = {
    #     'uid': None,
    #     'avatar': None,
    #     'nickname': None,
    # }
    # session_object = SessionObject
    # session_store = SessionStore()
    def initialize(self):
        super(BaseHandler, self).initialize()
        # Short alias for the request parameters parsed by turbo.
        self._params = self.parameter
    @turbo_jinja2
    def render_string(self, template_name, **kwargs):
        # Body intentionally empty: the turbo_jinja2 decorator performs
        # the actual template rendering.
        pass
    def prepare(self):
        super(BaseHandler, self).prepare()
    def response_msg(self, msg='', code=1):
        # Abort request processing with a structured "message" response.
        raise ResponseMsg(code, msg)
    def response_error(self, msg='', code=1):
        # Abort request processing with a structured error response.
        raise ResponseError(code, msg)
    def http_error(self, status_code=404):
        # Raises tornado's HTTPError; never returns.
        raise tornado.web.HTTPError(status_code)
    def write_error(self, status_code, **kwargs):
        """Override to implement custom error pages.
        http://tornado.readthedocs.org/en/stable/_modules/tornado/web.html#RequestHandler.write_error
        """
        super(BaseHandler, self).write_error(status_code, **kwargs)
class ErrorHandler(BaseHandler):
    """Default handler for unmatched routes: 404 page for browsers, a JSON
    error payload for ajax/API requests."""
    def initialize(self, status_code):
        super(ErrorHandler, self).initialize()
        self.set_status(status_code)
    def prepare(self):
        if not self.is_ajax():
            if self.get_status() == 404:
                # NOTE(review): http_error() raises internally, so this
                # outer `raise` never evaluates its operand.
                raise self.http_error(404)
        else:
            self.wo_resp({'code': 1, 'msg': 'Api Not found'})
            self.finish()
            return
    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token. This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass
from turbo.conf import app_config
# Register ErrorHandler as the application's default handler for
# unmatched routes.
app_config.error_handler = ErrorHandler
| wecatch/app-turbo | demos/jinja2-support/apps/base.py | Python | apache-2.0 | 2,105 |
import time
import grovepi
from threading import Thread
class Sensor(Thread):
    """Background reader for the Grove analog sound sensor.

    Polls the sensor roughly every 10 ms and keeps both the raw reading
    and an exponential moving average. Set ``stop = True`` to end the
    polling loop.
    """
    def __init__ (self, pin, avg_fraction=0.8):
        Thread.__init__(self)
        self.name = "Grove sound sensor"
        self.shortname = "Sound"
        self.pin = pin
        # Weight given to each *new* sample in the moving average.
        self.avg_fraction = avg_fraction
        self.value = 0
        self.avg_value = 0
        self.stop = False
        grovepi.pinMode(pin, "INPUT")

    def get_log_header(self, delimiter):
        """Column header for log output, joined by a delimiter character."""
        return "raw%caverage" % (delimiter)

    def get_log_string(self, delimiter):
        """One delimiter-separated log record (raw and averaged value)."""
        return "%.2f%c%.2f" % (self.value, delimiter, self.avg_value)

    def get_str1(self):
        """First display line: the raw reading."""
        return "val: %.2f" % (self.value)

    def get_str2(self):
        """Second display line: the smoothed reading."""
        return "avg: %.2f" % (self.avg_value)

    def avg(self, current, new, fraction):
        """Exponential moving average: blend `new` into `current`."""
        return current * (1.0 - fraction) + new * fraction

    def run(self):
        """Poll the sensor until `stop` becomes True."""
        while not self.stop:
            try:
                reading = grovepi.analogRead(self.pin)
                self.value = reading
                self.avg_value = self.avg(self.avg_value, reading, self.avg_fraction)
            except IOError as err:
                print("Sound sensor exception: %s" % (err))
            time.sleep(0.01)
| mywdka/grovepi_sensorlogger | sensors/sound.py | Python | gpl-3.0 | 1,185 |
"""Support for the Amazon Polly text to speech service."""
import logging
import voluptuous as vol
from homeassistant.components.tts import PLATFORM_SCHEMA, Provider
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Configuration keys for the Amazon Polly TTS platform.
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
CONF_PROFILE_NAME = 'profile_name'
ATTR_CREDENTIALS = 'credentials'
DEFAULT_REGION = 'us-east-1'
SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
                     'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2',
                     'eu-west-3', 'ap-southeast-1', 'ap-southeast-2',
                     'ap-northeast-2', 'ap-northeast-1', 'ap-south-1',
                     'sa-east-1']
CONF_VOICE = 'voice'
CONF_OUTPUT_FORMAT = 'output_format'
CONF_SAMPLE_RATE = 'sample_rate'
CONF_TEXT_TYPE = 'text_type'
# Voice IDs accepted by the Polly API, grouped by language.
# Fixed: 'Aditi' was listed twice (English Indian and Hindi); a duplicate
# entry is redundant for the vol.In membership check.
SUPPORTED_VOICES = [
    'Zhiyu',  # Chinese
    'Mads', 'Naja',  # Danish
    'Ruben', 'Lotte',  # Dutch
    'Russell', 'Nicole',  # English Austrailian
    'Brian', 'Amy', 'Emma',  # English
    'Aditi', 'Raveena',  # English, Indian (Aditi also speaks Hindi)
    'Joey', 'Justin', 'Matthew', 'Ivy', 'Joanna', 'Kendra', 'Kimberly',
    'Salli',  # English
    'Geraint',  # English Welsh
    'Mathieu', 'Celine', 'Lea',  # French
    'Chantal',  # French Canadian
    'Hans', 'Marlene', 'Vicki',  # German
    'Karl', 'Dora',  # Icelandic
    'Giorgio', 'Carla', 'Bianca',  # Italian
    'Takumi', 'Mizuki',  # Japanese
    'Seoyeon',  # Korean
    'Liv',  # Norwegian
    'Jacek', 'Jan', 'Ewa', 'Maja',  # Polish
    'Ricardo', 'Vitoria',  # Portuguese, Brazilian
    'Cristiano', 'Ines',  # Portuguese, European
    'Carmen',  # Romanian
    'Maxim', 'Tatyana',  # Russian
    'Enrique', 'Conchita', 'Lucia',  # Spanish European
    'Mia',  # Spanish Mexican
    'Miguel', 'Penelope',  # Spanish US
    'Astrid',  # Swedish
    'Filiz',  # Turkish
    'Gwyneth',  # Welsh
]
SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm']
SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050']
# Valid sample rates per output format (pcm does not support 22050).
SUPPORTED_SAMPLE_RATES_MAP = {
    'mp3': ['8000', '16000', '22050'],
    'ogg_vorbis': ['8000', '16000', '22050'],
    'pcm': ['8000', '16000'],
}
SUPPORTED_TEXT_TYPES = ['text', 'ssml']
# Maps the Polly response ContentType to a file extension.
CONTENT_TYPE_EXTENSIONS = {
    'audio/mpeg': 'mp3',
    'audio/ogg': 'ogg',
    'audio/pcm': 'pcm',
}
DEFAULT_VOICE = 'Joanna'
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'
DEFAULT_SAMPLE_RATES = {
    'mp3': '22050',
    'ogg_vorbis': '22050',
    'pcm': '16000',
}
# Voluptuous schema for the platform's configuration.yaml entry.
# Access key id/secret must appear together; profile_name is mutually
# exclusive with them.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION):
        vol.In(SUPPORTED_REGIONS),
    vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
    vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT):
        vol.In(SUPPORTED_OUTPUT_FORMATS),
    vol.Optional(CONF_SAMPLE_RATE):
        vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE):
        vol.In(SUPPORTED_TEXT_TYPES),
})
def get_engine(hass, config):
    """Set up Amazon Polly speech component.

    Validates the sample rate against the output format, creates the boto3
    Polly client and queries the available voices/languages.
    Returns an AmazonPollyProvider, or None on invalid configuration.
    """
    output_format = config.get(CONF_OUTPUT_FORMAT)
    # Fall back to the format-specific default sample rate.
    sample_rate = config.get(
        CONF_SAMPLE_RATE, DEFAULT_SAMPLE_RATES[output_format])
    if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
        _LOGGER.error("%s is not a valid sample rate for %s",
                      sample_rate, output_format)
        return None
    # Note: mutates the caller's config dict (resolved sample rate kept,
    # AWS credential keys removed below).
    config[CONF_SAMPLE_RATE] = sample_rate
    import boto3
    profile = config.get(CONF_PROFILE_NAME)
    if profile is not None:
        boto3.setup_default_session(profile_name=profile)
    aws_config = {
        CONF_REGION: config.get(CONF_REGION),
        CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID),
        CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY),
    }
    # Strip credentials from the stored config so they aren't kept around.
    del config[CONF_REGION]
    del config[CONF_ACCESS_KEY_ID]
    del config[CONF_SECRET_ACCESS_KEY]
    polly_client = boto3.client('polly', **aws_config)
    supported_languages = []
    all_voices = {}
    all_voices_req = polly_client.describe_voices()
    # Index voices by Id and collect the distinct language codes.
    for voice in all_voices_req.get('Voices'):
        all_voices[voice.get('Id')] = voice
        if voice.get('LanguageCode') not in supported_languages:
            supported_languages.append(voice.get('LanguageCode'))
    return AmazonPollyProvider(
        polly_client, config, supported_languages, all_voices)
class AmazonPollyProvider(Provider):
    """Amazon Polly speech api provider."""

    def __init__(self, polly_client, config, supported_languages,
                 all_voices):
        """Initialize Amazon Polly provider for TTS.

        :param polly_client: configured boto3 Polly client
        :param config: validated platform config (output format, sample
            rate, text type, default voice)
        :param supported_languages: language codes reported by Polly
        :param all_voices: voice descriptions from describe_voices(),
            indexed by voice Id
        """
        self.client = polly_client
        self.config = config
        self.supported_langs = supported_languages
        self.all_voices = all_voices
        self.default_voice = self.config.get(CONF_VOICE)
        self.name = 'Amazon Polly'

    @property
    def supported_languages(self):
        """Return a list of supported languages."""
        return self.supported_langs

    @property
    def default_language(self):
        """Return the default language (that of the default voice)."""
        return self.all_voices.get(self.default_voice).get('LanguageCode')

    @property
    def default_options(self):
        """Return dict include default options."""
        return {CONF_VOICE: self.default_voice}

    @property
    def supported_options(self):
        """Return a list of supported options."""
        return [CONF_VOICE]

    def get_tts_audio(self, message, language=None, options=None):
        """Request TTS file from Polly.

        Returns (extension, audio bytes), or (None, None) if the requested
        voice does not match the requested language.
        """
        # Guard against the signature default: previously a caller passing
        # no options dict crashed with AttributeError on options.get().
        options = options or {}
        voice_id = options.get(CONF_VOICE, self.default_voice)
        voice_in_dict = self.all_voices.get(voice_id)
        if language != voice_in_dict.get('LanguageCode'):
            _LOGGER.error("%s does not support the %s language",
                          voice_id, language)
            return None, None

        resp = self.client.synthesize_speech(
            OutputFormat=self.config[CONF_OUTPUT_FORMAT],
            SampleRate=self.config[CONF_SAMPLE_RATE],
            Text=message,
            TextType=self.config[CONF_TEXT_TYPE],
            VoiceId=voice_id
        )

        return (CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')],
                resp.get('AudioStream').read())
| MartinHjelmare/home-assistant | homeassistant/components/amazon_polly/tts.py | Python | apache-2.0 | 6,590 |
"""
SoftLayer.CLI.helpers
~~~~~~~~~~~~~~~~~~~~~
Helpers to be used in CLI modules in SoftLayer.CLI.modules.*
:license: MIT, see LICENSE for more details.
"""
from SoftLayer.CLI import exceptions
def resolve_id(resolver, identifier, name='object'):
    """Resolves a single id using a resolver function.

    :param resolver: function that resolves ids. Should return None or a list
                     of ids.
    :param string identifier: a string identifier used to resolve ids
    :param string name: the object type, to be used in error messages
    :returns: the single resolved id
    :raises CLIAbort: if zero or more than one id is found
    """
    # The documented contract lets the resolver return None; treat that the
    # same as "no matches" instead of crashing on len(None).
    ids = resolver(identifier) or []
    if not ids:
        raise exceptions.CLIAbort("Error: Unable to find %s '%s'"
                                  % (name, identifier))
    if len(ids) > 1:
        raise exceptions.CLIAbort(
            "Error: Multiple %s found for '%s': %s" %
            (name, identifier, ', '.join([str(_id) for _id in ids])))
    return ids[0]
| cloudify-cosmo/softlayer-python | SoftLayer/CLI/helpers.py | Python | mit | 956 |
def render(blocks, name):
    """Write the raw content of every block, in order, to the file `name`.

    Each element of `blocks` must expose a bytes `content` attribute; the
    target file is (over)written in binary mode.
    """
    with open(name, "wb") as out:
        out.writelines(block.content for block in blocks)
| vasili-v/ctauto | ctauto/renderer.py | Python | gpl-3.0 | 122 |
import re
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from decommentariis.models import CommentaryEntry, TEIEntry, TEISection
from decommentariis.models import Cohort, CohortMembers, CohortTexts
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit
from crispy_forms.bootstrap import *
class UserForm(forms.ModelForm) :
    """ModelForm exposing username, email and password for a Django auth User.

    The password field is rendered with a password input widget.
    """
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta :
        model = User
        fields = ('username', 'email', 'password')
    # Fix: removed a stray dead ``pass`` statement left after the Meta class.
class CohortCreateForm(forms.ModelForm):
    """Form for creating a new Cohort, rendered with crispy-forms."""
    cohort_name = forms.CharField(widget=forms.TextInput(), max_length=64)
    cohort_description = forms.CharField(widget=forms.Textarea())
    auto_id = True

    def __init__(self, *args, **kwargs):
        # Configure the crispy-forms helper (horizontal form posting to
        # the cohort_add view) before delegating to the base constructor.
        helper = FormHelper()
        helper.form_id = 'id-cohortform'
        helper.form_class = 'form-horizontal'
        helper.form_method = 'post'
        helper.form_action = 'cohort_add'
        helper.html5_required = False
        helper.layout = Layout(
            Field('cohort_name'),
            Field('cohort_description'),
            StrictButton('Create Cohort', type='submit', css_class="btn-success pull-right"))
        self.helper = helper
        super(CohortCreateForm, self).__init__(*args, **kwargs)

    def clean_cohort_name(self):
        """Reject names with whitespace/non-word characters or a leading digit."""
        data = self.cleaned_data['cohort_name']
        if re.search(r"[\s\W]+", data):
            raise ValidationError('Name must not contain whitespace, and should only contain A-Z, a-z, 0-9, and _ to be valid')
        if re.search(r"^\d+", data):
            raise ValidationError('Name must not begin with digits (but may contain them)')
        return data

    class Meta:
        model = Cohort
        fields = ('cohort_name', 'cohort_description')
class CohortEditForm(forms.ModelForm) :
	"""Form for editing an existing Cohort's description; the name is fixed."""
	#cohort_name = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), max_length=64)
	cohort_description = forms.CharField(widget=forms.Textarea())
	auto_id = True
	def __init__(self, *args, **kwargs):
		# Configure crispy-forms rendering (horizontal form, POST submit).
		self.helper = FormHelper()
		self.helper.form_id = 'id-cohortform'
		self.helper.form_class = 'form-horizontal'
		self.helper.form_method = 'post'
		# self.helper.form_action = ''
		self.helper.html5_required = False
		self.helper.layout = Layout(
			#Field('cohort_name'),
			Field('cohort_description'),
			StrictButton('Edit Cohort', type='submit', css_class="btn-success pull-right"))
		super(CohortEditForm, self).__init__(*args, **kwargs)
	def clean_cohort_name(self) :
		# NOTE(review): 'cohort_name' is not in Meta.fields, so Django will
		# not call this cleaner; if it were called, cleaned_data would lack
		# the key and raise KeyError below. Confirm whether this is dead code.
		if self.instance and self.instance.cohort_name and self.instance.cohort_name != self.cleaned_data['cohort_name'] :
			raise ValidationError('Cannot change value of cohort_name')
		elif self.instance and self.instance.cohort_name :
			return self.instance.cohort_name
		else :
			raise ValidationError('Cannot change value of cohort_name')
	def clean(self) :
		# Delegates to the base implementation unchanged.
		cleaned_data=super(CohortEditForm, self).clean()
		return cleaned_data
	class Meta:
		model = Cohort
		fields = ('cohort_description',)
| scotartt/commentarius | decommentariis/decommentariis/forms.py | Python | gpl-2.0 | 2,980 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Dict, List
import numpy as np
from fairseq.data import data_utils
from . import FairseqDataset
logger = logging.getLogger(__name__)
class MultiCorpusDataset(FairseqDataset):
    """
    Stores multiple instances of FairseqDataset together. Requires each instance
    to be the same dataset, as the collate method needs to work on batches with
    samples from each dataset.

    Allows specifying a distribution over the datasets to use. Note that unlike
    MultiCorpusSampledDataset, this distribution allows sampling for each item,
    rather than on a batch level.

    Each time ordered_indices() is called, a new sample is generated with
    the specified distribution.

    Args:
        datasets: a OrderedDict of FairseqDataset instances.
        distribution: a List containing the probability of getting an utterance from
            corresponding dataset
        seed: RNG seed, combined with the current epoch so every epoch yields a
            different but reproducible sampling
    """

    def __init__(
        self, datasets: Dict[str, FairseqDataset], distribution: List[float], seed: int
    ):
        super().__init__()
        assert isinstance(datasets, OrderedDict)
        assert len(datasets) == len(distribution)
        self.datasets = datasets
        self.distribution = distribution
        self.seed = seed
        # Fix: ordered_indices() reads self.epoch, which was previously only
        # assigned in set_epoch(); initialize it so the dataset is usable even
        # if set_epoch() has not been called yet.
        self.epoch = 0
        # Avoid repeated conversions to list later
        self.dataset_list = list(datasets.values())
        self.total_num_instances = 0
        first_dataset = self.dataset_list[0]
        # dataset_offsets[i] is the global index at which dataset i starts.
        self.dataset_offsets = []
        for dataset in datasets.values():
            assert isinstance(dataset, FairseqDataset)
            assert type(dataset) is type(first_dataset)
            self.dataset_offsets.append(self.total_num_instances)
            self.total_num_instances += len(dataset)

    def ordered_indices(self):
        """Sample a fresh ordering over all global indices for this epoch."""
        with data_utils.numpy_seed(self.seed, self.epoch):
            # Used to store the order of indices of each dataset to use
            indices = [
                np.random.permutation(len(dataset))
                for dataset in self.datasets.values()
            ]
            # Keep track of which samples we've used for each dataset
            counters = [0 for _ in self.datasets]
            return np.array(
                [
                    self._sample(indices, counters)
                    for _ in range(self.total_num_instances)
                ],
                dtype=np.int64,
            )

    def _sample(self, indices, counters):
        """Draw one global index according to ``self.distribution``."""
        # First pick dataset
        dataset_idx = np.random.choice(len(self.distribution), p=self.distribution)
        # Then get dataset internal index
        idx = indices[dataset_idx][counters[dataset_idx]]
        # Convert to multi-datasets index
        idx += self.dataset_offsets[dataset_idx]
        counters[dataset_idx] += 1
        # Reset if we reach end, re-shuffling that dataset's indices
        if counters[dataset_idx] == len(self.dataset_list[dataset_idx]):
            counters[dataset_idx] = 0
            indices[dataset_idx] = np.random.permutation(
                len(self.dataset_list[dataset_idx])
            )
        return idx

    def _map_index(self, index: int):
        """
        If dataset A has length N and dataset B has length M
        then index 1 maps to index 1 of dataset A, and index N + 1
        maps to index 1 of B.
        """
        counter = 0
        for key, dataset in self.datasets.items():
            if index < counter + len(dataset):
                return index - counter, key
            counter += len(dataset)
        raise ValueError(
            "Invalid index: {}, max: {}".format(index, self.total_num_instances)
        )

    def __len__(self):
        """
        Length of this dataset is the sum of individual datasets
        """
        return self.total_num_instances

    def __getitem__(self, index):
        index, key = self._map_index(index)
        return self.datasets[key][index]

    def collater(self, samples):
        """
        Since we enforce all datsets to be the same, collating is just
        picking the first one and doing collate.
        """
        if len(samples) == 0:
            return None
        return self.dataset_list[0].collater(samples)

    def num_tokens(self, index: int):
        index, key = self._map_index(index)
        return self.datasets[key].num_tokens(index)

    def size(self, index: int):
        index, key = self._map_index(index)
        return self.datasets[key].size(index)

    def set_epoch(self, epoch, **unused):
        super().set_epoch(epoch)
        self.epoch = epoch

    @property
    def supports_prefetch(self):
        return False
| hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/data/multi_corpus_dataset.py | Python | bsd-3-clause | 4,849 |
class Tile():
    """A map tile that may block movement and, independently, line of sight."""

    def __init__(self, blocked, block_sight=None):
        """By default a tile that blocks movement also blocks sight."""
        self.blocked = blocked
        self.block_sight = blocked if block_sight is None else block_sight
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for the Acoustic Model Discovery Toolkit (distutils setup).
setup(
    name='amdtk',
    version='1.0.0',
    description='Acoustic Model Discovery Toolkit',
    author='Lucas Ondel',
    author_email='lucas.ondel@gmail.com',
    # Sub-packages shipped with the distribution.
    packages=[
        'amdtk',
        'amdtk/io',
        'amdtk/densities',
        'amdtk/models',
        'amdtk/interface'
    ],
    license='MIT')
| amdtkdev/amdtk | setup.py | Python | mit | 378 |
#!/usr/bin/python3
from numpy import *
import sys
import os.path
# Split a whitespace-separated time-series file into time slices.
# Usage: tiser_slice.py <inputfile> <outputdir> <slice_size>
# Each output file is named <basename>.<slice#>.<ext> in <outputdir>.
# NOTE(review): the numpy star import above appears unused — confirm before removing.
inputfile = sys.argv[1]
outputdir = sys.argv[2]
filename = os.path.basename(inputfile)
# Split "name.ext" at the LAST dot (raises ValueError if there is none).
dotpos = filename.rindex('.')
basename = filename[:dotpos]
extension = filename[dotpos+1:]
sliceSize = float(sys.argv[3])
sliceNumber = 0
outputFileName = "%s/%s.%09d.%s"%(outputdir,basename,sliceNumber,extension)
outputFile = open(outputFileName,'w')
f = open(inputfile, 'r')
while 1:
    # Read the input in ~100 KB batches of whole lines.
    lines = f.readlines(100000)
    if not lines:
        break
    for line in lines:
        a = line.split()
        # First column is the timestamp.
        t = float(a[0])
        currentTime = sliceNumber*sliceSize
        if t >= currentTime:
            # Start a new slice file; note slice 0 is reopened once here.
            # NOTE(review): if a timestamp jumps over several slice widths,
            # sliceNumber only advances by one — confirm this is intended.
            outputFile.close()
            outputFileName = "%s/%s.%09d.%s"%(outputdir,basename,sliceNumber,extension)
            print(outputFileName)
            outputFile = open(outputFileName,'w')
            sliceNumber += 1
        outputFile.write(line)
f.close()
| fzenke/pubsim | mov/tiser_slice.py | Python | gpl-2.0 | 928 |
import os
from datetime import datetime
import shutil
from twisted.trial import unittest
from scrapy.extensions.spiderstate import SpiderState
from scrapy.spiders import Spider
from scrapy.exceptions import NotConfigured
from scrapy.utils.test import get_crawler
class SpiderStateTest(unittest.TestCase):
    """Tests for the SpiderState extension (persists spider.state to a jobdir)."""

    def test_store_load(self):
        """State saved during one run is restored into a later run."""
        job_dir = self.mktemp()
        os.mkdir(job_dir)
        try:
            first_spider = Spider(name='default')
            stamp = datetime.now()
            state_ext = SpiderState(job_dir)
            state_ext.spider_opened(first_spider)
            first_spider.state['one'] = 1
            first_spider.state['dt'] = stamp
            state_ext.spider_closed(first_spider)

            second_spider = Spider(name='default')
            second_ext = SpiderState(job_dir)
            second_ext.spider_opened(second_spider)
            self.assertEqual(first_spider.state, {'one': 1, 'dt': stamp})
            second_ext.spider_closed(second_spider)
        finally:
            shutil.rmtree(job_dir)

    def test_state_attribute(self):
        # Even without a jobdir the extension must expose an (empty) state
        # dict, to provide a consistent interface.
        spider = Spider(name='default')
        extension = SpiderState()
        extension.spider_opened(spider)
        self.assertEqual(spider.state, {})
        extension.spider_closed(spider)

    def test_not_configured(self):
        # from_crawler must refuse to activate when no jobdir is configured.
        crawler = get_crawler(Spider)
        self.assertRaises(NotConfigured, SpiderState.from_crawler, crawler)
| pawelmhm/scrapy | tests/test_spiderstate.py | Python | bsd-3-clause | 1,418 |
import time
import sys
commands = {
'OUTPUT_RESET': 'A2',
'OUTPUT_STOP': 'A3',
'OUTPUT_POWER': 'A4', # seems to max out around 0x1F with 0x20 backwards
'OUTPUT_SPEED': 'A5',
'OUTPUT_START': 'A6',
'OUTPUT_POLARITY': 'A7', # 0x01 forwards, 0x00 toggle, 0xFF backwards
}
motors = {
'A': 1,
'B': 2,
'C': 4,
'D': 8
}
def ev3motor(cmd,m,pwr):
motorhx = 0
for i in list(m):
motorhx += motors[i]
motorhx = "%0.2X" % motorhx
cmdhx = commands[cmd]
cmdstr = cmdhx + '00' + motorhx
print(cmdstr)
ev3motor('OUTPUT_START','AB','')
sys.exit()
# command to start motor on port A at speed 20
# 0C 00 00 00 80 00 00 A4 00 01 14 A6 00 01
# 12 0 0 0 128 0 0 164 0 1 20 166 0 1
#
# Length: 0C 00 -> 12
# Counter: 00 00 -> 0
# Reply: 80 -> No reply
# Variables: 00 00 -> None (?)
# Command: A4 -> opOUTPUT_POWER
# 00: Null block
# Motor: 01 -> A
# Value: 14 -> 20
# Command: A6 -> opOUTPUT_START
# 00: Null block
# Motor: 01 -> A
start_motor_str = '0C000000800000A400061FA60006'
start_motor = bytes.fromhex(start_motor_str)
change_motor_power_str = '09000000800000A70006FF'
change_motor_power = bytes.fromhex(change_motor_power_str)
# command to stop motor on port A
# 09 00 01 00 80 00 00 A3 00 01 00
# 9 0 1 0 128 0 0 163 0 1 0
#
# Length: 09 00 -> 9
# Counter: 01 00 -> 1
# Reply: 80 -> No reply
# Variables: 00 00 -> None (?)
# Command: A3 -> opOUTPUT_STOP
# 00: Null block
# Motor: 01 -> A
# Value: 00 -> Float
stop_motor_str = '09000100800000A3000600'
stop_motor = bytes.fromhex(stop_motor_str)
# send commands to EV3 via bluetooth
with open('/dev/tty.EV3-SerialPort', 'wb', 0) as bt:
bt.write(start_motor)
time.sleep(5)
bt.write(change_motor_power)
time.sleep(5)
bt.write(stop_motor)
| loopspace/microbit | lego/sender.py | Python | mit | 1,789 |
def fit_model(name, model, X_train, y_train, X_test, X_last, pred, pred_last):
    """Fits a classification model (for our purposes this is LR, LDA and QDA)
    using the training data, then makes a prediction and subsequent "hit rate"
    for the test data."""
    # NOTE(review): almost all of these function-level imports (flask, sqlite3,
    # plotting libraries, sklearn classes, ...) are unused in the body below —
    # confirm before removing them.
    from flask import Flask, request, render_template, jsonify
    import pandas.io.sql as sql
    import sqlite3
    import platform
    import datetime
    import numpy as np
    import pandas as pd
    import json
    #import pygal
    import matplotlib.pyplot as plt
    from scipy.stats import norm
    from bokeh.charts import Histogram
    import plotly
    #from pandas.io.data import DataReader
    from pandas_datareader import wb, DataReader
    from sklearn.linear_model import LogisticRegression
    from sklearn.lda import LDA
    from sklearn.qda import QDA
    from bokeh.layouts import gridplot
    from bokeh.plotting import figure, show, output_file
    # Fit and predict the model on the training, and then test, data
    model.fit(X_train, y_train)
    pred[name] = model.predict(X_test)
    pred_last[name] = model.predict(X_last)
    # Create a series with 1 being correct direction, 0 being wrong
    # and then calculate the hit rate based on the actual direction
    # (assumes predictions and "Actual" are +1/-1 — TODO confirm).
    pred["%s_Correct" % name] = (1.0+pred[name]*pred["Actual"])/2.0
    hit_rate = np.mean(pred["%s_Correct" % name])
    # Python 2 print statement: this module targets Python 2.
    print "%s: %.3f" % (name, hit_rate)
    return hit_rate
| ivtransgruasortiz/paquete_pip | ciff_2017_af3_asio/fit_model.py | Python | mit | 1,446 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
# Role shorthands used to build the oslo.policy check strings below.
_READER = "role:reader"
_MEMBER = "role:member"
_ADMIN = "role:admin"
# Project-scoped variants: role must match the secret's owning project.
_PROJECT_MEMBER = f"{_MEMBER} and project_id:%(target.secret.project_id)s"
_PROJECT_ADMIN = f"{_ADMIN} and project_id:%(target.secret.project_id)s"
# The user who created the secret.
_SECRET_CREATOR = "user_id:%(target.secret.creator_id)s"
# True when the secret allows project-wide read access.
_SECRET_IS_NOT_PRIVATE = "True:%(target.secret.read_project_access)s"
# Policy rules for the /v1/secrets API. Each rule keeps the legacy
# check string (rule:...) OR-ed with the newer role-based checks built
# from the constants above, so both schemes keep working.
rules = [
    policy.DocumentedRuleDefault(
        name='secret:decrypt',
        check_str='rule:secret_decrypt_non_private_read or ' +
                  'rule:secret_project_creator or ' +
                  'rule:secret_project_admin or rule:secret_acl_read or ' +
                  f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
                  f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
        scope_types=['project'],
        description='Retrieve a secrets payload.',
        operations=[
            {
                'path': '/v1/secrets/{uuid}/payload',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name='secret:get',
        check_str='rule:secret_non_private_read or ' +
                  'rule:secret_project_creator or ' +
                  'rule:secret_project_admin or rule:secret_acl_read or ' +
                  f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
                  f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
        scope_types=['project'],
        description='Retrieves a secrets metadata.',
        operations=[
            {
                'path': '/v1/secrets/{secret-id}',
                # NOTE(review): 'GET"' contains a stray double quote —
                # confirm intended value upstream.
                'method': 'GET"'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name='secret:put',
        check_str='rule:admin_or_creator and rule:secret_project_match or ' +
                  f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
                  f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
        scope_types=['project'],
        description='Add the payload to an existing metadata-only secret.',
        operations=[
            {
                'path': '/v1/secrets/{secret-id}',
                'method': 'PUT'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name='secret:delete',
        check_str='rule:secret_project_admin or ' +
                  'rule:secret_project_creator or ' +
                  '(rule:secret_project_creator_role and ' +
                  'not rule:secret_private_read) or ' +
                  f"({_PROJECT_MEMBER} and ({_SECRET_CREATOR} or " +
                  f"{_SECRET_IS_NOT_PRIVATE})) or {_PROJECT_ADMIN}",
        scope_types=['project'],
        description='Delete a secret by uuid.',
        operations=[
            {
                'path': '/v1/secrets/{secret-id}',
                'method': 'DELETE'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name='secrets:post',
        check_str=f'rule:admin_or_creator or {_MEMBER}',
        scope_types=['project'],
        description='Creates a Secret entity.',
        operations=[
            {
                'path': '/v1/secrets',
                'method': 'POST'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name='secrets:get',
        check_str=f'rule:all_but_audit or {_MEMBER}',
        scope_types=['project'],
        description='Lists a projects secrets.',
        operations=[
            {
                'path': '/v1/secrets',
                'method': 'GET'
            }
        ]
    )
]
def list_rules():
    """Return the oslo.policy rules defined by this module."""
    return rules
| openstack/barbican | barbican/common/policies/secrets.py | Python | apache-2.0 | 4,101 |
import re
import requests,time
import difflib
import xbmc,xbmcaddon
from ..scraper import Scraper
from ..common import clean_search, random_agent,send_log,error_log
from ..modules import cfscrape
# Module-level Cloudflare-aware session (the class also creates its own).
scraper = cfscrape.create_scraper()
# Addon setting: when the string 'true', scraper timings/errors are logged.
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
# NOTE(review): random_agent is referenced, not called — confirm whether
# this should be random_agent() to get a concrete User-Agent string.
heads = {'User-Agent':random_agent}
class serieswatch(Scraper):
    """Scraper plugin for watch-series.co (universalscrapers framework)."""
    domains = ['watch-series.co']
    name = "serieswatch"
    sources = []
    def __init__(self):
        self.base_link = 'https://ww1.watch-series.co'
        self.search_link = '/search.html?keyword='
        self.scraper = cfscrape.create_scraper()
    def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
        # Search "<title> season <n>", fuzzy-match the result title, then
        # find the requested episode number on the season page.
        try:
            start_time = time.time()
            start_url = self.base_link+self.search_link+title.replace(' ','%20')+'%20season%20'+season
            #print start_url
            html = self.scraper.get(start_url,timeout=10).content
            match = re.compile('<div class="video-thumbimg">.+?href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
            for url,name in match:
                season_name_check = title.lower().replace(' ','')+'season'+season
                name_check = name.replace('-','').replace(' ','').lower()
                check = difflib.SequenceMatcher(a=season_name_check,b=name_check)
                d = check.ratio()*100
                # Accept results whose normalized title is >80% similar.
                if int(d)>80:
                    html2 = self.scraper.get(self.base_link+url+'/season',timeout=10).content
                    episodes = re.findall('<div class="video_container">.+?<a href="(.+?)" class="view_more"></a></div>.+?class="videoHname"><b>(.+?)</b></a></span>.+?<div class="video_date icon-calendar">.+?, (.+?)</div>',html2,re.DOTALL)
                    for url2,ep_no,aired_year in episodes:
                        url2 = self.base_link+url2
                        ep_no = ep_no.replace('Episode ','').replace(':','')
                        if ep_no == episode:
                            #print url2
                            self.get_sources(url2,title,year,season,episode,start_time)
                            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name,argument)
            return []
    def scrape_movie(self, title, year, debrid = False):
        # Search by title, fuzzy-match, then confirm the release year on the
        # detail page before collecting sources.
        try:
            start_time = time.time()
            start_url = self.base_link+self.search_link+title.replace(' ','%20')
            html = self.scraper.get(start_url,timeout=10).content
            match = re.compile('<div class="video-thumbimg">.+?href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
            for url,name in match:
                season_name_check = title.lower().replace(' ','')
                name_check = name.replace('-','').replace(' ','').lower()
                check = difflib.SequenceMatcher(a=season_name_check,b=name_check)
                d = check.ratio()*100
                if int(d)>80:
                    #print name
                    # NOTE(review): uses the module-level `scraper` session
                    # here instead of self.scraper — confirm intentional.
                    html2 = scraper.get(self.base_link+url,timeout=10).content
                    final_page_match = re.compile('<div class="vc_col-sm-8 wpb_column column_container">.+?Released:(.+?)<.+?/series/(.+?)"',re.DOTALL).findall(html2)
                    for release_year,fin_url in final_page_match:
                        release_year = release_year.replace(' ','')
                        fin_url = self.base_link+'/series/'+fin_url
                        if release_year == year:
                            self.get_sources(fin_url,title,year,'','',start_time)
                            return self.sources
        except Exception as argument:
            if dev_log == 'true':
                error_log(self.name,argument)
            return[]
    def get_sources(self,url2,title,year,season,episode,start_time):
        # Parse the hoster list on an episode/movie page and append playable
        # links to self.sources; failures are silently ignored.
        try:
            #print url2
            quality = 'SD'
            html = requests.get(url2).content
            count = 0
            match = re.compile('href="#".+?data-video="(.+?)".+?class=".+?">(.+?)<',re.DOTALL).findall(html)
            for url,source_name in match:
                if 'm1' in source_name:
                    source_name = 'Gvideo'
                if 'vidnode' in url:
                    # vidnode embeds expose direct file/label pairs.
                    url = 'http:'+url
                    html2 = requests.get(url,timeout=3).content
                    single = re.findall("file: '(.+?)'.+?label: '(.+?)'",html2)
                    for playlink,quality in single:
                        #print playlink
                        quality = quality.replace(' ','').lower()
                        if quality.lower() == 'auto' or quality.lower() == 'autop':
                            # NOTE(review): 'm22'/'m37' can never occur inside
                            # 'auto'/'autop', so this always falls through to
                            # 'SD' — confirm intent.
                            if 'm22' in quality:
                                quality = '720p'
                            elif 'm37' in quality:
                                quality = '1080p'
                            else:
                                quality = 'SD'
                        count +=1
                        self.sources.append({'source': source_name, 'quality': quality, 'scraper': self.name, 'url': playlink,'direct': True})
                elif 'ocloud' in url:
                    # ocloud embeds need a second hop to pick the quality page.
                    html2 = requests.get(url,timeout=3,headers=heads).content
                    base_url = re.findall('base href="(.+?)"',html2)[0]
                    try:
                        link,ID = re.findall("<div id=\"quality\">.+?href='(.+?)'.+?id=\"(.+?)\"",html2,re.DOTALL)[0]
                        if '720' in ID:
                            link = base_url+link[1:].replace('.','').replace('/embed','embed')
                            #print link
                            #print '##############'
                            html2 = requests.get(link,headers=heads,timeout=3).content
                            #print html2
                    except Exception as e:
                        print str(e)
                    try:
                        playlink,quality = re.findall("ifleID = '(.+?)'.+?quality = '(.+?)'",str(html2),re.DOTALL)[0]
                        #print playlink
                        count +=1
                        self.sources.append({'source': 'Ocloud', 'quality': quality, 'scraper': self.name, 'url': playlink,'direct': False})
                    except Exception as e:
                        print str(e)
                else:
                    # Unknown hoster: hand the embed URL over unresolved.
                    count +=1
                    self.sources.append({'source': source_name, 'quality': quality, 'scraper': self.name, 'url': url,'direct': False})
            if dev_log=='true':
                end_time = time.time() - start_time
                send_log(self.name,end_time,count,title,year, season=season,episode=episode)
        except:
            pass
#
# opencv_version_check - displays the opencv version on the machine
#
# Start from command line using 'python opencv_version_check.py'.
import cv2
import cv2.cv
def main():
    # opencv_version_check - prints opencv version
    # Python 2 print statement: this script targets Python 2.
    print "OpenCV %s" % cv2.__version__
if __name__ == "__main__":
    main()
| j71200/drone | scripts/opencv_version_check.py | Python | gpl-3.0 | 310 |
from setuptools import setup, find_packages
# Package metadata for the peinfo stoQ plugin (setuptools).
setup(
    name="peinfo",
    version="3.0.0",
    author="Facebook, Marcus LaFerrera (@mlaferrera)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Gather relevant information about an executable using pefile",
    packages=find_packages(),
    include_package_data=True,
)
| PUNCH-Cyber/stoq-plugins-public | peinfo/setup.py | Python | apache-2.0 | 386 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import requests
from psychopy import logging, prefs
import wx
from datetime import datetime
# Server the news feed is fetched from.
newsURL = "http://news.psychopy.org/"
# Importance levels for news items; showNews only surfaces items at or
# above ANNOUNCE.
CRITICAL = 40
ANNOUNCE = 30
TIP = 20
JOKE = 10
def getNewsItems(app=None):
    """Fetch the current news items from the news server.

    Returns the list of news dicts, or None when the server cannot be
    reached, replies with a non-200 status, or the response cannot be
    parsed. When *app* is given the list is also stored on ``app.news``.
    """
    url = newsURL + "news_items.json"
    try:
        resp = requests.get(url, timeout=0.5)
    except (requests.ConnectionError, requests.exceptions.ReadTimeout):
        return None
    if resp.status_code != 200:
        # Fix: previously fell through and raised NameError on the
        # unbound ``items`` below.
        logging.debug("failed to connect to '{}'".format(url))
        return None
    try:
        items = resp.json()
    except Exception as e:
        # Fix: a parse failure also left ``items`` unbound.
        logging.warning("Found, but failed to parse '{}'".format(url))
        print(str(e))
        return None
    if app:
        app.news = items["news"]
    return items["news"]
def showNews(app=None, checkPrev=True):
    """Brings up an internal html viewer and show the latest psychopy news
    :Returns:
        itemShown : bool
    """
    if checkPrev and app.news:
        toShow = None
        # Only consider items newer than the last shown date and at least
        # ANNOUNCE importance.
        if 'lastNewsDate' in prefs.appData:
            lastNewsDate = prefs.appData['lastNewsDate']
        else:
            lastNewsDate = ""
        for item in app.news:
            if item['importance'] >= ANNOUNCE and item['date'] > lastNewsDate:
                toShow = item
                break
        # update prefs lastNewsDate to match JavaScript Date().toISOString()
        now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
        prefs.appData['lastNewsDate'] = now
        prefs.saveAppData()
        # NOTE(review): both branches return 0, so when app.news is set the
        # dialog below is never shown regardless of toShow — confirm whether
        # this path is intentionally disabled.
        if not toShow:
            return 0
        else:
            return 0
    # Show the news page in an embedded web view.
    dlg = wx.Dialog(None, style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
                    size=(800, 400))
    browser = wx.html2.WebView.New(dlg)
    # do layout
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(browser, 1, wx.EXPAND, 10)
    dlg.SetSizer(sizer)
    browser.LoadURL(newsURL)
    dlg.Show()
    return 1
#
# class NewsFrame(wx.Dialog):
# """This class is used by to open an internal browser for the user stuff
# """
# style =
#
# def __init__(self, parent, style=style, *args, **kwargs):
# # create the dialog
# wx.Dialog.__init__(self, parent, style=style, *args, **kwargs)
# # create browser window for authentication
# self.browser = wx.html2.WebView.New(self)
#
# # do layout
# sizer = wx.BoxSizer(wx.VERTICAL)
# sizer.Add(self.browser, 1, wx.EXPAND, 10)
# self.SetSizer(sizer)
#
# self.browser.LoadURL(newsURL)
# self.Show()
| psychopy/versions | psychopy/app/connections/news.py | Python | gpl-3.0 | 2,755 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**ISClipper test suite.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe.common.testing import get_qgis_app
# Module attribution metadata.
__author__ = 'tim@linfiniti.com'
__date__ = '20/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
                 'Disaster Reduction')
import os
import unittest
from qgis.core import (
QgsProviderRegistry,
QgsCoordinateReferenceSystem,
QgsRasterLayer)
from safe_qgis.safe_interface import EXPDATA
QGIS_APP = get_qgis_app()
class QGISTest(unittest.TestCase):
    """Test the QGIS Environment"""

    def test_qgis_environment(self):
        """QGIS environment has the expected providers"""
        registry = QgsProviderRegistry.instance()
        providers = registry.providerList()
        assert 'gdal' in providers
        assert 'ogr' in providers
        assert 'postgres' in providers

    def testProjInterpretation(self):
        """Test that QGIS properly parses a proj4 string.
        see https://github.com/AIFDR/inasafe/issues/349
        """
        expected_auth_id = 'EPSG:4326'
        crs = QgsCoordinateReferenceSystem()
        wkt = (
            'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",'
            'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
            'PRIMEM["Greenwich",0.0],UNIT["Degree",'
            '0.0174532925199433]]')
        crs.createFromWkt(wkt)
        self.assertEqual(crs.authid(), expected_auth_id)

        # The same CRS should be detected on a raster layer loaded from disk.
        layer = QgsRasterLayer(os.path.join(EXPDATA, 'glp10ag.asc'), 'people')
        self.assertEqual(layer.crs().authid(), expected_auth_id)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| drayanaindra/inasafe | safe_qgis/test/test_qgis_environment.py | Python | gpl-3.0 | 2,248 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) Vauxoo (<http://vauxoo.com>).
# All Rights Reserved
###############Credits######################################################
# Coded by: Julio Cesar Serna Hernandez(julio@vauxoo.com)
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
{
"name": "Account Reconcile Advance Tax",
"version": "1.0",
"author" : "Vauxoo",
"category": "Accounting",
"website" : "http://www.vauxoo.com/",
"description": """
Create Entries Tax Effectively Paid :
=====================================
This module creates the tax effectively paid of the invoices associated
with the advance
""",
'depends': [
'account_reconcile_advance',
'account_voucher_tax'
],
'data': [
],
'demo': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 3dfxsoftware/cbss-addons | account_reconcile_advance_tax/__openerp__.py | Python | gpl-2.0 | 1,869 |
from dataclasses import dataclass
from decimal import Decimal
from typing import Union
from babel.numbers import get_currency_precision
from django.conf import settings
from django.contrib.sites.models import Site
from prices import Money, MoneyRange, TaxedMoney, TaxedMoneyRange
class TaxError(Exception):
    """Default tax error raised by tax-related helpers."""
def zero_money(currency: str = settings.DEFAULT_CURRENCY) -> Money:
    """Return a money object set to zero.
    This is a function used as a model's default.
    """
    # NOTE: the default currency is captured from settings at import time.
    return Money(0, currency)
def zero_taxed_money(currency: str = settings.DEFAULT_CURRENCY) -> TaxedMoney:
    """Return a TaxedMoney with both net and gross set to zero."""
    zero = zero_money(currency)
    return TaxedMoney(net=zero, gross=zero)
def include_taxes_in_prices() -> bool:
    """Return the site setting: whether stored prices include taxes."""
    return Site.objects.get_current().settings.include_taxes_in_prices
def display_gross_prices() -> bool:
    """Return the site setting: whether to display gross (tax-inclusive) prices."""
    return Site.objects.get_current().settings.display_gross_prices
def charge_taxes_on_shipping() -> bool:
    """Return the site setting: whether shipping is taxed."""
    return Site.objects.get_current().settings.charge_taxes_on_shipping
def get_display_price(
    base: Union[TaxedMoney, TaxedMoneyRange], display_gross: bool = False
) -> Money:
    """Return the price amount that should be displayed based on settings."""
    # Fall back to the site-wide preference when not explicitly requested.
    if not display_gross:
        display_gross = display_gross_prices()
    if isinstance(base, TaxedMoneyRange):
        start, stop = base.start, base.stop
        base = (
            MoneyRange(start=start.gross, stop=stop.gross)
            if display_gross
            else MoneyRange(start=start.net, stop=stop.net)
        )
    if isinstance(base, TaxedMoney):
        return base.gross if display_gross else base.net
    return base
def quantize_price(
    price: Union["TaxedMoney", "Money", "Decimal", "TaxedMoneyRange"], currency: str
) -> Union["TaxedMoney", "Money", "Decimal", "TaxedMoneyRange"]:
    """Round *price* to the number of decimal places used by *currency*."""
    exponent = Decimal(10) ** -get_currency_precision(currency)
    return price.quantize(exponent)
@dataclass(frozen=True)
class TaxType:
    """Dataclass for unifying tax type object that comes from tax gateway."""

    # Gateway-specific tax code identifier.
    code: str
    # Human-readable description of the tax type.
    description: str
| maferelo/saleor | saleor/core/taxes.py | Python | bsd-3-clause | 2,095 |
# -*- coding: utf-8 -*-
# Scrapy settings for findJob project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'findJob'
SPIDER_MODULES = ['findJob.spiders']
NEWSPIDER_MODULE = 'findJob.spiders'
# Address of the local Splash rendering service used by scrapy-splash.
SPLASH_URL = 'http://localhost:8050/'
# scrapy-splash replacements for dedup filtering and HTTP caching.
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
FEED_EXPORTERS = {
    'json': 'scrapy.contrib.exporter.JsonItemExporter',
}
# Scraped items are exported as JSON to this file.
FEED_FORMAT = 'json'
FEED_URI = "jobResults.json"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'findJob (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'findJob.middlewares.MyCustomSpiderMiddleware': 543,
#}
# scrapy-splash spider middleware (priority 100).
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'findJob.middlewares.MyCustomDownloaderMiddleware': 543,
#}
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'findJob.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| Gavsum/JobScrape | findJob/findJob/settings.py | Python | mit | 3,706 |
import os, shutil, re, glob, subprocess, logging, gzip
from autotest.client.shared import log, global_config, software_manager
from autotest.client import utils
# Handle to the autotest global configuration singleton.
GLOBAL_CONFIG = global_config.global_config
# Whether to snapshot the list of installed packages around boots and tests
# (read from the CLIENT section of the global config; off by default).
_LOG_INSTALLED_PACKAGES = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                         'log_installed_packages',
                                                         type=bool, default=False)
# Commands whose output is captured after every test.
_DEFAULT_COMMANDS_TO_LOG_PER_TEST = []
# Commands whose output is captured once per boot (and at job start).
_DEFAULT_COMMANDS_TO_LOG_PER_BOOT = [
    "lspci -vvn", "gcc --version", "ld --version", "mount", "hostname",
    "uptime",
]
# Commands captured before/after each test iteration.
_DEFAULT_COMMANDS_TO_LOG_BEFORE_ITERATION = []
_DEFAULT_COMMANDS_TO_LOG_AFTER_ITERATION = []
# Files copied after every test.
_DEFAULT_FILES_TO_LOG_PER_TEST = []
# Files copied once per boot.
_DEFAULT_FILES_TO_LOG_PER_BOOT = [
    "/proc/pci", "/proc/meminfo", "/proc/slabinfo", "/proc/version",
    "/proc/cpuinfo", "/proc/modules", "/proc/interrupts", "/proc/partitions",
]
# Files copied before/after each test iteration (scheduler/memory state,
# useful for diffing what an iteration changed).
_DEFAULT_FILES_TO_LOG_BEFORE_ITERATION = [
    "/proc/schedstat", "/proc/meminfo", "/proc/slabinfo", "/proc/interrupts"
]
_DEFAULT_FILES_TO_LOG_AFTER_ITERATION = [
    "/proc/schedstat", "/proc/meminfo", "/proc/slabinfo", "/proc/interrupts"
]
class loggable(object):
    """Base class for anything sysinfo knows how to collect into a log file."""
    def __init__(self, logf, log_in_keyval):
        # Name of the log file this object produces, and whether its first
        # line should also be surfaced in the test keyval file.
        self.logf = logf
        self.log_in_keyval = log_in_keyval
    def readline(self, logdir):
        """Return the first line of this object's log under logdir, or ''."""
        target = os.path.join(logdir, self.logf)
        if not os.path.exists(target):
            return ""
        return utils.read_one_line(target)
class logfile(loggable):
    """A loggable that snapshots a single file from the filesystem."""
    def __init__(self, path, logf=None, log_in_keyval=False):
        # Default the log name to the basename of the tracked file.
        if not logf:
            logf = os.path.basename(path)
        super(logfile, self).__init__(logf, log_in_keyval)
        self.path = path
    def __repr__(self):
        return "sysinfo.logfile(%r, %r, %r)" % (self.path, self.logf,
                                                self.log_in_keyval)
    def __eq__(self, other):
        """Two logfiles are equal iff they track the same path and log name."""
        if isinstance(other, logfile):
            return (self.path, self.logf) == (other.path, other.logf)
        if isinstance(other, loggable):
            return False
        return NotImplemented
    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq
    def __hash__(self):
        return hash((self.path, self.logf))
    def run(self, logdir):
        """Copy the tracked file into logdir; skip quietly on permission errors."""
        if not os.path.exists(self.path):
            return
        try:
            shutil.copyfile(self.path, os.path.join(logdir, self.logf))
        except IOError:
            logging.info("Not logging %s (lack of permissions)", self.path)
class command(loggable):
    """A loggable that runs a shell command and captures its stdout."""
    def __init__(self, cmd, logf=None, log_in_keyval=False, compress_log=False):
        # Default log name: the command line with spaces made filesystem-safe.
        if not logf:
            logf = cmd.replace(" ", "_")
        super(command, self).__init__(logf, log_in_keyval)
        self.cmd = cmd
        self._compress_log = compress_log
    def __repr__(self):
        r = "sysinfo.command(%r, %r, %r)"
        r %= (self.cmd, self.logf, self.log_in_keyval)
        return r
    def __eq__(self, other):
        """Two commands are equal iff they share command line and log name."""
        if isinstance(other, command):
            return (self.cmd, self.logf) == (other.cmd, other.logf)
        elif isinstance(other, loggable):
            return False
        return NotImplemented
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        return hash((self.cmd, self.logf))
    def run(self, logdir):
        """Run self.cmd, writing its stdout to logdir/self.logf.

        Stderr is discarded; if compress_log was set the resulting log
        is gzipped in place.
        """
        env = os.environ.copy()
        if "PATH" not in env:
            env["PATH"] = "/usr/bin:/bin"
        logf_path = os.path.join(logdir, self.logf)
        # Context managers guarantee all descriptors are closed even when a
        # later open() raises; the previous open/open/open-then-try/finally
        # sequence leaked stdin/stderr if open(logf_path) failed.
        with open(os.devnull, "r") as stdin, \
                open(os.devnull, "w") as stderr, \
                open(logf_path, "w") as stdout:
            subprocess.call(self.cmd, stdin=stdin, stdout=stdout,
                            stderr=stderr, shell=True, env=env)
        if self._compress_log and os.path.exists(logf_path):
            utils.run('gzip -9 "%s"' % logf_path, ignore_status=True,
                      verbose=False)
class base_sysinfo(object):
    """Collects system information (command output, file snapshots and the
    system log) into a job's results directory at boot, per-test and
    per-iteration boundaries."""
    def __init__(self, job_resultsdir):
        """Build the default sets of loggables and the sysinfo directory
        under job_resultsdir."""
        self.sysinfodir = self._get_sysinfodir(job_resultsdir)
        # pull in the post-test logs to collect
        self.test_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_PER_TEST:
            self.test_loggables.add(command(cmd))
        for filename in _DEFAULT_FILES_TO_LOG_PER_TEST:
            self.test_loggables.add(logfile(filename))
        # pull in the EXTRA post-boot logs to collect
        self.boot_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_PER_BOOT:
            self.boot_loggables.add(command(cmd))
        for filename in _DEFAULT_FILES_TO_LOG_PER_BOOT:
            self.boot_loggables.add(logfile(filename))
        # pull in the pre test iteration logs to collect
        # (logged with a '.before' suffix so they can be diffed against the
        # matching '.after' snapshot)
        self.before_iteration_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_BEFORE_ITERATION:
            self.before_iteration_loggables.add(
                command(cmd, logf=cmd.replace(" ", "_") + '.before'))
        for fname in _DEFAULT_FILES_TO_LOG_BEFORE_ITERATION:
            self.before_iteration_loggables.add(
                logfile(fname, logf=os.path.basename(fname) + '.before'))
        # pull in the post test iteration logs to collect
        self.after_iteration_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_AFTER_ITERATION:
            self.after_iteration_loggables.add(
                command(cmd, logf=cmd.replace(" ", "_") + '.after'))
        for fname in _DEFAULT_FILES_TO_LOG_AFTER_ITERATION:
            self.after_iteration_loggables.add(
                logfile(fname, logf=os.path.basename(fname) + '.after'))
        # add in a couple of extra files and commands we want to grab
        self.test_loggables.add(command("df -mP", logf="df"))
        # We compress the dmesg because it can get large when kernels are
        # configured with a large buffer and some tests trigger OOMs or
        # other large "spam" that fill it up...
        self.test_loggables.add(command("dmesg -c", logf="dmesg",
                                        compress_log=True))
        self.boot_loggables.add(logfile("/proc/cmdline",
                                        log_in_keyval=True))
        # log /proc/mounts but with custom filename since we already
        # log the output of the "mount" command as the filename "mount"
        self.boot_loggables.add(logfile('/proc/mounts', logf='proc_mounts'))
        self.boot_loggables.add(command("uname -a", logf="uname",
                                        log_in_keyval=True))
        self.sm = software_manager.SoftwareManager()
    def __getstate__(self):
        """Exclude the SoftwareManager handle from the pickled state."""
        ret = dict(self.__dict__)
        ret["sm"] = None
        return ret
    def serialize(self):
        """Return the boot/test loggable sets for persisting across runs."""
        return {"boot": self.boot_loggables, "test": self.test_loggables}
    def deserialize(self, serialized):
        """Restore loggable sets previously produced by serialize()."""
        self.boot_loggables = serialized["boot"]
        self.test_loggables = serialized["test"]
    @staticmethod
    def _get_sysinfodir(resultsdir):
        """Return (creating if needed) the sysinfo dir under resultsdir."""
        sysinfodir = os.path.join(resultsdir, "sysinfo")
        if not os.path.exists(sysinfodir):
            os.makedirs(sysinfodir)
        return sysinfodir
    def _get_reboot_count(self):
        """Count boot.* subdirs; -1 means no sysinfo collected at all yet."""
        if not glob.glob(os.path.join(self.sysinfodir, "*")):
            return -1
        else:
            return len(glob.glob(os.path.join(self.sysinfodir, "boot.*")))
    def _get_boot_subdir(self, next=False):
        """Return the per-boot log dir (the sysinfo dir itself for the
        first boot, boot.N subdirs afterwards); next=True returns the dir
        for the upcoming boot rather than the current one."""
        reboot_count = self._get_reboot_count()
        if next:
            reboot_count += 1
        if reboot_count < 1:
            return self.sysinfodir
        else:
            boot_dir = "boot.%d" % (reboot_count - 1)
            return os.path.join(self.sysinfodir, boot_dir)
    def _get_iteration_subdir(self, test, iteration):
        """Return (creating if needed) test's iteration.N sysinfo dir."""
        iter_dir = "iteration.%d" % iteration
        logdir = os.path.join(self._get_sysinfodir(test.outputdir), iter_dir)
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        return logdir
    @log.log_and_ignore_errors("post-reboot sysinfo error:")
    def log_per_reboot_data(self):
        """ Logging hook called whenever a job starts, and again after
        any reboot. """
        logdir = self._get_boot_subdir(next=True)
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        # per-boot collection includes both the boot and the test loggables
        for log in (self.test_loggables | self.boot_loggables):
            log.run(logdir)
        if _LOG_INSTALLED_PACKAGES:
            # also log any installed packages
            installed_path = os.path.join(logdir, "installed_packages")
            installed_packages = "\n".join(self.sm.list_all()) + "\n"
            utils.open_write_close(installed_path, installed_packages)
    @log.log_and_ignore_errors("pre-test sysinfo error:")
    def log_before_each_test(self, test):
        """ Logging hook called before a test starts. """
        if _LOG_INSTALLED_PACKAGES:
            self._installed_packages = self.sm.list_all()
        # remember size/inode of the system log so log_after_each_test can
        # collect only the data appended while the test ran
        if os.path.exists("/var/log/messages"):
            stat = os.stat("/var/log/messages")
            self._messages_size = stat.st_size
            self._messages_inode = stat.st_ino
        elif os.path.exists("/var/log/syslog"):
            stat = os.stat("/var/log/syslog")
            self._messages_size = stat.st_size
            self._messages_inode = stat.st_ino
    @log.log_and_ignore_errors("post-test sysinfo error:")
    def log_after_each_test(self, test):
        """ Logging hook called after a test finishs. """
        test_sysinfodir = self._get_sysinfodir(test.outputdir)
        # create a symlink in the test sysinfo dir to the current boot
        reboot_dir = self._get_boot_subdir()
        assert os.path.exists(reboot_dir)
        symlink_dest = os.path.join(test_sysinfodir, "reboot_current")
        symlink_src = utils.get_relative_path(reboot_dir,
                                              os.path.dirname(symlink_dest))
        try:
            os.symlink(symlink_src, symlink_dest)
        except Exception, e:
            raise Exception, '%s: whilst linking %s to %s' % (e, symlink_src,
                                                              symlink_dest)
        # run all the standard logging commands
        for log in self.test_loggables:
            log.run(test_sysinfodir)
        # grab any new data from the system log
        self._log_messages(test_sysinfodir)
        # log some sysinfo data into the test keyval file
        keyval = self.log_test_keyvals(test_sysinfodir)
        test.write_test_keyval(keyval)
        if _LOG_INSTALLED_PACKAGES:
            # log any changes to installed packages
            old_packages = set(self._installed_packages)
            new_packages = set(self.sm.list_all())
            added_path = os.path.join(test_sysinfodir, "added_packages")
            added_packages = "\n".join(new_packages - old_packages) + "\n"
            utils.open_write_close(added_path, added_packages)
            removed_path = os.path.join(test_sysinfodir, "removed_packages")
            removed_packages = "\n".join(old_packages - new_packages) + "\n"
            utils.open_write_close(removed_path, removed_packages)
    @log.log_and_ignore_errors("pre-test siteration sysinfo error:")
    def log_before_each_iteration(self, test, iteration=None):
        """ Logging hook called before a test iteration."""
        if not iteration:
            iteration = test.iteration
        logdir = self._get_iteration_subdir(test, iteration)
        for log in self.before_iteration_loggables:
            log.run(logdir)
    @log.log_and_ignore_errors("post-test siteration sysinfo error:")
    def log_after_each_iteration(self, test, iteration=None):
        """ Logging hook called after a test iteration."""
        if not iteration:
            iteration = test.iteration
        logdir = self._get_iteration_subdir(test, iteration)
        for log in self.after_iteration_loggables:
            log.run(logdir)
    def _log_messages(self, logdir):
        """ Log all of the new data in the system log. """
        try:
            # log all of the new data in the system log
            logpaths = ["/var/log/messages", "/var/log/syslog"]
            for logpath in logpaths:
                if os.path.exists(logpath):
                    break
            else:
                raise ValueError("System log file not found (looked for %s)" %
                                 logpaths)
            # skip data already seen before the test, unless the log was
            # rotated in the meantime (detected via a changed inode)
            bytes_to_skip = 0
            if hasattr(self, "_messages_size"):
                current_inode = os.stat(logpath).st_ino
                if current_inode == self._messages_inode:
                    bytes_to_skip = self._messages_size
            in_messages = open(logpath)
            out_file_basename = os.path.basename(logpath) + ".gz"
            out_file_name = os.path.join(logdir, out_file_basename)
            out_messages = gzip.GzipFile(out_file_name, "w")
            try:
                in_messages.seek(bytes_to_skip)
                while True:
                    # Read data in managable chunks rather than all at once.
                    in_data = in_messages.read(200000)
                    if not in_data:
                        break
                    out_messages.write(in_data)
            finally:
                out_messages.close()
                in_messages.close()
        except ValueError, e:
            logging.info(e)
        except (IOError, OSError):
            logging.info("Not logging %s (lack of permissions)", logpath)
        except Exception, e:
            logging.info("System log collection failed: %s", e)
    @staticmethod
    def _read_sysinfo_keyvals(loggables, logdir):
        """Collect 'sysinfo-<logf>' keyvals for loggables flagged with
        log_in_keyval, reading each log's first line from logdir."""
        keyval = {}
        for log in loggables:
            if log.log_in_keyval:
                keyval["sysinfo-" + log.logf] = log.readline(logdir)
        return keyval
    def log_test_keyvals(self, test_sysinfodir):
        """ Logging hook called by log_after_each_test to collect keyval
        entries to be written in the test keyval. """
        keyval = {}
        # grab any loggables that should be in the keyval
        keyval.update(self._read_sysinfo_keyvals(
            self.test_loggables, test_sysinfodir))
        keyval.update(self._read_sysinfo_keyvals(
            self.boot_loggables,
            os.path.join(test_sysinfodir, "reboot_current")))
        # remove hostname from uname info
        # Linux lpt36 2.6.18-smp-230.1 #1 [4069269] SMP Fri Oct 24 11:30:...
        if "sysinfo-uname" in keyval:
            kernel_vers = " ".join(keyval["sysinfo-uname"].split()[2:])
            keyval["sysinfo-uname"] = kernel_vers
        # grab the total avail memory, not used by sys tables
        path = os.path.join(test_sysinfodir, "reboot_current", "meminfo")
        if os.path.exists(path):
            mem_data = open(path).read()
            match = re.search(r"^MemTotal:\s+(\d+) kB$", mem_data,
                              re.MULTILINE)
            if match:
                keyval["sysinfo-memtotal-in-kb"] = match.group(1)
        # guess the system's total physical memory, including sys tables
        keyval["sysinfo-phys-mbytes"] = utils.rounded_memtotal()//1024
        # return what we collected
        return keyval
| ColinIanKing/autotest | client/base_sysinfo.py | Python | gpl-2.0 | 15,628 |
# -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class TestPaymentTransaction(TransactionCase):
    """Tests for the Authorize.Net auto-reconcile payment transaction
    behavior (Odoo v8-era API, judging by 'invoice_line'/'user_type')."""
    def setUp(self, *args, **kwargs):
        """Create the journal, acquirer, partner, product, accounts and an
        open invoice, plus a canned Authorize.Net form-feedback payload."""
        super(TestPaymentTransaction, self).setUp(*args, **kwargs)
        self.journal_id = self.env['account.journal'].create({
            'name': 'Journal',
            'code': 'BNK',
            'type': 'bank',
            'company_id': self.env.ref('base.main_company').id,
        })
        # reuse the preexisting Authorize.Net acquirer for the main company
        self.acquirer_id = self.env['payment.acquirer'].search([
            ('provider', '=', 'authorize'),
            ('company_id', '=', self.env.ref('base.main_company').id),
        ], limit=1)
        self.acquirer_id.write({'journal_id': self.journal_id.id})
        self.partner_id = self.env['res.partner'].create({'name': 'Partner',
                                                          'country_id': 1,
                                                          })
        self.product_id = self.env['product.product'].create({
            'name': 'Test Product',
            'list_price': 123.45,
        })
        self.account_type_id = self.env['account.account.type'].create({
            'name': 'Test Account Type',
            'code': 'TestType',
        })
        self.account_id = self.env['account.account'].create({
            'name': 'Test Account',
            'code': 'TEST',
            'user_type': self.account_type_id.id,
            'company_id': self.env.ref('base.main_company').id,
        })
        # one-line open invoice matching the 123.45 amount used throughout
        self.invoice_id = self.env['account.invoice'].create({
            'partner_id': self.partner_id.id,
            'account_id': self.account_id.id,
            'company_id': self.env.ref('base.main_company').id,
            'state': 'open',
            'invoice_line': [(0, 0, {
                'product_id': self.product_id.id,
                'account_id': self.account_id.id,
                'name': 'Test Line',
                'price_unit': 123.45,
                'quantity': 1,
            })],
        })
        self.invoice_id.action_move_create()
        self.PaymentTransaction = self.env['payment.transaction']
        # Sample Authorize.Net relay-response POST data; x_invoice_num is
        # tied to the invoice created above so the tx can be matched to it.
        self.authorize_post_data = {
            'return_url': u'/shop/payment/validate',
            'x_MD5_Hash': u'7934485E1C105940BE854208D10FAB4F',
            'x_account_number': u'XXXX0027',
            'x_address': u'Huge Street 2/543',
            'x_amount': u'123.45',
            'x_auth_code': u'E4W7IU',
            'x_avs_code': u'Y',
            'x_card_type': u'Visa',
            'x_cavv_response': u'2',
            'x_city': u'Sun City',
            'x_company': u'',
            'x_country': u'Belgium',
            'x_cust_id': u'',
            'x_cvv2_resp_code': u'',
            'x_description': u'',
            'x_duty': u'0.00',
            'x_email': u'norbert.buyer@exampl',
            'x_fax': u'',
            'x_first_name': u'Norbert',
            'x_freight': u'0.00',
            'x_invoice_num': self.invoice_id.number,
            'x_last_name': u'Buyer',
            'x_method': u'CC',
            'x_phone': u'0032 12 34 56 78',
            'x_po_num': u'',
            'x_response_code': u'0',
            'x_response_reason_code': u'1',
            'x_response_reason_text': u'This transaction has been approved.',
            'x_ship_to_address': u'Huge Street 2/543',
            'x_ship_to_city': u'Sun City',
            'x_ship_to_company': u'',
            'x_ship_to_country': u'Belgium',
            'x_ship_to_first_name': u'Norbert',
            'x_ship_to_last_name': u'Buyer',
            'x_ship_to_state': u'',
            'x_ship_to_zip': u'1000',
            'x_state': u'',
            'x_tax': u'0.00',
            'x_tax_exempt': u'FALSE',
            'x_test_request': u'false',
            'x_trans_id': u'2217460311',
            'x_type': u'auth_capture',
            'x_zip': u'1000'
        }
    def _new_txn(self, state='draft'):
        """Create a payment.transaction mirroring the canned POST data."""
        return self.env['payment.transaction'].create({
            'reference': 'Test',
            'acquirer_id': self.acquirer_id.id,
            'amount': 123.45,
            'state': state,
            'currency_id': self.invoice_id.currency_id.id,
            'partner_id': self.invoice_id.partner_id.id,
            'partner_country_id': self.invoice_id.partner_id.country_id.id,
            'partner_city': self.authorize_post_data.get('x_city'),
            'partner_address': self.authorize_post_data.get('x_address'),
        })
    def test_authorize_form_get_tx_from_data_tx_create(self):
        """ Validate that transaction is created when one doesn't exist """
        tx_id = self.PaymentTransaction._authorize_form_get_tx_from_data(
            self.authorize_post_data,
        )
        # reference format is '<invoice number> [<authorize trans id>]'
        self.assertEqual(
            tx_id.reference, '%s [%s]' % (
                self.invoice_id.number, self.authorize_post_data['x_trans_id']
            ),
        )
        self.assertEqual(
            tx_id.state, 'draft',
        )
    # @TODO: Figure out the account/line wire crossing in this test
    # def test_authorize_form_validate_does_voucher(self):
    #     """ Validate that transaction is completed on right Authorize res"""
    #     self.authorize_post_data['x_response_code'] = 1
    #     tx_id = self._new_txn()
    #     self.PaymentTransaction._authorize_form_validate(
    #         tx_id, self.authorize_post_data,
    #     )
    #     voucher_ids = self.env['account.voucher'].search([
    #         ('partner_id', '=', self.partner_id.id),
    #     ])
    #     self.assertEqual(
    #         1, len(voucher_ids),
    #     )
    def test_authorize_form_validate_does_not_voucher(self):
        """ Validate that transaction is left alone unless valid """
        # response code 0 == declined; no voucher should be generated
        self.authorize_post_data['x_response_code'] = 0
        tx_id = self._new_txn()
        self.PaymentTransaction._authorize_form_validate(
            tx_id, self.authorize_post_data,
        )
        voucher_ids = self.env['account.voucher'].search([
            ('partner_id', '=', self.partner_id.id),
        ])
        self.assertEqual(
            0, len(voucher_ids)
        )
| stephen144/odoo-payment | payment_authorize_auto_reconcile/tests/test_payment_transaction.py | Python | agpl-3.0 | 6,205 |
# Copyright 2013 C. A. Fitzgerald
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from xml.sax import ContentHandler
class RssHandler( ContentHandler ):
    """SAX handler that walks an RSS document and reports each completed
    <item> to `processor` as process_item(site_link + guid, item_link).

    Items without a <guid> fall back to using their <link> as the guid.
    """
    def __init__(self, processor ):
        # Stack of currently-open tag names; tells us where in the tree the
        # current text belongs.
        self.tag_path = []
        # Text accumulated since the last endElement() call (characters()
        # may deliver a node's text in several chunks).
        self.current_characters = ""
        self.item_guid = ""
        self.item_link = ""
        self.site = ""
        self.processor = processor
    def startElement(self, tag, attributes ):
        self.tag_path.append( tag )
    def endElement(self, tag ):
        # Only react inside a proper <rss> document.
        if self.tag_path[0] == 'rss':
            if self.tag_path[-1] == 'link' and self.tag_path[-2] == 'item':
                self.item_link = self.current_characters.strip()
            if self.tag_path[-1] == 'guid' and self.tag_path[-2] == 'item':
                self.item_guid = self.current_characters.strip()
            if self.tag_path[-1] == 'link' and self.tag_path[-2] == 'channel':
                self.site = self.current_characters.strip()
            if self.tag_path[-1] == 'item' and not self.item_link == "":
                if self.item_guid == "":
                    # No <guid> supplied: use the item link instead.
                    self.item_guid = self.item_link
                # Parenthesized single-argument print works identically on
                # Python 2 and 3 (the original used py2-only statements).
                print('----------')
                print(self.item_guid)
                self.processor.process_item( self.site + self.item_guid,
                                             self.item_link )
                self.item_guid = self.item_link = ""
        # Reset accumulated text and pop the closing tag for every element.
        self.current_characters = ""
        self.tag_path.pop()
    def characters(self, content):
        self.current_characters = self.current_characters + content
# -*- coding: utf-8 -*-
from settings import *
import sys
# Expand the media version / media url placeholders left in settings.py.
if '%d' in MEDIA_URL:
    MEDIA_URL = MEDIA_URL % MEDIA_VERSION
if '%s' in ADMIN_MEDIA_PREFIX:
    ADMIN_MEDIA_PREFIX = ADMIN_MEDIA_PREFIX % MEDIA_URL
TEMPLATE_DEBUG = DEBUG
MANAGERS = ADMINS
# You can override Django's or some apps' locales with these folders:
if os.path.exists(os.path.join(COMMON_DIR, 'locale_overrides_common')):
    INSTALLED_APPS += ('locale_overrides_common',)
if os.path.exists(os.path.join(PROJECT_DIR, 'locale_overrides')):
    INSTALLED_APPS += ('locale_overrides',)
# Add admin interface media files if necessary
if 'django.contrib.admin' in INSTALLED_APPS:
    INSTALLED_APPS += ('django_aep_export.admin_media',)
# Always add Django templates (exported from zip)
INSTALLED_APPS += (
    'django_aep_export.django_templates',
)
def add_app_media(env, combine, *appmedia):
    """Insert *appmedia entries into env['COMBINE_MEDIA'][combine] right
    before the '!START!' marker, creating the marker (and the entry) if
    they do not exist yet."""
    media_map = env['COMBINE_MEDIA']
    current = media_map.setdefault(combine, ())
    if '!START!' not in current:
        current = ('!START!',) + current
    marker = list(current).index('!START!')
    media_map[combine] = current[:marker] + appmedia + current[marker:]
def add_uncombined_app_media(env, app):
    """Register every .css/.js file under the app's media/ folder as its
    own (uncombined) entry in env['COMBINE_MEDIA']."""
    module = __import__(app, {}, {}, [''])
    media_root = os.path.join(os.path.dirname(module.__file__), 'media')
    # entries are keyed by the app's short name, not its dotted path
    app_name = app.rsplit('.', 1)[-1]
    for dirpath, _dirs, filenames in os.walk(media_root):
        for filename in filenames:
            if not filename.endswith(('.css', '.js')):
                continue
            relative = os.path.join(dirpath, filename)[len(media_root):]
            relative = relative.replace(os.sep, '/').lstrip('/')
            entry = '%s/%s' % (app_name, relative)
            add_app_media(env, entry, entry)
def check_app_imports(app):
    """Warn when importing `app` also pulls in its submodules, since
    imports inside an app's __init__.py can trigger recursive-import bugs."""
    prefix = app + '.'
    known = set(sys.modules)
    __import__(app, {}, {}, [''])
    added = [name[len(prefix):] for name in sys.modules.keys()
             if name not in known and name.startswith(prefix) and
             name[len(prefix):]]
    if added:
        import logging
        logging.warn('The app "%(app)s" contains imports in '
                     'its __init__.py (at least %(added)s). This can cause '
                     'strange bugs due to recursive imports! You should '
                     'either do the import lazily (within functions) or '
                     'ignore the app settings/urlsauto with '
                     'IGNORE_APP_SETTINGS and IGNORE_APP_URLSAUTO in '
                     'your settings.py.'
                     % {'app': app, 'added': ', '.join(added)})
# Import app-specific settings: each non-Django app may ship a settings
# module whose public names get merged into this module's globals.
for app in INSTALLED_APPS:
    # This is an optimization. Django's apps don't have special settings.
    # Also, allow for ignoring some apps' settings.
    if app.startswith('django.') or app.endswith('.*') or \
            app == 'appenginepatcher' or app in IGNORE_APP_SETTINGS:
        continue
    try:
        # First we check if __init__.py doesn't import anything
        check_app_imports(app)
        data = __import__(app + '.settings', {}, {}, [''])
        for key, value in data.__dict__.items():
            if not key.startswith('_'):
                globals()[key] = value
    except ImportError:
        # best-effort: apps without a settings module are simply skipped
        pass
# Remove start markers (inserted by add_app_media as insertion anchors;
# they must not survive into the final COMBINE_MEDIA config)
for combine in COMBINE_MEDIA:
    if '!START!' not in COMBINE_MEDIA[combine]:
        continue
    index = list(COMBINE_MEDIA[combine]).index('!START!')
    COMBINE_MEDIA[combine] = COMBINE_MEDIA[combine][:index] + \
        COMBINE_MEDIA[combine][index+1:]
# Project-local overrides win over everything assembled above (optional).
try:
    from settings_overrides import *
except ImportError:
    pass
| adamfisk/littleshoot-client | server/common/appengine/patch/common/appenginepatch/ragendja/settings_post.py | Python | gpl-2.0 | 3,683 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base classes for different types of simulators.
Simulator types include:
SimulatesSamples: mimics the interface of quantum hardware.
SimulatesAmplitudes: computes amplitudes of desired bitstrings in the
final state of the simulation.
SimulatesFinalState: allows access to the final state of the simulation.
SimulatesIntermediateState: allows for access to the state of the simulation
as the simulation iterates through the moments of a cirq.
"""
import abc
import collections
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Sequence,
Set,
Tuple,
TYPE_CHECKING,
TypeVar,
Union,
)
import warnings
import numpy as np
from cirq import circuits, ops, protocols, study, value, work
from cirq.sim.act_on_args import ActOnArgs
from cirq.sim.operation_target import OperationTarget
if TYPE_CHECKING:
import cirq
# Type variables tying simulator subclasses to their concrete step-result,
# trial-result, simulator-state and act-on-args types.
TStepResult = TypeVar('TStepResult', bound='StepResult')
TSimulationTrialResult = TypeVar('TSimulationTrialResult', bound='SimulationTrialResult')
TSimulatorState = TypeVar('TSimulatorState')
TActOnArgs = TypeVar('TActOnArgs', bound=ActOnArgs)
class SimulatesSamples(work.Sampler, metaclass=abc.ABCMeta):
    """Simulator that mimics running on quantum hardware.
    Implementors of this interface should implement the _run method.
    """
    def run_sweep(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        repetitions: int = 1,
    ) -> Sequence['cirq.Result']:
        # Materializes the lazy iterator version; see run_sweep_iter.
        return list(self.run_sweep_iter(program, params, repetitions))
    def run_sweep_iter(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        repetitions: int = 1,
    ) -> Iterator['cirq.Result']:
        """Runs the supplied Circuit, mimicking quantum hardware.
        In contrast to run, this allows for sweeping over different parameter
        values.
        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            repetitions: The number of repetitions to simulate.
        Returns:
            Result list for this run; one for each possible parameter
            resolver.
        Raises:
            ValueError: If the circuit has no measurements.
        """
        if not program.has_measurements():
            raise ValueError("Circuit has no measurements to sample.")
        for param_resolver in study.to_resolvers(params):
            records = {}
            if repetitions == 0:
                # Zero repetitions: emit an empty (0, 1, 1) array for every
                # measurement key instead of invoking _run.
                for _, op, _ in program.findall_operations_with_gate_type(ops.MeasurementGate):
                    records[protocols.measurement_key_name(op)] = np.empty([0, 1, 1])
            else:
                records = self._run(
                    circuit=program, param_resolver=param_resolver, repetitions=repetitions
                )
                # Back-compat shim: lift legacy 2D result arrays to the 3D
                # (repetition, instance, qubit) shape, warning once per call.
                flat_records = False
                for k, v in records.items():
                    if v.ndim == 2:
                        flat_records = True
                        records[k] = v.reshape((v.shape[0], 1, v.shape[1]))
                if flat_records:
                    warnings.warn(
                        (
                            'Starting in Cirq v0.15, values in the output of simulator._run must '
                            'be 3D instead of 2D, with a new dimension between the existing two '
                            'to capture "instances" of a key.'
                        ),
                        DeprecationWarning,
                    )
            yield study.ResultDict(params=param_resolver, records=records)
    @abc.abstractmethod
    def _run(
        self,
        circuit: 'cirq.AbstractCircuit',
        param_resolver: 'cirq.ParamResolver',
        repetitions: int,
    ) -> Dict[str, np.ndarray]:
        """Run a simulation, mimicking quantum hardware.
        Args:
            circuit: The circuit to simulate.
            param_resolver: Parameters to run with the program.
            repetitions: Number of times to repeat the run. It is expected that
                this is validated greater than zero before calling this method.
        Returns:
            A dictionary from measurement gate key to measurement
            results. Measurement results are stored in a 3-dimensional
            numpy array, the first dimension corresponding to the repetition.
            the second to the instance of that key in the circuit, and the
            third to the actual boolean measurement results (ordered by the
            qubits being measured.)
        """
        raise NotImplementedError()
class SimulatesAmplitudes(metaclass=value.ABCMetaImplementAnyOneOf):
"""Simulator that computes final amplitudes of given bitstrings.
Given a circuit and a list of bitstrings, computes the amplitudes
of the given bitstrings in the state obtained by applying the circuit
to the all zeros state. Implementors of this interface should implement
the compute_amplitudes_sweep_iter method.
"""
def compute_amplitudes(
self,
program: 'cirq.AbstractCircuit',
bitstrings: Sequence[int],
param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
) -> Sequence[complex]:
"""Computes the desired amplitudes.
The initial state is assumed to be the all zeros state.
Args:
program: The circuit to simulate.
bitstrings: The bitstrings whose amplitudes are desired, input
as an integer array where each integer is formed from measured
qubit values according to `qubit_order` from most to least
significant qubit, i.e. in big-endian ordering.
param_resolver: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
Returns:
List of amplitudes.
"""
return self.compute_amplitudes_sweep(
program, bitstrings, study.ParamResolver(param_resolver), qubit_order
)[0]
def compute_amplitudes_sweep(
self,
program: 'cirq.AbstractCircuit',
bitstrings: Sequence[int],
params: 'cirq.Sweepable',
qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
) -> Sequence[Sequence[complex]]:
"""Wraps computed amplitudes in a list.
Prefer overriding `compute_amplitudes_sweep_iter`.
"""
return list(self.compute_amplitudes_sweep_iter(program, bitstrings, params, qubit_order))
def _compute_amplitudes_sweep_to_iter(
self,
program: 'cirq.AbstractCircuit',
bitstrings: Sequence[int],
params: 'cirq.Sweepable',
qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
) -> Iterator[Sequence[complex]]:
if type(self).compute_amplitudes_sweep == SimulatesAmplitudes.compute_amplitudes_sweep:
raise RecursionError(
"Must define either compute_amplitudes_sweep or compute_amplitudes_sweep_iter."
)
yield from self.compute_amplitudes_sweep(program, bitstrings, params, qubit_order)
@value.alternative(
requires='compute_amplitudes_sweep', implementation=_compute_amplitudes_sweep_to_iter
)
def compute_amplitudes_sweep_iter(
self,
program: 'cirq.AbstractCircuit',
bitstrings: Sequence[int],
params: 'cirq.Sweepable',
qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
) -> Iterator[Sequence[complex]]:
"""Computes the desired amplitudes.
The initial state is assumed to be the all zeros state.
Args:
program: The circuit to simulate.
bitstrings: The bitstrings whose amplitudes are desired, input
as an integer array where each integer is formed from measured
qubit values according to `qubit_order` from most to least
significant qubit, i.e. in big-endian ordering.
params: Parameters to run with the program.
qubit_order: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
Returns:
An Iterator over lists of amplitudes. The outer dimension indexes
the circuit parameters and the inner dimension indexes bitstrings.
"""
raise NotImplementedError()
    def sample_from_amplitudes(
        self,
        circuit: 'cirq.AbstractCircuit',
        param_resolver: 'cirq.ParamResolver',
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE',
        repetitions: int = 1,
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
    ) -> Dict[int, int]:
        """Uses amplitude simulation to sample from the given circuit.

        This implements the algorithm outlined by Bravyi, Gosset, and Liu in
        https://arxiv.org/abs/2112.08499 to more efficiently calculate samples
        given an amplitude-based simulator.

        Simulators which also implement SimulatesSamples or SimulatesFullState
        should prefer `run()` or `simulate()`, respectively, as this method
        only accelerates sampling for amplitude-based simulators.

        Args:
            circuit: The circuit to simulate.
            param_resolver: Parameters to run with the program.
            seed: Random state to use as a seed. This must be provided
                manually - if the simulator has its own seed, it will not be
                used unless it is passed as this argument.
            repetitions: The number of repetitions to simulate.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.

        Returns:
            A dict of bitstrings sampled from the final state of `circuit` to
            the number of occurrences of that bitstring.

        Raises:
            ValueError: if 'circuit' has non-unitary elements, as differences
                in behavior between sampling steps break this algorithm.
        """
        prng = value.parse_random_state(seed)
        qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(circuit.all_qubits())
        # Prepend an identity layer so every qubit appears in moment 0; the
        # main loop below skips this layer and slices prefixes after it.
        base_circuit = circuits.Circuit(ops.I(q) for q in qubits) + circuit.unfreeze()
        qmap = {q: i for i, q in enumerate(qubits)}
        # Maps a bit-tuple (one bit per qubit, in `qubits` order) to how many
        # of the `repetitions` samples currently sit on that bitstring.
        current_samples = {(0,) * len(qubits): repetitions}
        solved_circuit = protocols.resolve_parameters(base_circuit, param_resolver)
        if not protocols.has_unitary(solved_circuit):
            raise ValueError("sample_from_amplitudes does not support non-unitary behavior.")
        if protocols.is_measurement(solved_circuit):
            raise ValueError("sample_from_amplitudes does not support intermediate measurement.")
        # Walk the circuit one operation at a time (skipping the identity
        # layer), re-sampling the qubits each op touches.
        for m_id, moment in enumerate(solved_circuit[1:]):
            circuit_prefix = solved_circuit[: m_id + 1]
            for t, op in enumerate(moment.operations):
                new_samples: Dict[Tuple[int, ...], int] = collections.defaultdict(int)
                qubit_indices = {qmap[q] for q in op.qubits}
                # Circuit up to and including the current op.
                subcircuit = circuit_prefix + circuits.Moment(moment.operations[: t + 1])
                for current_sample, count in current_samples.items():
                    # Branch the sample over all 2^k assignments of the k
                    # qubits the op acts on (other qubits stay fixed).
                    sample_set = [current_sample]
                    for idx in qubit_indices:
                        sample_set = [
                            target[:idx] + (result,) + target[idx + 1 :]
                            for target in sample_set
                            for result in [0, 1]
                        ]
                    bitstrings = [int(''.join(map(str, sample)), base=2) for sample in sample_set]
                    amps = self.compute_amplitudes(subcircuit, bitstrings, qubit_order=qubit_order)
                    # |amplitude|^2, renormalized (L1) to a conditional
                    # distribution over the branched assignments.
                    weights = np.abs(np.square(np.array(amps))).astype(np.float64)
                    weights /= np.linalg.norm(weights, 1)
                    subsample = prng.choice(len(sample_set), p=weights, size=count)
                    for sample_index in subsample:
                        new_samples[sample_set[sample_index]] += 1
                current_samples = new_samples
        # Convert bit-tuples back to big-endian integers.
        return {int(''.join(map(str, k)), base=2): v for k, v in current_samples.items()}
class SimulatesExpectationValues(metaclass=value.ABCMetaImplementAnyOneOf):
    """Simulator that computes exact expectation values of observables.

    Given a circuit and an observable map, computes exact (to float precision)
    expectation values for each observable at the end of the circuit.

    Implementors of this interface should implement the
    simulate_expectation_values_sweep_iter method.
    """

    def simulate_expectation_values(
        self,
        program: 'cirq.AbstractCircuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[float]:
        """Simulates the supplied circuit and calculates exact expectation
        values for the given observables on its final state.

        This method has no perfect analogy in hardware. Instead compare with
        Sampler.sample_expectation_values, which calculates estimated
        expectation values by sampling multiple times.

        Args:
            program: The circuit to simulate.
            observables: An observable or list of observables.
            param_resolver: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            permit_terminal_measurements: If the provided circuit ends with
                measurement(s), this method will generate an error unless this
                is set to True. This is meant to prevent measurements from
                ruining expectation value calculations.

        Returns:
            A list of expectation values, with the value at index `n`
            corresponding to `observables[n]` from the input.

        Raises:
            ValueError: If 'program' has terminal measurement(s) and
                'permit_terminal_measurements' is False.
        """
        # Delegate to the sweep variant with a single resolver and unwrap the
        # lone result.
        return self.simulate_expectation_values_sweep(
            program,
            observables,
            study.ParamResolver(param_resolver),
            qubit_order,
            initial_state,
            permit_terminal_measurements,
        )[0]

    def simulate_expectation_values_sweep(
        self,
        program: 'cirq.AbstractCircuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> List[List[float]]:
        """Wraps computed expectation values in a list.

        Prefer overriding `simulate_expectation_values_sweep_iter`.
        """
        return list(
            self.simulate_expectation_values_sweep_iter(
                program,
                observables,
                params,
                qubit_order,
                initial_state,
                permit_terminal_measurements,
            )
        )

    def _simulate_expectation_values_sweep_to_iter(
        self,
        program: 'cirq.AbstractCircuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> Iterator[List[float]]:
        """Fallback iterator implementation built from the list variant.

        Installed by `value.alternative` when a subclass overrides only
        `simulate_expectation_values_sweep`.
        """
        # Guard against infinite mutual recursion between the list and
        # iterator variants when neither has been overridden.
        if (
            type(self).simulate_expectation_values_sweep
            == SimulatesExpectationValues.simulate_expectation_values_sweep
        ):
            raise RecursionError(
                "Must define either simulate_expectation_values_sweep or "
                "simulate_expectation_values_sweep_iter."
            )
        yield from self.simulate_expectation_values_sweep(
            program,
            observables,
            params,
            qubit_order,
            initial_state,
            permit_terminal_measurements,
        )

    @value.alternative(
        requires='simulate_expectation_values_sweep',
        implementation=_simulate_expectation_values_sweep_to_iter,
    )
    def simulate_expectation_values_sweep_iter(
        self,
        program: 'cirq.AbstractCircuit',
        observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
        permit_terminal_measurements: bool = False,
    ) -> Iterator[List[float]]:
        """Simulates the supplied circuit and calculates exact expectation
        values for the given observables on its final state, sweeping over the
        given params.

        This method has no perfect analogy in hardware. Instead compare with
        Sampler.sample_expectation_values, which calculates estimated
        expectation values by sampling multiple times.

        Args:
            program: The circuit to simulate.
            observables: An observable or list of observables.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            permit_terminal_measurements: If the provided circuit ends in a
                measurement, this method will generate an error unless this
                is set to True. This is meant to prevent measurements from
                ruining expectation value calculations.

        Returns:
            An Iterator over expectation-value lists. The outer index determines
            the sweep, and the inner index determines the observable. For
            instance, results[1][3] would select the fourth observable measured
            in the second sweep.

        Raises:
            ValueError: If 'program' has terminal measurement(s) and
                'permit_terminal_measurements' is False.
        """
class SimulatesFinalState(
    Generic[TSimulationTrialResult], metaclass=value.GenericMetaImplementAnyOneOf
):
    """Simulator that allows access to the simulator's final state.

    Implementors of this interface should implement the simulate_sweep_iter
    method. This simulator only returns the state of the quantum system
    for the final step of a simulation. This simulator state may be a state
    vector, the density matrix, or another representation, depending on the
    implementation. For simulators that also allow stepping through
    a circuit see `SimulatesIntermediateState`.
    """

    def simulate(
        self,
        program: 'cirq.AbstractCircuit',
        param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> TSimulationTrialResult:
        """Simulates the supplied Circuit.

        This method returns a result which allows access to the entire
        simulator's final state.

        Args:
            program: The circuit to simulate.
            param_resolver: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.

        Returns:
            SimulationTrialResults for the simulation. Includes the final state.
        """
        # Single-resolver convenience wrapper around the sweep variant.
        return self.simulate_sweep(
            program, study.ParamResolver(param_resolver), qubit_order, initial_state
        )[0]

    def simulate_sweep(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> List[TSimulationTrialResult]:
        """Wraps computed states in a list.

        Prefer overriding `simulate_sweep_iter`.
        """
        return list(self.simulate_sweep_iter(program, params, qubit_order, initial_state))

    def _simulate_sweep_to_iter(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TSimulationTrialResult]:
        """Fallback iterator implementation built from `simulate_sweep`.

        Installed by `value.alternative` when a subclass overrides only the
        list-returning variant.
        """
        # Guard against infinite mutual recursion when neither variant has
        # been overridden.
        if type(self).simulate_sweep == SimulatesFinalState.simulate_sweep:
            raise RecursionError("Must define either simulate_sweep or simulate_sweep_iter.")
        yield from self.simulate_sweep(program, params, qubit_order, initial_state)

    @value.alternative(requires='simulate_sweep', implementation=_simulate_sweep_to_iter)
    def simulate_sweep_iter(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TSimulationTrialResult]:
        """Simulates the supplied Circuit.

        This method returns a result which allows access to the entire final
        simulator state. In contrast to simulate, this allows for sweeping
        over different parameter values.

        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.

        Returns:
            Iterator over SimulationTrialResults for this run, one for each
            possible parameter resolver.

        Raises:
            NotImplementedError: If neither this method nor `simulate_sweep`
                is overridden.
        """
        raise NotImplementedError()
class SimulatesIntermediateState(
    Generic[TStepResult, TSimulationTrialResult, TSimulatorState, TActOnArgs],
    SimulatesFinalState[TSimulationTrialResult],
    metaclass=abc.ABCMeta,
):
    """A SimulatesFinalState that simulates a circuit by moments.

    Whereas a general SimulatesFinalState may return the entire simulator
    state at the end of a circuit, a SimulatesIntermediateState can
    simulate stepping through the moments of a circuit.

    Implementors of this interface should implement the _core_iterator
    method.

    Note that state here refers to simulator state, which is not necessarily
    a state vector.
    """

    def simulate_sweep_iter(
        self,
        program: 'cirq.AbstractCircuit',
        params: 'cirq.Sweepable',
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TSimulationTrialResult]:
        """Simulates the supplied Circuit.

        This method returns a result which allows access to the entire
        state vector. In contrast to simulate, this allows for sweeping
        over different parameter values.

        Args:
            program: The circuit to simulate.
            params: Parameters to run with the program.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. This can be
                either a raw state or an `OperationTarget`. The form of the
                raw state depends on the simulation implementation. See
                documentation of the implementing class for details.

        Returns:
            List of SimulationTrialResults for this run, one for each
            possible parameter resolver.
        """
        qubit_order = ops.QubitOrder.as_qubit_order(qubit_order)
        for param_resolver in study.to_resolvers(params):
            # Copy an OperationTarget so each resolver in the sweep starts
            # from an unmutated initial state; raw states are passed through.
            state = (
                initial_state.copy()
                if isinstance(initial_state, OperationTarget)
                else initial_state
            )
            all_step_results = self.simulate_moment_steps(
                program, param_resolver, qubit_order, state
            )
            measurements: Dict[str, np.ndarray] = {}
            for step_result in all_step_results:
                for k, v in step_result.measurements.items():
                    measurements[k] = np.array(v, dtype=np.uint8)
            # NOTE(review): `step_result` is the last value from the loop;
            # this relies on `simulate_moment_steps` yielding at least one
            # step even for an empty circuit (see its docstring).
            yield self._create_simulator_trial_result(
                params=param_resolver,
                measurements=measurements,
                final_step_result=step_result,
            )

    def simulate_moment_steps(
        self,
        circuit: 'cirq.AbstractCircuit',
        param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
        qubit_order: 'cirq.QubitOrderOrList' = ops.QubitOrder.DEFAULT,
        initial_state: Any = None,
    ) -> Iterator[TStepResult]:
        """Returns an iterator of StepResults for each moment simulated.

        If the circuit being simulated is empty, a single step result should
        be returned with the state being set to the initial state.

        Args:
            circuit: The Circuit to simulate.
            param_resolver: A ParamResolver for determining values of Symbols.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. This can be
                either a raw state or a `TActOnArgs`. The form of the
                raw state depends on the simulation implementation. See
                documentation of the implementing class for details.

        Returns:
            Iterator that steps through the simulation, simulating each
            moment and returning a StepResult for each moment.
        """
        param_resolver = study.ParamResolver(param_resolver)
        resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
        check_all_resolved(resolved_circuit)
        # Convention: `None` means the all-zeros computational basis state.
        actual_initial_state = 0 if initial_state is None else initial_state
        return self._base_iterator(resolved_circuit, qubit_order, actual_initial_state)

    def _base_iterator(
        self,
        circuit: 'cirq.AbstractCircuit',
        qubit_order: 'cirq.QubitOrderOrList',
        initial_state: Any,
    ) -> Iterator[TStepResult]:
        """Iterator over StepResult from Moments of a Circuit.

        This is a thin wrapper around `create_act_on_args` and `_core_iterator`.
        Overriding this method was the old way of creating a circuit iterator,
        and this method is planned to be formally put on the deprecation path.
        Going forward, override the aforementioned two methods in custom
        simulators.

        Args:
            circuit: The circuit to simulate.
            qubit_order: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.

        Yields:
            StepResults from simulating a Moment of the Circuit.
        """
        qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(circuit.all_qubits())
        act_on_args = self._create_act_on_args(initial_state, qubits)
        return self._core_iterator(circuit, act_on_args)

    @abc.abstractmethod
    def _create_act_on_args(
        self,
        initial_state: Any,
        qubits: Sequence['cirq.Qid'],
    ) -> 'cirq.OperationTarget[TActOnArgs]':
        """Creates the OperationTarget state for a simulator.

        Custom simulators should implement this method.

        Args:
            initial_state: The initial state for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            qubits: Determines the canonical ordering of the qubits. This
                is often used in specifying the initial state, i.e. the
                ordering of the computational basis states.

        Returns:
            The `OperationTarget` for this simulator.
        """

    @abc.abstractmethod
    def _core_iterator(
        self,
        circuit: 'cirq.AbstractCircuit',
        sim_state: 'cirq.OperationTarget[TActOnArgs]',
        all_measurements_are_terminal: bool = False,
    ) -> Iterator[TStepResult]:
        """Iterator over StepResult from Moments of a Circuit.

        Custom simulators should implement this method.

        Args:
            circuit: The circuit to simulate.
            sim_state: The initial args for the simulation. The form of
                this state depends on the simulation implementation. See
                documentation of the implementing class for details.
            all_measurements_are_terminal: Whether all measurements in
                the circuit are terminal.

        Yields:
            StepResults from simulating a Moment of the Circuit.
        """

    @abc.abstractmethod
    def _create_simulator_trial_result(
        self,
        params: 'cirq.ParamResolver',
        measurements: Dict[str, np.ndarray],
        final_step_result: TStepResult,
    ) -> TSimulationTrialResult:
        """This method can be implemented to create a trial result.

        Args:
            params: The ParamResolver for this trial.
            measurements: The measurement results for this trial.
            final_step_result: The final step result of the simulation.

        Returns:
            The SimulationTrialResult.
        """
        raise NotImplementedError()
class StepResult(Generic[TSimulatorState], metaclass=abc.ABCMeta):
    """Results of a step of a SimulatesIntermediateState.

    Attributes:
        measurements: A dictionary from measurement gate key to measurement
            results, ordered by the qubits that the measurement operates on.
    """

    def __init__(self, sim_state: 'cirq.OperationTarget') -> None:
        self.measurements = sim_state.log_of_measurement_results
        self._classical_data = sim_state.classical_data

    @abc.abstractmethod
    def _simulator_state(self) -> TSimulatorState:
        """Returns the simulator state of the simulator after this step.

        This method starts with an underscore to indicate that it is private.
        To access public state, see public methods on StepResult.

        The form of the simulator_state depends on the implementation of the
        simulation,see documentation for the implementing class for the form of
        details.
        """

    @abc.abstractmethod
    def sample(
        self,
        qubits: List['cirq.Qid'],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
    ) -> np.ndarray:
        """Samples from the system at this point in the computation.

        Note that this does not collapse the state vector.

        Args:
            qubits: The qubits to be sampled in an order that influence the
                returned measurement results.
            repetitions: The number of samples to take.
            seed: A seed for the pseudorandom number generator.

        Returns:
            Measurement results with True corresponding to the ``|1⟩`` state.
            The outer list is for repetitions, and the inner corresponds to
            measurements ordered by the supplied qubits. These lists
            are wrapped as a numpy ndarray.
        """
        raise NotImplementedError()

    def sample_measurement_ops(
        self,
        measurement_ops: List['cirq.GateOperation'],
        repetitions: int = 1,
        seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
        *,
        _allow_repeated=False,
    ) -> Dict[str, np.ndarray]:
        """Samples from the system at this point in the computation.

        Note that this does not collapse the state vector.

        In contrast to `sample` which samples qubits, this takes a list of
        `cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`
        instances and then returns a mapping from the key in the measurement
        gate to the resulting bit strings. Different measurement operations must
        not act on the same qubits.

        Args:
            measurement_ops: `GateOperation` instances whose gates are
                `MeasurementGate` instances to be sampled form.
            repetitions: The number of samples to take.
            seed: A seed for the pseudorandom number generator.
            _allow_repeated: If True, adds extra dimension to the result,
                corresponding to the number of times a key is repeated.

        Returns: A dictionary from measurement gate key to measurement
            results. Measurement results are stored in a 2-dimensional
            numpy array, the first dimension corresponding to the repetition
            and the second to the actual boolean measurement results (ordered
            by the qubits being measured.)

        Raises:
            ValueError: If the operation's gates are not `MeasurementGate`
                instances or a qubit is acted upon multiple times by different
                operations from `measurement_ops`.
        """
        # Sanity checks.
        for op in measurement_ops:
            gate = op.gate
            if not isinstance(gate, ops.MeasurementGate):
                raise ValueError(f'{op.gate} was not a MeasurementGate')
        # Count key occurrences; duplicates are only an error when repeated
        # keys are not explicitly allowed.
        result = collections.Counter(
            key for op in measurement_ops for key in protocols.measurement_key_names(op)
        )
        if result and not _allow_repeated:
            duplicates = [k for k, v in result.most_common() if v > 1]
            if duplicates:
                raise ValueError(f"Measurement key {','.join(duplicates)} repeated")
        # Find measured qubits, ensuring a consistent ordering.
        measured_qubits = []
        seen_qubits: Set[cirq.Qid] = set()
        for op in measurement_ops:
            for q in op.qubits:
                if q not in seen_qubits:
                    seen_qubits.add(q)
                    measured_qubits.append(q)
        # Perform whole-system sampling of the measured qubits.
        indexed_sample = self.sample(measured_qubits, repetitions, seed=seed)
        # Extract results for each measurement.
        results: Dict[str, Any] = {}
        qubits_to_index = {q: i for i, q in enumerate(measured_qubits)}
        for op in measurement_ops:
            gate = cast(ops.MeasurementGate, op.gate)
            key = gate.key
            out = np.zeros(shape=(repetitions, len(op.qubits)), dtype=np.int8)
            inv_mask = gate.full_invert_mask()
            for i, q in enumerate(op.qubits):
                out[:, i] = indexed_sample[:, qubits_to_index[q]]
                if inv_mask[i]:
                    # XOR with the boolean (value < 2) flips only binary
                    # outcomes 0/1, leaving higher qudit levels untouched.
                    out[:, i] ^= out[:, i] < 2
            if _allow_repeated:
                if key not in results:
                    results[key] = []
                results[key].append(out)
            else:
                results[gate.key] = out
        # With repeated keys, stack per-key arrays and swap axes so the
        # repetition dimension stays first.
        return (
            results
            if not _allow_repeated
            else {k: np.array(v).swapaxes(0, 1) for k, v in results.items()}
        )
@value.value_equality(unhashable=True)
class SimulationTrialResult:
    """Results of a simulation by a SimulatesFinalState.

    Unlike Result these results contain the final simulator_state of the
    system. This simulator_state is dependent on the simulation implementation
    and may be, for example, the state vector or the density matrix of the
    system.

    Attributes:
        params: A ParamResolver of settings used for this result.
        measurements: A dictionary from measurement gate key to measurement
            results. Measurement results are a numpy ndarray of actual boolean
            measurement results (ordered by the qubits acted on by the
            measurement gate.)
    """

    def __init__(
        self,
        params: 'cirq.ParamResolver',
        measurements: Dict[str, np.ndarray],
        final_simulator_state: Any = None,
        final_step_result: 'cirq.StepResult' = None,
    ) -> None:
        """Initializes the `SimulationTrialResult` class.

        Args:
            params: A ParamResolver of settings used for this result.
            measurements: A dictionary from measurement gate key to measurement
                results. Measurement results are a numpy ndarray of actual
                boolean measurement results (ordered by the qubits acted on by
                the measurement gate.)
            final_simulator_state: The final simulator state.
            final_step_result: The step result coming from the simulation, that
                can be used to get the final simulator state. This is primarily
                for cases when calculating simulator state may be expensive and
                unneeded. If this is provided, then final_simulator_state
                should not be, and vice versa.

        Raises:
            ValueError: If `final_step_result` and `final_simulator_state` are both
                None or both not None.
        """
        # Exactly one of the two must be supplied (XOR via None-count).
        if [final_step_result, final_simulator_state].count(None) != 1:
            raise ValueError(
                'Exactly one of final_simulator_state and final_step_result should be provided'
            )
        self.params = params
        self.measurements = measurements
        self._final_step_result = final_step_result
        # Lazily computed from the step result on first access (see property).
        self._final_simulator_state_cache = final_simulator_state

    @property
    def _final_simulator_state(self):
        # Defer the potentially expensive state extraction until needed,
        # then cache it for subsequent accesses.
        if self._final_simulator_state_cache is None:
            self._final_simulator_state_cache = self._final_step_result._simulator_state()
        return self._final_simulator_state_cache

    def __repr__(self) -> str:
        return (
            f'cirq.SimulationTrialResult(params={self.params!r}, '
            f'measurements={self.measurements!r}, '
            f'final_simulator_state={self._final_simulator_state!r})'
        )

    def __str__(self) -> str:
        def bitstring(vals):
            # Use a space separator when any outcome needs more than one
            # digit (qudit levels >= 10) so values remain unambiguous.
            separator = ' ' if np.max(vals) >= 10 else ''
            return separator.join(str(int(v)) for v in vals)

        results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])
        if not results:
            return '(no measurements)'
        return ' '.join([f'{key}={val}' for key, val in results])

    def _repr_pretty_(self, p: Any, cycle: bool) -> None:
        """Text output in Jupyter."""
        if cycle:
            # There should never be a cycle. This is just in case.
            p.text('SimulationTrialResult(...)')
        else:
            p.text(str(self))

    def _value_equality_values_(self) -> Any:
        # Convert arrays to lists so equality is by content, not identity.
        measurements = {k: v.tolist() for k, v in sorted(self.measurements.items())}
        return self.params, measurements, self._final_simulator_state

    @property
    def qubit_map(self) -> Dict['cirq.Qid', int]:
        """A map from Qid to index used to define the ordering of the basis in
        the result.
        """
        return self._final_simulator_state.qubit_map

    def _qid_shape_(self) -> Tuple[int, ...]:
        return _qubit_map_to_shape(self.qubit_map)
def _qubit_map_to_shape(qubit_map: Dict['cirq.Qid', int]) -> Tuple[int, ...]:
    """Converts a qubit-to-index map into a qid shape tuple.

    Args:
        qubit_map: Map from qid to its position in the canonical ordering.
            The indices must form a permutation of `range(len(qubit_map))`.

    Returns:
        A tuple whose i-th entry is the dimension of the qid at index i.

    Raises:
        ValueError: If an index is out of bounds, or two qids share an index.
    """
    qid_shape: List[int] = [-1] * len(qubit_map)
    try:
        for q, i in qubit_map.items():
            qid_shape[i] = q.dimension
    except IndexError as err:
        # Chain the original IndexError so the offending index is preserved
        # in the traceback (PEP 3134).
        raise ValueError(
            f'Invalid qubit_map. Qubit index out of bounds. Map is <{qubit_map!r}>.'
        ) from err
    if -1 in qid_shape:
        # A duplicate index leaves some slot unfilled (still -1).
        raise ValueError(f'Invalid qubit_map. Duplicate qubit index. Map is <{qubit_map!r}>.')
    return tuple(qid_shape)
def check_all_resolved(circuit):
    """Raises if the circuit contains unresolved symbols."""
    # Fast path: fully resolved circuits need no per-op scan.
    if not protocols.is_parameterized(circuit):
        return
    unresolved = [op for moment in circuit for op in moment if protocols.is_parameterized(op)]
    raise ValueError(
        'Circuit contains ops whose symbols were not specified in '
        'parameter sweep. Ops: {}'.format(unresolved)
    )
def split_into_matching_protocol_then_general(
    circuit: 'cirq.AbstractCircuit',
    predicate: Callable[['cirq.Operation'], bool],
) -> Tuple['cirq.AbstractCircuit', 'cirq.AbstractCircuit']:
    """Splits the circuit into a matching prefix and non-matching suffix.

    The split is tracked per qubit: once a qubit is touched by a
    non-matching operation, every later operation on that qubit goes into
    the suffix, while qubits that have only seen matching operations keep
    contributing to the prefix.
    """
    tainted_qubits: Set[cirq.Qid] = set()
    matching_prefix = circuits.Circuit()
    general_suffix = circuits.Circuit()
    for moment in circuit:
        prefix_ops = []
        suffix_ops = []
        for op in moment:
            op_qubits = set(op.qubits)
            # An op stays in the prefix only if it matches AND none of its
            # qubits have been tainted by an earlier non-matching op.
            if predicate(op) and op_qubits.isdisjoint(tainted_qubits):
                prefix_ops.append(op)
            else:
                tainted_qubits |= op_qubits
                suffix_ops.append(op)
        if prefix_ops:
            matching_prefix.append(circuits.Moment(prefix_ops))
        if suffix_ops:
            general_suffix.append(circuits.Moment(suffix_ops))
    return matching_prefix, general_suffix
| quantumlib/Cirq | cirq-core/cirq/sim/simulator.py | Python | apache-2.0 | 44,153 |
#!/usr/bin/env python
"""
This is where the mainline sits and is responsible for setting up the logging,
the argument parsing and for starting up dashmat.
"""
from dashmat.actions import available_actions
from dashmat.collector import Collector
from dashmat.errors import BadTask
from delfick_app import App as DelfickApp
import logging
log = logging.getLogger("dashmat.executor")
class App(DelfickApp):
    """Command-line application for dashmat, built on delfick_app.

    Declares the CLI surface (config file, task selection, server options)
    and dispatches the chosen task against a prepared `Collector`.
    """

    cli_categories = ['dashmat']
    cli_description = "Application that reads YAML and serves up pretty dashboards"
    # DASHMAT_CONFIG env var acts as the default for --config.
    cli_environment_defaults = {"DASHMAT_CONFIG": ("--config", 'dashmat.yml')}
    # Positional args map onto --task and --artifact, in that order.
    cli_positional_replacements = [('--task', 'list_tasks'), ('--artifact', "")]

    def execute(self, cli_args, args_dict, extra_args, logging_handler):
        """Prepares the collector from configuration and runs the chosen task.

        Raises:
            BadTask: If the requested task is not in `available_actions`.
        """
        args_dict["dashmat"]["debug"] = cli_args.debug
        collector = Collector()
        collector.prepare(args_dict["dashmat"]["config"], args_dict)
        # Apply a custom terminal color theme if the configuration defines one.
        if hasattr(collector, "configuration") and "term_colors" in collector.configuration:
            self.setup_logging_theme(logging_handler, colors=collector.configuration["term_colors"])
        task = cli_args.dashmat_chosen_task
        if task not in available_actions:
            raise BadTask("Unknown task", available=list(available_actions.keys()), wanted=task)
        available_actions[task](collector)

    def setup_other_logging(self, args, verbose=False, silent=False, debug=False):
        """Quiets the `requests` logger unless verbose/debug output is wanted."""
        logging.getLogger("requests").setLevel([logging.CRITICAL, logging.ERROR][verbose or debug])

    def specify_other_args(self, parser, defaults):
        """Registers dashmat-specific command-line arguments on the parser."""
        parser.add_argument("--config"
            , help = "The config file to read"
            , dest = "dashmat_config"
            , **defaults["--config"]
            )

        parser.add_argument("--no-dynamic-dashboard-js"
            , help = "Turn off transpiling the dashboard javascript at runtime"
            , dest = "dashmat_dynamic_dashboard_js"
            , action = "store_false"
            )

        parser.add_argument("--redis-host"
            , help = "Redis host to store data in"
            , dest = "dashmat_redis_host"
            , default = ""
            )

        parser.add_argument("--task"
            , help = "The task to run"
            , dest = "dashmat_chosen_task"
            , **defaults["--task"]
            )

        parser.add_argument("--host"
            , help = "The host to serve the dashboards on"
            , dest = "dashmat_host"
            , default = "localhost"
            )

        parser.add_argument("--port"
            , help = "The port to serve the dashboards on"
            , default = 7546
            , dest = "dashmat_port"
            , type = int
            )

        parser.add_argument("--artifact"
            , help = "Extra argument to be used as decided by each task"
            , dest = "dashmat_artifact"
            , **defaults['--artifact']
            )

        parser.add_argument("--without-checks"
            , help = "Don't run the cronned checks, useful for development"
            , dest = "dashmat_without_checks"
            , action = "store_true"
            )

        return parser
# Expose the delfick_app-provided entry point at module level so this file
# can be run directly or referenced as a script entry point.
main = App.main

if __name__ == '__main__':
    main()
| realestate-com-au/dashmat | dashmat/executor.py | Python | mit | 3,227 |
import tkinter as tk
from tkinter import Button
import time
import numpy as np
from PIL import ImageTk, Image
# Shorthand alias for PIL's Tk-compatible image class.
PhotoImage = ImageTk.PhotoImage

UNIT = 100  # pixels per grid cell
HEIGHT = 5  # grid world height (rows)
WIDTH = 5  # grid world width (columns)
TRANSITION_PROB = 1  # transitions are deterministic
POSSIBLE_ACTIONS = [0, 1, 2, 3]  # up, down, left, right
ACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # actions expressed as coordinate offsets
REWARDS = []  # NOTE(review): presumably populated elsewhere — not visible in this chunk
class GraphicDisplay(tk.Tk):
    """Tkinter window that visualizes policy iteration on the 5x5 grid world.

    Shows the grid, the agent (rectangle), the reward cells, the current
    value table and the current policy (as arrows), with buttons that run
    one step of policy evaluation / improvement, animate the agent along
    the policy, or reset everything.
    """

    def __init__(self, agent):
        super(GraphicDisplay, self).__init__()
        self.title('Policy Iteration')
        # Square grid plus a 50 px strip at the bottom for the button row.
        self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))
        self.texts = []   # canvas ids of value/reward text items
        self.arrows = []  # canvas ids of policy arrow images
        self.env = Env()
        self.agent = agent
        self.evaluation_count = 0   # number of "Evaluate" clicks so far
        self.improvement_count = 0  # number of "Improve" clicks so far
        self.is_moving = 0          # 1 while the agent animation is running
        (self.up, self.down, self.left, self.right), self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.text_reward(2, 2, "R : 1.0")
        self.text_reward(1, 2, "R : -1.0")
        self.text_reward(2, 1, "R : -1.0")

    def _build_canvas(self):
        """Create the canvas with control buttons, grid lines and images."""
        canvas = tk.Canvas(self, bg='white',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # initialize the control buttons
        iteration_button = Button(self, text="Evaluate",
                                  command=self.evaluate_policy)
        iteration_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10,
                             window=iteration_button)
        policy_button = Button(self, text="Improve",
                               command=self.improve_policy)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10,
                             window=policy_button)
        policy_button = Button(self, text="move", command=self.move_by_policy)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10,
                             window=policy_button)
        policy_button = Button(self, text="reset", command=self.reset)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10,
                             window=policy_button)
        # draw the grid (vertical lines, then horizontal lines)
        for col in range(0, WIDTH * UNIT, UNIT):  # x = 0, 100, ..., 400
            x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for row in range(0, HEIGHT * UNIT, UNIT):  # y = 0, 100, ..., 400
            x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row
            canvas.create_line(x0, y0, x1, y1)
        # add the agent and reward-cell images to the canvas
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        canvas.create_image(250, 150, image=self.shapes[1])
        canvas.create_image(150, 250, image=self.shapes[1])
        canvas.create_image(250, 250, image=self.shapes[2])
        canvas.pack()
        return canvas

    def load_images(self):
        """Load arrow and shape sprites from ../img.

        Returns ``((up, down, left, right), (rectangle, triangle, circle))``.
        """
        up = PhotoImage(Image.open("../img/up.png").resize((13, 13)))
        right = PhotoImage(Image.open("../img/right.png").resize((13, 13)))
        left = PhotoImage(Image.open("../img/left.png").resize((13, 13)))
        down = PhotoImage(Image.open("../img/down.png").resize((13, 13)))
        rectangle = PhotoImage(Image.open("../img/rectangle.png").resize((65, 65)))
        triangle = PhotoImage(Image.open("../img/triangle.png").resize((65, 65)))
        circle = PhotoImage(Image.open("../img/circle.png").resize((65, 65)))
        return (up, down, left, right), (rectangle, triangle, circle)

    def reset(self):
        """Clear all text/arrows, reset the agent's tables and move the
        rectangle back to the top-left cell. Ignored while animating."""
        if self.is_moving == 0:
            self.evaluation_count = 0
            self.improvement_count = 0
            for i in self.texts:
                self.canvas.delete(i)
            for i in self.arrows:
                self.canvas.delete(i)
            self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]
            # NOTE(review): `* WIDTH` makes the WIDTH entries of each row alias
            # one shared list — safe only if entries are rebound, never mutated
            # in place; confirm against the agent implementation.
            self.agent.policy_table = ([[[0.25, 0.25, 0.25, 0.25]] * WIDTH
                                        for _ in range(HEIGHT)])
            # an empty policy marks the terminal (goal) cell
            self.agent.policy_table[2][2] = []
            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)

    def text_value(self, row, col, contents, font='Helvetica', size=10,
                   style='normal', anchor="nw"):
        """Draw a value string near the lower part of cell (row, col).

        Returns None (the result of ``list.append``).
        """
        origin_x, origin_y = 85, 70
        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        return self.texts.append(text)

    def text_reward(self, row, col, contents, font='Helvetica', size=10,
                    style='normal', anchor="nw"):
        """Draw a reward label near the top-left of cell (row, col).

        Returns None (the result of ``list.append``).
        """
        origin_x, origin_y = 5, 5
        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        return self.texts.append(text)

    def rectangle_move(self, action):
        """Move the agent one cell (0=up, 1=down, 2=left, 3=right),
        staying inside the grid."""
        base_action = np.array([0, 0])
        location = self.find_rectangle()
        self.render()
        if action == 0 and location[0] > 0:  # up
            base_action[1] -= UNIT
        elif action == 1 and location[0] < HEIGHT - 1:  # down
            base_action[1] += UNIT
        elif action == 2 and location[1] > 0:  # left
            base_action[0] -= UNIT
        elif action == 3 and location[1] < WIDTH - 1:  # right
            base_action[0] += UNIT
        # move agent
        self.canvas.move(self.rectangle, base_action[0], base_action[1])

    def find_rectangle(self):
        """Return the agent's current (row, col) grid cell.

        NOTE(review): divides by the literal 100 instead of UNIT — these only
        agree while UNIT == 100.
        """
        temp = self.canvas.coords(self.rectangle)
        x = (temp[0] / 100) - 0.5
        y = (temp[1] / 100) - 0.5
        return int(y), int(x)

    def move_by_policy(self):
        """Animate the agent from the start cell along the agent's policy
        until the terminal cell (whose policy list is empty) is reached."""
        if self.improvement_count != 0 and self.is_moving != 1:
            self.is_moving = 1
            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
            x, y = self.find_rectangle()
            while len(self.agent.policy_table[x][y]) != 0:
                # NOTE(review): rectangle_move(...) is evaluated immediately and
                # its return value (None) is what gets passed to after(), so the
                # move happens synchronously and the 100 ms delay never applies
                # — confirm this is intended (render() already sleeps 0.1 s).
                self.after(100,
                           self.rectangle_move(self.agent.get_action([x, y])))
                x, y = self.find_rectangle()
            self.is_moving = 0

    def draw_one_arrow(self, col, row, policy):
        """Draw an arrow in cell (col, row) for every action whose policy
        probability is positive; the terminal cell (2, 2) gets none."""
        if col == 2 and row == 2:
            return
        if policy[0] > 0:  # up
            origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.up))
        if policy[1] > 0:  # down
            origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.down))
        if policy[2] > 0:  # left
            origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.left))
        if policy[3] > 0:  # right
            origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.right))

    def draw_from_policy(self, policy_table):
        """Redraw the policy arrows for every cell."""
        for i in range(HEIGHT):
            for j in range(WIDTH):
                self.draw_one_arrow(i, j, policy_table[i][j])

    def print_value_table(self, value_table):
        """Render the value of every cell onto the canvas."""
        for i in range(WIDTH):
            for j in range(HEIGHT):
                self.text_value(i, j, value_table[i][j])

    def render(self):
        """Briefly pause, keep the agent on top of other items, refresh."""
        time.sleep(0.1)
        self.canvas.tag_raise(self.rectangle)
        self.update()

    def evaluate_policy(self):
        """Run one sweep of policy evaluation and redraw the value table."""
        self.evaluation_count += 1
        for i in self.texts:
            self.canvas.delete(i)
        self.agent.policy_evaluation()
        self.print_value_table(self.agent.value_table)

    def improve_policy(self):
        """Run one step of policy improvement and redraw the policy arrows."""
        self.improvement_count += 1
        for i in self.arrows:
            self.canvas.delete(i)
        self.agent.policy_improvement()
        self.draw_from_policy(self.agent.policy_table)
class Env:
    """Deterministic 5x5 grid-world environment for policy iteration.

    Rewards: +1 at (2, 2) (circle), -1 at (1, 2) and (2, 1) (triangles);
    every other cell yields 0.
    """

    def __init__(self):
        self.transition_probability = TRANSITION_PROB
        self.width = WIDTH
        self.height = HEIGHT
        self.possible_actions = POSSIBLE_ACTIONS
        # Reward grid: all zeros except the goal and the two penalty cells.
        self.reward = [[0] * WIDTH for _ in range(HEIGHT)]
        self.reward[2][2] = 1   # circle cell: reward +1
        self.reward[1][2] = -1  # triangle cell: reward -1
        self.reward[2][1] = -1  # triangle cell: reward -1
        # Precompute every [first, second] coordinate pair once.
        self.all_state = [[a, b]
                          for a in range(WIDTH)
                          for b in range(HEIGHT)]

    def get_reward(self, state, action):
        """Return the reward collected by taking ``action`` from ``state``."""
        destination = self.state_after_action(state, action)
        return self.reward[destination[0]][destination[1]]

    def state_after_action(self, state, action_index):
        """Apply the action offset to ``state`` and clamp to the grid."""
        d0, d1 = ACTIONS[action_index]
        return self.check_boundary([state[0] + d0, state[1] + d1])

    @staticmethod
    def check_boundary(state):
        """Clamp both coordinates into the grid, mutating ``state`` in place."""
        state[0] = max(0, min(state[0], WIDTH - 1))
        state[1] = max(0, min(state[1], HEIGHT - 1))
        return state

    def get_transition_prob(self, state, action):
        """Transitions are deterministic, so this is a constant."""
        return self.transition_probability

    def get_all_states(self):
        """Return the precomputed list of every state in the grid."""
        return self.all_state
return self.all_state | zzsza/TIL | reinforcement_learning/textbook summary/environment.py | Python | mit | 10,109 |
#jaula-change.py
import bge, bpy
import Player
from Level import Level
def main():
    """Swap the owning object's mesh (via its 'replacemesh' actuator) the
    first time the player touches it, using 'last_collision_id' on the
    player to avoid retriggering."""
    controller = bge.logic.getCurrentController()
    owner = controller.owner
    print("hola", owner.name)
    touch_sensor = owner.sensors["Touch"]
    replace_actuator = owner.actuators["replacemesh"]

    if not touch_sensor.positive:
        return

    scene = bge.logic.getCurrentScene()
    player = scene.objects["player"]
    if player['last_collision_id'] == owner['id']:
        return

    # Remember this object so repeated contacts do not retrigger the swap.
    player['last_collision_id'] = owner['id']
    print("last_col ", owner['id'])
    print("hola", owner.name)
    if owner["kinects"] == True:
        print(owner["kinects"])
        # change mesh
        print("chage jaula")
        controller.activate(replace_actuator)
# -*- coding: utf-8 -*-
from irc3.compat import asyncio
from irc3d import IrcServer
import irc3
@irc3.plugin
class Plugin(object):
    """IRC bot plugin demonstrating DCC file transfer in both directions.

    The 'sender' bot offers this source file when 'receiver' joins #dcc;
    the 'receiver' bot accepts the CTCP offer and downloads the file.
    """

    def __init__(self, context):
        self.log = context.log
        self.context = context

    @irc3.event(irc3.rfc.CONNECTED)
    def connected(self, **kw):
        """Join the demo channel as soon as the bot is connected."""
        self.context.join('#dcc')

    @irc3.event(irc3.rfc.JOIN)
    @asyncio.coroutine
    def join(self, mask=None, **kw):
        """When the receiver bot joins, offer it this file and wait until
        the DCC connection is closed."""
        if mask.nick != self.context.nick and mask.nick == 'receiver':
            # receiver joined the chan. offer a file
            conn = yield from self.context.dcc_send(mask, __file__)
            yield from conn.closed
            self.context.log.info('file sent to %s', mask.nick)

    @irc3.event(irc3.rfc.CTCP)
    @asyncio.coroutine
    def on_ctcp(self, mask=None, **kwargs):
        """Accept a DCC SEND offer, download the file, then resolve the
        ``file_received`` future so the demo's event loop can stop."""
        # parse ctcp message (format assumed: "DCC SEND name host port size")
        name, host, port, size = kwargs['ctcp'].split()[2:]
        self.context.log.info('%s is offering %s', mask.nick, name)
        # get the file
        conn = yield from self.context.create_task(self.context.dcc_get(
            mask, host, port, '/tmp/sent.py', int(size)))
        yield from conn.closed
        self.context.log.info('file received from %s', mask.nick)
        # end loop by setting future's result
        self.context.config.file_received.set_result(True)
def main():
    """Run a throwaway IRC server plus two bots on one event loop and block
    until the DCC file transfer between them completes.

    Also removes an unused inner helper (``f``) from the original: it
    duplicated the receiver startup but was never called — the receiver is
    scheduled via ``loop.call_later`` instead.
    """
    loop = asyncio.get_event_loop()
    # run a test server
    server = IrcServer.from_config(dict(
        loop=loop,
        servername='test',
        includes=['irc3d.plugins.core'],
    ))
    server.run(forever=False)

    cfg = dict(
        host='localhost',
        port=6667,
        nick='sender',
        includes=[__name__],
        loop=loop,
    )
    # this bot will send the file (it offers it when 'receiver' joins #dcc)
    sender = irc3.IrcBot.from_config(cfg)
    sender.run(forever=False)

    # resolved by Plugin.on_ctcp once the transfer finishes
    file_received = asyncio.Future()
    # this bot will receive the file; it is created *after* sender so the
    # sender is already in the channel when the receiver joins
    receiver = irc3.IrcBot.from_config(cfg,
                                       nick='receiver',
                                       file_received=file_received)
    loop.call_later(.2, receiver.run, False)
    loop.run_until_complete(file_received)
if __name__ == '__main__':
    # Run the DCC send/receive demo when executed directly.
    main()
| mrhanky17/irc3 | examples/dcc_send_and_get.py | Python | mit | 2,286 |
from ..utils import *
##
# Minions
class UNG_020:
	"""Arcanologist"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_021:
	"""Steam Surger"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_027:
	"""Pyros"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_846:
	"""Shimmering Tempest"""
	# No scripted behavior implemented for this card yet.
	pass
##
# Spells
class UNG_018:
	"""Flame Geyser"""
	# Play requirement: must be played with a target. Effect not yet scripted.
	requirements = {PlayReq.REQ_TARGET_TO_PLAY: 0}
	pass
class UNG_024:
	"""Mana Bind"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_028:
	"""Open the Waygate"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_941:
	"""Primordial Glyph"""
	# No scripted behavior implemented for this card yet.
	pass
class UNG_948:
"""Molten Reflection"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_TARGET_TO_PLAY: 0}
pass
class UNG_955:
	"""Meteor"""
	# Play requirements: needs a minion target. Effect not yet scripted.
	requirements = {
		PlayReq.REQ_MINION_TARGET: 0,
		PlayReq.REQ_TARGET_TO_PLAY: 0}
	pass
| jleclanche/fireplace | fireplace/cards/ungoro/mage.py | Python | agpl-3.0 | 730 |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
# Rotate an array of n elements to the right by k steps.
#
# For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].
#
# Note:
# Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
class Solution(object):
    """Rotate in place with the three-reversal trick: reverse the whole
    list, then reverse the first k elements, then the remaining ones."""

    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if not nums:
            # Guard: ``k %= 0`` would raise ZeroDivisionError on an empty list.
            return
        k %= len(nums)
        self.reverse(nums, 0, len(nums))
        self.reverse(nums, 0, k)
        self.reverse(nums, k, len(nums))

    def reverse(self, nums, start, end):
        """Reverse nums[start:end] in place with two converging indices."""
        while start < end:
            nums[start], nums[end - 1] = nums[end - 1], nums[start]
            start += 1
            end -= 1
# Time: O(n)
# Space: O(1)
from fractions import gcd
class Solution2(object):
    """Rotate in place by applying gcd(n, k) disjoint cyclic permutations.

    Fixed for Python 3: ``xrange`` -> ``range``, ``fractions.gcd`` (removed
    in Python 3.9) -> ``math.gcd`` with a Python 2 fallback, and true
    division -> floor division so ``cycle_len`` stays an int.
    """

    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        try:
            from math import gcd  # Python 3.5+
        except ImportError:  # Python 2 fallback
            from fractions import gcd

        def apply_cycle_permutation(k, offset, cycle_len, nums):
            # Walk one cycle of the permutation, shifting each element k slots.
            tmp = nums[offset]
            for i in range(1, cycle_len):
                nums[(offset + i * k) % len(nums)], tmp = tmp, nums[(offset + i * k) % len(nums)]
            nums[offset] = tmp

        if not nums:
            # Guard: ``k %= 0`` would raise ZeroDivisionError on an empty list.
            return
        k %= len(nums)
        num_cycles = gcd(len(nums), k)
        cycle_len = len(nums) // num_cycles  # floor division: a count, not a float
        for i in range(num_cycles):
            apply_cycle_permutation(k, i, cycle_len, nums)
# Time: O(n)
# Space: O(1)
class Solution3(object):
    """Cycle-following rotation: carry each element forward k slots,
    starting a new cycle whenever the walk returns to where it began."""

    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        moved = 0
        cycle_start = 0
        while moved < len(nums):
            position = cycle_start
            carried = nums[position]
            while True:
                # Drop the carried value k slots ahead and pick up what was there.
                position = (position + k) % len(nums)
                nums[position], carried = carried, nums[position]
                moved += 1
                if position == cycle_start:
                    break
            cycle_start += 1
# Time: O(n)
# Space: O(n)
class Solution4(object):
    """Rotate by slicing: rebuild the list as the last k elements followed
    by the first n - k elements."""

    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        pivot = len(nums) - k
        # Slice assignment keeps the same list object, so callers see the change.
        nums[:] = nums[pivot:] + nums[:pivot]
# Time: O(k * n)
# Space: O(1)
class Solution5(object):
    """Naive rotation: pop the last element and reinsert it at the front,
    repeated k times (O(k * n))."""

    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        for _ in range(k):
            nums.insert(0, nums.pop())
if __name__ == '__main__':
    # Quick manual check: rotating [1..7] right by 3 prints [5, 6, 7, 1, 2, 3, 4].
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)
| kamyu104/LeetCode | Python/rotate-array.py | Python | mit | 2,999 |
import htmls
from django import test
from django.conf import settings
from django.template.loader import render_to_string
from model_bakery import baker
from devilry.devilry_account.templatetags import devilry_account_tags
class TestDevilryUserVerboseInline(test.TestCase):
    """Tests for the ``devilry_user_verbose_inline`` template tag.

    Renders user-verbose-inline.django.html and asserts on the CSS classes
    and text produced for users with and without a ``fullname``.
    """

    def test_no_fullname_cssclass(self):
        # Without a fullname only the shortname-only variant is rendered.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/user-verbose-inline.django.html',
                             devilry_account_tags.devilry_user_verbose_inline(testuser)))
        self.assertTrue(selector.exists('.devilry-user-verbose-inline-shortnameonly'))
        self.assertFalse(selector.exists('.devilry-user-verbose-inline-both'))

    def test_no_fullname_text(self):
        # Without a fullname the rendered text is just the shortname.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='',
                              shortname='testuser')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/user-verbose-inline.django.html',
                             devilry_account_tags.devilry_user_verbose_inline(testuser)))
        self.assertEqual('testuser',
                         selector.one('.devilry-user-verbose-inline').alltext_normalized)

    def test_with_fullname_cssclass(self):
        # With a fullname the "both" variant is rendered instead.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='Test User')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/user-verbose-inline.django.html',
                             devilry_account_tags.devilry_user_verbose_inline(testuser)))
        self.assertFalse(selector.exists('.devilry-user-verbose-inline-shortnameonly'))
        self.assertTrue(selector.exists('.devilry-user-verbose-inline-both'))

    def test_with_fullname_text(self):
        # With a fullname the output is "<fullname>(<shortname>)".
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='Test User',
                              shortname='testuser')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/user-verbose-inline.django.html',
                             devilry_account_tags.devilry_user_verbose_inline(testuser)))
        self.assertEqual('Test User',
                         selector.one('.devilry-user-verbose-inline-fullname').alltext_normalized)
        self.assertEqual('(testuser)',
                         selector.one('.devilry-user-verbose-inline-shortname').alltext_normalized)
        self.assertEqual('Test User(testuser)',
                         selector.one('.devilry-user-verbose-inline').alltext_normalized)
class TestDevilryMultipleUsersVerboseInline(test.TestCase):
    """Tests for the ``devilry_multiple_users_verbose_inline`` template tag.

    Mirrors TestDevilryUserVerboseInline for the list-of-users variant and
    additionally verifies rendering of multiple users.
    """

    def test_no_fullname_cssclass(self):
        # Without a fullname only the shortname-only variant is rendered.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/multiple-users-verbose-inline.django.html',
                             devilry_account_tags.devilry_multiple_users_verbose_inline([testuser])))
        self.assertTrue(selector.exists('.devilry-user-verbose-inline-shortnameonly'))
        self.assertFalse(selector.exists('.devilry-user-verbose-inline-both'))

    def test_no_fullname_text(self):
        # Without a fullname the rendered text is just the shortname.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='',
                              shortname='testuser')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/multiple-users-verbose-inline.django.html',
                             devilry_account_tags.devilry_multiple_users_verbose_inline([testuser])))
        self.assertEqual('testuser',
                         selector.one('.devilry-user-verbose-inline').alltext_normalized)

    def test_with_fullname_cssclass(self):
        # With a fullname the "both" variant is rendered instead.
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='Test User')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/multiple-users-verbose-inline.django.html',
                             devilry_account_tags.devilry_multiple_users_verbose_inline([testuser])))
        self.assertFalse(selector.exists('.devilry-user-verbose-inline-shortnameonly'))
        self.assertTrue(selector.exists('.devilry-user-verbose-inline-both'))

    def test_with_fullname_text(self):
        # With a fullname the output is "<fullname>(<shortname>)".
        testuser = baker.make(settings.AUTH_USER_MODEL,
                              fullname='Test User',
                              shortname='testuser')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/multiple-users-verbose-inline.django.html',
                             devilry_account_tags.devilry_multiple_users_verbose_inline([testuser])))
        self.assertEqual('Test User',
                         selector.one('.devilry-user-verbose-inline-fullname').alltext_normalized)
        self.assertEqual('(testuser)',
                         selector.one('.devilry-user-verbose-inline-shortname').alltext_normalized)
        self.assertEqual('Test User(testuser)',
                         selector.one('.devilry-user-verbose-inline').alltext_normalized)

    def test_multiple(self):
        # Each user in the list is rendered as its own inline element, in order.
        testuser1 = baker.make(settings.AUTH_USER_MODEL,
                               shortname='testuser1')
        testuser2 = baker.make(settings.AUTH_USER_MODEL,
                               shortname='testuser2')
        selector = htmls.S(
            render_to_string('devilry_account/templatetags/multiple-users-verbose-inline.django.html',
                             devilry_account_tags.devilry_multiple_users_verbose_inline(
                                 [testuser1, testuser2])))
        shortnames = [element.alltext_normalized
                      for element in selector.list('.devilry-user-verbose-inline')]
        self.assertEqual(['testuser1', 'testuser2'], shortnames)
| devilry/devilry-django | devilry/devilry_account/tests/test_devilry_account_tags.py | Python | bsd-3-clause | 5,952 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <https://www.gnu.org/licenses/>.
import sys
from groups import group_lists
"""
Uses the groups module to generate Mutt crypt-hooks from gpg.conf.
"""
# Output file: first CLI argument when given, otherwise prompt interactively.
if len(sys.argv) >= 2:
    hook_file = sys.argv[1]
else:
    hook_file = input("Enter the filename to save the crypt-hooks in: ")

# Overwrite the target file with an explanatory header about Mutt hook syntax.
with open(hook_file, "w") as f:
    f.write("""# Change settings based upon message recipient
#
# send-hook [!]<pattern> <command>
#
# <command> is executed when sending mail to an address matching <pattern>
#
# crypt-hook regexp key-id
# The crypt-hook command provides a method by which you can
# specify the ID of the public key to be used when encrypting
# messages to a certain recipient. The meaning of "key ID" is to
# be taken broadly: This can be a different e-mail address, a
# numerical key ID, or even just an arbitrary search string. You
# may use multiple crypt-hooks with the same regexp; multiple
# matching crypt-hooks result in the use of multiple key-ids for a
# recipient.
""")

# Append one crypt-hook line per key ID for every group entry from gpg.conf.
for n in range(len(group_lists)):
    # Escape dots so the address matches literally in Mutt's regexp
    # (each "." becomes "\\." in the written file).
    rule = group_lists[n][0].replace(".", "\\\\.")
    with open(hook_file, "a") as f:
        f.write("\n")
        f.write("# {0}\n".format(group_lists[n][0]))
        for i in range(len(group_lists[n][1])):
            f.write("crypt-hook {0} {1}\n".format(rule, group_lists[n][1][i]))
| gpg/gpgme | lang/python/examples/howto/mutt-groups.py | Python | lgpl-2.1 | 2,447 |
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
    """Walk the 'csv' BIP9 deployment through every version-bits state
    (DEFINED -> STARTED -> LOCKED_IN -> ACTIVE) and check rule enforcement.

    Bug fix: ``generate_blocks`` used a mutable default argument
    (``test_blocks=[]``), so the shared default list kept accumulating blocks
    across the repeated default calls in ``test_BIP``. It now uses a ``None``
    sentinel (interface-compatible: explicit callers are unaffected).
    """

    def __init__(self):
        self.num_nodes = 1

    def setup_network(self):
        # A single whitelisted node is enough for the comparison framework.
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        self.test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        """Build an unsigned nVersion=2 transaction spending the coinbase of
        the given block to ``to_address``."""
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(rawtx))
        tx.deserialize(f)
        tx.nVersion = 2
        return tx

    def sign_transaction(self, node, tx):
        """Sign ``tx`` with the node's wallet and return the re-parsed tx."""
        signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx

    def generate_blocks(self, number, version, test_blocks=None):
        """Mine ``number`` solved blocks with the given nVersion, appending
        ``[block, True]`` entries to ``test_blocks`` and returning it.

        Advances self.tip / self.height / self.last_block_time as it goes.
        """
        if test_blocks is None:
            test_blocks = []
        for i in range(number):
            block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
            block.nVersion = version
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            self.height += 1
        return test_blocks

    def get_bip9_status(self, key):
        """Return the bip9_softforks entry for ``key`` from getblockchaininfo."""
        info = self.nodes[0].getblockchaininfo()
        for row in info['bip9_softforks']:
            if row['id'] == key:
                return row
        raise IndexError ('key:"%s" not found' % key)

    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
        """Drive one BIP9 deployment through all states.

        ``invalidate`` / ``invalidatePostSignature`` mutate a spending tx so
        it violates the new rules (before / after signing); ``bitno`` is the
        version bit assigned to the deployment.
        """
        # generate some coins for later
        self.coinbase_blocks = self.nodes[0].generate(2)
        self.height = 3  # height of the next block to build
        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())

        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert(bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert_equal(tmpl['version'], 0x20000000)

        # Test 1
        # Advance from DEFINED to STARTED
        test_blocks = self.generate_blocks(141, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert(tmpl['version'] & activated_version)

        # Test 2
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert(tmpl['version'] & activated_version)

        # Test 3
        # 108 out of 144 signal bit 1 to achieve LOCKED_IN
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])

        # Test 4
        # 143 more version 536870913 blocks (waiting period-1)
        test_blocks = self.generate_blocks(143, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])

        # Test 5
        # Check that the new rule is enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = activated_version
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
        yield TestInstance([[block, True]])

        assert_equal(self.get_bip9_status(bipName)['status'], 'active')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName in tmpl['rules'])
        assert(bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert(not (tmpl['version'] & (1 << bitno)))

        # Test 6
        # Check that the new sequence lock rules are enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()

        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = 5
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

        # Restart all
        # NOTE(review): wait_electrumds/shutil are expected to come from the
        # test_framework.util star import — confirm they are exported there.
        stop_nodes(self.nodes)
        wait_electrumds()
        shutil.rmtree(self.options.tmpdir)
        self.setup_chain()
        self.setup_network()
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread

    def get_tests(self):
        """Yield the csv deployment walk three times, each with a different
        way of violating the new rules."""
        for test in itertools.chain(
                self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
                self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
                self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
        ):
            yield test

    def donothing(self, tx):
        """No-op invalidator."""
        return

    def csv_invalidate(self, tx):
        '''Modify the signature in vin 0 of the tx to fail CSV
        Prepends -1 CSV DROP in the scriptSig itself.
        '''
        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
                                      list(CScript(tx.vin[0].scriptSig)))

    def sequence_lock_invalidate(self, tx):
        '''Modify the nSequence to make it fails once sequence lock rule is activated (high timespan)
        '''
        tx.vin[0].nSequence = 0x00FFFFFF
        tx.nLockTime = 0

    def mtp_invalidate(self, tx):
        '''Modify the nLockTime to make it fails once MTP rule is activated
        '''
        # Disable Sequence lock, Activate nLockTime
        tx.vin[0].nSequence = 0x90FFFFFF
        tx.nLockTime = self.last_block_time
if __name__ == '__main__':
    # Run the BIP9 softfork state-machine test when executed directly.
    BIP9SoftForksTest().main()
| sinraf96/electrum | qa/rpc-tests/bip9-softforks.py | Python | mit | 9,959 |
#!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyshell.arg.accessor.container import ContainerAccessor
| djo938/supershell | pyshell/arg/accessor/test/container_test.py | Python | gpl-3.0 | 792 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.tools.graph_transforms import TransformGraph
class TransformGraphTest(test.TestCase):
  """Tests for the TransformGraph Python wrapper."""

  # This test constructs a graph with a relu op that's not used by the normal
  # inference path, and then tests that the strip_unused transform removes it as
  # expected.
  def testTransformGraph(self):
    """strip_unused_nodes removes ops not needed by the requested outputs."""
    input_graph_def = graph_pb2.GraphDef()

    # Two scalar-shaped float constants feeding the add.
    const_op1 = input_graph_def.node.add()
    const_op1.op = "Const"
    const_op1.name = "const_op1"
    const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    const_op1.attr["value"].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            [1, 2], dtypes.float32, [1, 2])))

    const_op2 = input_graph_def.node.add()
    const_op2.op = "Const"
    const_op2.name = "const_op2"
    const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    const_op2.attr["value"].CopyFrom(
        attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
            [3, 4], dtypes.float32, [1, 2])))

    # Create an add that has two constants as inputs.
    add_op = input_graph_def.node.add()
    add_op.op = "Add"
    add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    add_op.name = "add_op"
    add_op.input.extend(["const_op1", "const_op2"])

    # Create a relu that reads from the add.
    relu_op = input_graph_def.node.add()
    relu_op.op = "Relu"
    relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
        type=dtypes.float32.as_datatype_enum))
    relu_op.name = "relu_op"
    relu_op.input.extend(["add_op"])

    # We're specifying that add_op is the final output, and so the relu isn't
    # needed.
    input_names = []
    output_names = ["add_op"]
    transforms = ["strip_unused_nodes"]
    transformed_graph_def = TransformGraph(input_graph_def, input_names,
                                           output_names, transforms)

    # We expect that the relu is no longer present after running the transform.
    for node in transformed_graph_def.node:
      self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
  # Run the test suite when executed directly.
  test.main()
| tensorflow/tensorflow | tensorflow/tools/graph_transforms/python/transform_graph_test.py | Python | apache-2.0 | 3,185 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from pisi.actionsapi import cmaketools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir="ntadej-tano-78a3b67"
def setup():
    """Configures the CMake project in an out-of-source build/ directory."""
    shelltools.makedirs("build")
    shelltools.cd("build")
    # Release build with the VLC-Qt editor enabled; sourceDir=".." points
    # back at the unpacked sources since we are now inside build/.
    cmaketools.configure("-DCMAKE_INSTALL_PREFIX=/usr \
                          -DCMAKE_BUILD_TYPE=Release \
                          -DCMAKE_CXX_FLAGS_RELEASE:STRING='-DNDEBUG' \
                          -DCMAKE_C_FLAGS_RELEASE:STRING='-DNDEBUG' \
                          -DEDITOR_WITH_VLCQT=ON", sourceDir="..")
def build():
    """Compiles the project inside the build/ directory."""
    shelltools.cd("build")
    cmaketools.make()
def install():
    """Installs the compiled project and ships the documentation files."""
    shelltools.cd("build")
    cmaketools.install()
    # Back to the source root: the doc files live there, not in build/.
    shelltools.cd("..")
    pisitools.dodoc("AUTHORS", "INSTALL", "NEWS", "LICENSE.GPL", "README", "VERSION")
| aydemir/pardus-magic | tano/actions.py | Python | bsd-3-clause | 794 |
# -*- coding: utf-8 -*-
#
# Copyright © 2010 Andreas Blixt
#
# This file is part of Storyteller.
#
# Storyteller is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Storyteller is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Storyteller. If not, see http://www.gnu.org/licenses/.
#
import time
from storyteller import settings
def _get_value(obj, name):
"""Gets a value from an object. First tries to get the attribute with the
specified name. If that fails, it tries to use the object as a dict
instead. If the value is callable, the return value of the callable is
used.
"""
try:
value = getattr(obj, name)
except AttributeError:
try:
# If the attribute doesn't exist, attempt to use the object as a dict.
value = obj[name]
except:
# Failing that, just return None.
return None
# If the value is callable, call it and use its return value.
return value() if callable(value) else value
def get_dict(obj, attributes):
    """Returns a dict with keys/values of a list of attributes from an object.

    Each entry in *attributes* is either a plain attribute name or a packed
    pair of (attribute name, alias to use as the key in the result).  Dotted
    names walk down into nested objects/dicts via _get_value.
    """
    result = {}
    for entry in attributes:
        if isinstance(entry, basestring):
            name, alias = entry, None
        else:
            # Packed pair: the attribute name and the key it should have in
            # the resulting dict.
            name, alias = entry
        # Walk from the original object; keep `obj` untouched for the next
        # iteration.
        current = obj
        if '.' in name:
            # Dots in the name fetch values deeper into the structure; the
            # default alias ends up being the first path component.
            for part in name.split('.'):
                current = _get_value(current, part)
                if not alias:
                    alias = part
        else:
            current = _get_value(current, name)
        result[alias if alias else name] = current
    return result
def public(func):
    """Decorator marking *func* as publicly accessible; returns it unchanged."""
    setattr(func, '__public', True)
    return func
def set_cookie(handler, name, value, expires=None, path='/'):
    """Sets a cookie on the response and mirrors it into the request.

    *expires*, when given, must provide timetuple() (e.g. a datetime) and is
    rendered in the classic "Wdy, DD-Mon-YYYY HH:MM:SS GMT" cookie format.
    """
    # Assemble the cookie attributes, inserting `expires` only when present.
    pieces = ['%s=%s' % (name, value)]
    if expires:
        ts = time.strftime('%a, %d-%b-%Y %H:%M:%S GMT', expires.timetuple())
        pieces.append('expires=%s' % ts)
    pieces.append('path=%s' % path)
    # Send cookie to browser and record it on the request as well.
    handler.response.headers['Set-Cookie'] = '; '.join(pieces)
    handler.request.cookies[name] = value
| blixt/storyteller | src/storyteller/utils.py | Python | gpl-3.0 | 3,118 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-01 06:42
from __future__ import unicode_literals
import os
from django.db import migrations
from django.contrib.auth.hashers import make_password
def create_default_superuser(apps, schema_editor):
    """
    Creates a default super user, plus a sample chart and validation.

    Credentials come from the ADMIN_USERNAME / ADMIN_PASS environment
    variables, falling back to admin / admin@123.
    NOTE(review): the hard-coded fallback password is a security risk if it
    ever reaches a production deployment unchanged.
    """
    # Use the historical model via apps.get_model (not the live auth.User)
    # so this data migration stays valid as the schema evolves.
    User = apps.get_model('auth', 'user')
    username = os.environ.get('ADMIN_USERNAME', 'admin')
    password = os.environ.get('ADMIN_PASS', 'admin@123')
    default_super_user = User(
        username=username,
        is_superuser=True,
        # Hash the plain-text password before storing it.
        password=make_password(password),
        is_staff=True
    )
    default_super_user.save()

    # Seed a demo pie chart so a fresh install has something to render.
    Chart = apps.get_model('squealy', 'Chart')
    default_chart = Chart(
        name='Welcome To Squealy',
        url='welcome-to-squealy',
        query="""
            /*
            This is a sample report auto-generated by SQueaLy.
            ------------------------------------------------------
            Title - Worldwide Buisiness Analytics Market Share
            */
            SELECT 'SAP' as company, 21 as share
            union
            SELECT 'Oracle', 14
            union
            SELECT 'IBM', 13
            union
            SELECT 'SAS Institute', 12
            union
            SELECT 'Microsoft', 9
            union
            SELECT 'Others', 31;
        """,
        type='PieChart',
        options={
            "is3D": "true",
            "pieSliceText": "label",
            "title": "Worldwide Buisiness Analytics Market Share",
            "height": 350
        }
    )
    default_chart.save()

    # Attach a trivially-passing validation to the demo chart.
    Validation = apps.get_model('squealy', 'Validation')
    default_validation = Validation(
        chart=default_chart,
        query="""
            /*This validation will pass
            if at least 1 row is returned.*/
            select 1,2,3 as "forcing_validation_pass";
        """,
        name='sample-validation'
    )
    default_validation.save()
class Migration(migrations.Migration):
    """Seeds the database with a default superuser, demo chart and validation."""

    dependencies = [
        ('squealy', '0001_initial'),
        ('auth', '0001_initial'),
    ]

    operations = [
        # Forward-only data migration; no reverse function is provided, so
        # it cannot be unapplied without a manual cleanup.
        migrations.RunPython(create_default_superuser),
    ]
| dakshgautam/squealy | squealy/migrations/default_superuser.py | Python | mit | 2,046 |
from algorithms.sorting.bubblesort import bubble_sort
def test_bubble_sort_small(array_ints_small, assert_sorted):
    """bubble_sort correctly sorts the small integer fixture.

    Both arguments are pytest fixtures supplied by the test suite's conftest.
    """
    assert_sorted(array_ints_small, bubble_sort)
def test_bubble_sort_large(array_ints_large, assert_sorted):
    """bubble_sort on a larger fixture, capped at 800 elements — presumably
    because bubble sort is O(n^2) and the full fixture would be slow.
    """
    assert_sorted(array_ints_large[:800], bubble_sort)
| dieb/algorithms.py | tests/sorting/test_bubblesort.py | Python | mit | 283 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import mle
import network_layer
import node
LEADER = 1
ROUTER1 = 2
class Cert_5_1_05_RouterAddressTimeout(unittest.TestCase):
    """Thread certification test 5.1.05: router address timeout.

    Two-node topology (LEADER, ROUTER1) linked via whitelists.  ROUTER1 is
    reset twice: after a 200 s outage it must rejoin with a different RLOC16,
    while after a 300 s outage it gets its previous RLOC16 back.
    NOTE(review): presumably this exercises the leader's router-ID release
    and reassignment behaviour — confirm against the Thread specification.
    """

    def setUp(self):
        # Fresh simulator and nodes for every test run.
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1, 3):
            self.nodes[i] = node.Node(i, simulator=self.simulator)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[ROUTER1].set_panid(0xface)
        self.nodes[ROUTER1].set_mode('rsdn')
        self._setUpRouter1()

    def _setUpRouter1(self):
        # Re-applied after every reset: the whitelist and router selection
        # jitter must be configured again before the node is restarted.
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()
        self.nodes[ROUTER1].set_router_selection_jitter(1)

    def tearDown(self):
        # NOTE(review): the loop variable shadows the imported `node` module;
        # harmless here but worth renaming.
        for node in list(self.nodes.values()):
            node.stop()
            node.destroy()
        self.simulator.stop()

    def test(self):
        """Runs the reset/rejoin sequence and checks the message flows."""
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        rloc16 = self.nodes[ROUTER1].get_addr16()

        # First outage: reset and stay down for 200 s before restarting.
        self.nodes[ROUTER1].reset()
        self._setUpRouter1()
        self.simulator.go(200)
        self.nodes[ROUTER1].start()
        self.simulator.go(15)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        # After this outage the router must come back with a new RLOC16.
        self.assertNotEqual(self.nodes[ROUTER1].get_addr16(), rloc16)
        rloc16 = self.nodes[ROUTER1].get_addr16()

        # Second outage: 300 s down; this time the same RLOC16 is expected.
        self.nodes[ROUTER1].reset()
        self._setUpRouter1()
        self.simulator.go(300)
        self.nodes[ROUTER1].start()
        self.simulator.go(15)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        self.assertEqual(self.nodes[ROUTER1].get_addr16(), rloc16)

        leader_messages = self.simulator.get_messages_sent_by(LEADER)
        router1_messages = self.simulator.get_messages_sent_by(ROUTER1)

        # 2 - All: initial MLE attach (parent request/response, child ID).
        leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
        router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)

        # 3 - Router1: reattach after the first reset, then solicit a router
        # address via CoAP POST to /a/as.
        router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
        router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg = router1_messages.next_coap_message("0.02")
        msg.assertCoapMessageRequestUriPath("/a/as")

        # 4 - Leader: grants the address solicit with status SUCCESS.
        leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
        msg = leader_messages.next_coap_message("2.04")
        msg.assertCoapMessageContainsTlv(network_layer.Status)
        msg.assertCoapMessageContainsOptionalTlv(network_layer.RouterMask)
        status_tlv = msg.get_coap_message_tlv(network_layer.Status)
        self.assertEqual(network_layer.StatusValues.SUCCESS, status_tlv.status)

        # 6 - Router1: same exchange after the second reset.
        router1_messages.next_mle_message(mle.CommandType.LINK_REQUEST)
        router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
        router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg = router1_messages.next_coap_message("0.02")
        msg.assertCoapMessageRequestUriPath("/a/as")

        # 7 - Leader: again responds with status SUCCESS.
        leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
        msg = leader_messages.next_coap_message("2.04")
        msg.assertCoapMessageContainsTlv(network_layer.Status)
        msg.assertCoapMessageContainsOptionalTlv(network_layer.RouterMask)
        status_tlv = msg.get_coap_message_tlv(network_layer.Status)
        self.assertEqual(network_layer.StatusValues.SUCCESS, status_tlv.status)
# Allow running this certification test directly as a script.
if __name__ == '__main__':
    unittest.main()
| gandreello/openthread | tests/scripts/thread-cert/Cert_5_1_05_RouterAddressTimeout.py | Python | bsd-3-clause | 5,990 |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 22:49:20 2013
stab
This is a direct re-implementation of Des Higham's SDE scripts.
Mean-square and asymptotic stability test for Euler-Maruyama
SDE is
dX = \lambda X dt + \mu X dW, X(0) = X_0
with \lambda = -3, \mu = \sqrt{3}, X_0 = 1.
@author: ih3
"""
import numpy as np
import matplotlib.pyplot as plt
# Mean-square stability experiment: lambda = -3, mu = sqrt(3), X_0 = 1,
# averaged over M sample paths up to time T.
T = 20.0
M = 50000
X0 = 1.0
lam = -3.0
mu = np.sqrt(3.0)
dt = 0.25
N = 80

plt.subplot(2, 1, 1)
for k in range(3):
    Dt = 2**k*dt
    # Floor division: L is used as an array length and range() bound, so it
    # must be an int.  Plain "/" is true division in Python 3 and would
    # produce a float, breaking np.zeros(L) and range(L).
    L = N // 2**k
    Xms = np.zeros(L)
    Xtemp = X0 * np.ones(M)
    t = np.linspace(0, T, L)
    for j in range(L):
        # One Euler-Maruyama step for all M paths at once.
        Winc = np.sqrt(Dt)*np.random.randn(M)
        Xtemp += (Dt*lam + mu*Winc)*Xtemp
        Xms[j] = np.mean(Xtemp**2)
    plt.semilogy(t, Xms)
plt.legend(('\Delta t = 1/4', '\Delta t = 1/2', '\Delta t = 1'))
plt.title('Mean-Square: \lambda = -3, \mu = \sqrt{3}')
plt.ylabel('E[X^2]')

# Asymptotic stability experiment on a single path: lambda = 1/2,
# mu = sqrt(6), up to time T = 500.
plt.subplot(2, 1, 2)
T = 500.0
lam = 0.5
mu = np.sqrt(6)
N = 2000
for k in range(3):
    Dt = 2**k*dt
    L = N // 2**k  # same floor-division fix as above
    Xemabs = np.zeros(L)
    Xtemp = X0
    t = np.linspace(0, T, L)
    for j in range(L):
        Winc = np.sqrt(Dt)*np.random.randn(1)
        Xtemp += (Dt*lam + mu*Winc)*Xtemp
        Xemabs[j] = np.abs(Xtemp)
    plt.semilogy(t, Xemabs)
plt.legend(('\Delta t = 1/4', '\Delta t = 1/2', '\Delta t = 1'))
plt.title('Single Path: \lambda = 1/2, \mu = \sqrt{6}')
plt.ylabel('|X|')
plt.show()
| IanHawke/HighamSDEs | stab.py | Python | unlicense | 1,407 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest.thirdparty.boto import test as boto_test
class EC2SecurityGroupTest(boto_test.BotoTestCase):
    """EC2 (boto) security-group lifecycle checks."""

    @classmethod
    def resource_setup(cls):
        super(EC2SecurityGroupTest, cls).resource_setup()
        # Shared EC2 API client for all tests in the class.
        cls.client = cls.os.ec2api_client

    def test_create_authorize_security_group(self):
        # EC2 Create, authorize/revoke security group
        # NOTE(review): "securty_group" is a pre-existing typo, left alone
        # because it is a runtime value (randomised resource name prefix).
        group_name = data_utils.rand_name("securty_group-")
        group_description = group_name + " security group description "
        group = self.client.create_security_group(group_name,
                                                  group_description)
        # Ensure the group is deleted even if later assertions fail.
        self.addResourceCleanUp(self.client.delete_security_group, group_name)
        groups_get = self.client.get_all_security_groups(
            groupnames=(group_name,))
        self.assertEqual(len(groups_get), 1)
        group_get = groups_get[0]
        self.assertEqual(group.name, group_get.name)
        self.assertEqual(group.name, group_get.name)
        # ping (icmp_echo) and other icmp allowed from everywhere
        # from_port and to_port act as icmp type
        success = self.client.authorize_security_group(group_name,
                                                       ip_protocol="icmp",
                                                       cidr_ip="0.0.0.0/0",
                                                       from_port=-1,
                                                       to_port=-1)
        self.assertTrue(success)
        # allow standard ssh port from anywhere
        success = self.client.authorize_security_group(group_name,
                                                       ip_protocol="tcp",
                                                       cidr_ip="0.0.0.0/0",
                                                       from_port=22,
                                                       to_port=22)
        self.assertTrue(success)
        # TODO(afazekas): Duplicate tests
        group_get = self.client.get_all_security_groups(
            groupnames=(group_name,))[0]
        # remove listed rules
        for ip_permission in group_get.rules:
            for cidr in ip_permission.grants:
                self.assertTrue(self.client.revoke_security_group(group_name,
                                ip_protocol=ip_permission.ip_protocol,
                                cidr_ip=cidr,
                                from_port=ip_permission.from_port,
                                to_port=ip_permission.to_port))
        group_get = self.client.get_all_security_groups(
            groupnames=(group_name,))[0]
        # all rules should be removed now
        self.assertEqual(0, len(group_get.rules))
| afaheem88/tempest_neutron | tempest/thirdparty/boto/test_ec2_security_groups.py | Python | apache-2.0 | 3,379 |
# Copyright (C) 2009 Stijn Cole
# Copyright (C) 2010-2011 Richard Lincoln
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import zeros
from scipy.sparse.linalg import splu
from pypower.makeYbus import makeYbus
def AugYbus(baseMVA, bus, branch, xd_tr, gbus, P, Q, U0):
    """ Constructs augmented bus admittance matrix Ybus.
    @param baseMVA: power base
    @param bus: bus data
    @param branch: branch data
    @param xd_tr: d component of transient reactance
    @param gbus: generator buses
    @param P: load active power
    @param Q: load reactive power
    @param U0: steady-state bus voltages
    @return: factorised augmented bus admittance matrix
    @see: U{http://www.esat.kuleuven.be/electa/teaching/matdyn/}
    """
    # Calculate bus admittance matrix
    Ybus, _, _ = makeYbus(baseMVA, bus, branch)

    # Calculate equivalent load admittance (constant-power load at U0).
    yload = (P - 1j * Q) / (abs(U0)**2)

    # Calculate equivalent generator admittance.  1/(1j*xd') is purely
    # imaginary, so the array must be complex: a real-valued zeros() would
    # silently drop the imaginary part on assignment (numpy ComplexWarning),
    # leaving the generator admittance entirely out of the augmented matrix.
    ygen = zeros(Ybus.shape[0], dtype=complex)
    ygen[gbus] = 1 / (1j * xd_tr)

    # Add equivalent load and generator admittance onto the Ybus diagonal.
    for i in range(Ybus.shape[0]):
        Ybus[i, i] = Ybus[i, i] + ygen[i] + yload[i]

    # Factorise (splu converts to CSC internally if needed).
    return splu(Ybus)
| rwl/PyDyn | pydyn/AugYbus.py | Python | apache-2.0 | 1,727 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.