text stringlengths 4 1.02M | meta dict |
|---|---|
from django.urls import path
from wagtail.contrib.forms.views import (
DeleteSubmissionsView,
FormPagesListView,
get_submissions_list_view,
)
app_name = "wagtailforms"

# Admin URL routes for form pages: the index listing, plus per-page
# submission listing and deletion views.
urlpatterns = [
    path("", FormPagesListView.as_view(), name="index"),
    path("submissions/<int:page_id>/", get_submissions_list_view, name="list_submissions"),
    path("submissions/<int:page_id>/delete/", DeleteSubmissionsView.as_view(), name="delete_submissions"),
]
| {
"content_hash": "9299c8bca981efce55ed006188db0ace",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 88,
"avg_line_length": 25.05,
"alnum_prop": 0.6526946107784432,
"repo_name": "zerolab/wagtail",
"id": "bd209c42e8ab052fb15be65b72d4dd87b7c8fa3d",
"size": "501",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/contrib/forms/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from heat_integrationtests.common import test
from heat_integrationtests.scenario import scenario_base
LOG = logging.getLogger(__name__)
class CeilometerAlarmTest(scenario_base.ScenarioTestsBase):
    """Scenario test: a Ceilometer alarm triggers autoscaling of a stack.

    Creates a stack containing an alarm and an autoscaling group, pushes a
    metric sample that should trip the alarm, then waits for the group to
    grow to the expected size.
    """

    def setUp(self):
        super(CeilometerAlarmTest, self).setUp()
        self.template = self._load_template(
            __file__, 'test_ceilometer_alarm.yaml', 'templates')

    def check_instance_count(self, stack_identifier, expected):
        # Compare the stack's reported autoscaling-group size with the
        # expected count; log a warning on mismatch so polling is traceable.
        stack = self.client.stacks.get(stack_identifier)
        actual = self._stack_output(stack, 'asg_size')
        matched = actual == expected
        if not matched:
            LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
                                                                 actual))
        return matched

    def test_alarm(self):
        """Confirm we can create an alarm and trigger it."""
        # 1. create the stack
        stack_identifier = self.stack_create(template=self.template)
        # 2. send ceilometer a metric (should cause the alarm to fire)
        sample = {
            'counter_type': 'gauge',
            'counter_name': 'test_meter',
            'counter_volume': 1,
            'counter_unit': 'count',
            'resource_metadata': {
                'metering.stack_id': stack_identifier.split('/')[-1],
            },
            'resource_id': 'shouldnt_matter',
        }
        self.metering_client.samples.create(**sample)
        # 3. confirm we get a scaleup.
        # Note: there is little point waiting more than 60s+time to scale up.
        self.assertTrue(test.call_until_true(
            120, 2, self.check_instance_count, stack_identifier, 2))
| {
"content_hash": "599b0ef0a52db5e6b8bc033eb910bfb5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 41.355555555555554,
"alnum_prop": 0.584631918323482,
"repo_name": "steveb/heat",
"id": "aa29861a8e1722a6057a6c5131fb96c94b15dca8",
"size": "2434",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "heat_integrationtests/scenario/test_ceilometer_alarm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1226938"
},
{
"name": "Shell",
"bytes": "17870"
}
],
"symlink_target": ""
} |
"""
This module provides the function riptimize() that is intended to
optimize AWS Reserved Instance (RI) utilization by redistributing RIs
across the typically 3 availability zones (AZs) in a single RI holding
account. The premise of the script is that there exist many AWS accounts
linked via consolidated billing in which on-demand instances are being
launched. One special account is designated as an RI holding account
where all RIs are purchased. In a consolidated billing setup the RIs
reserved in one account can actually be "used" by an instance launched in
another account, therefore, it is sufficient to keep all RIs for
simplicity in one account. It is also assumed that all linked accounts
have already remapped their logical AZs to correspond to the same
physical datacenters, and that only 3 AZs are being used. If some
accounts have running instances in other AZs, a recommendation is issued
to migrate them to one of the 3 supported AZs. If a corresponding option
('optimize') is selected, the function will actually perform needed
modifications in order to migrate the RIs and thus increase RI
utilization.
"""
import time
import boto
import boto.ec2
# TODO Idea: publish RI utilization metrics to CloudWatch so they can be
# viewed in the console
# TODO Idea: save results of script execution into an S3 bucket for
# logging purposes
def riptimize(all_accounts, ri_account_credentials, region, optimize=False,
              publish_metrics=True):
    """
    Analyze RI utilization and optionally redistribute RIs across AZs.

    Arguments:
    all_accounts -- a dict carrying the account credentials of all accounts
        in the consolidated account tree that need to be taken into
        consideration. The key is some account identifier, typically the
        12 digit AWS Account ID. The value is a tuple of Access Key ID
        and Secret Access Key.
    ri_account_credentials -- a tuple containing the account credentials
        for the RI holding account.
    region -- the function is designed to be used in one specific region,
        e.g. 'us-east-1', specified in this parameter.
    optimize -- when True the optimization modifications (if any) will be
        executed for real, otherwise they will execute in a DRY-RUN mode.
        Optional, False by default.
    publish_metrics -- when False, do not publish RI usage metrics to
        CloudWatch. True by default.

    Returns a large tuple consisting of the following information:
    i_inventory -- a dict keyed by (instance_type, availability_zone),
        with the value being the count of running on-demand instances.
    i_inventory_by_account -- a dict keyed by account_id where the values
        are shaped like i_inventory above.
    ri_inventory -- same as i_inventory but for RIs in the RI holding
        account; RIs in other accounts are ignored.
    supported_ri_zones -- a list of availability zones used by the RI
        holding account; RIs can only move between these zones.
    processing_modifications -- a list of modification_ids of all previous
        RI modifications that are still in the 'processing' state.
    clean_mismatch -- a dict similar to i_inventory, except values are
        count differences between the corresponding values of ri_inventory
        and i_inventory, i.e. negative values mean that RIs are needed for
        that combination of instance type and availability zone.
    recommendations -- a tuple of two elements: first, the sub-inventory
        of on-demand instances running in unsupported availability zones;
        second, the overall RI surplus (or deficit) for each instance type
        aggregated over all availability zones.
    plan -- if RIs can be redistributed for optimization reasons, the
        high-level RI modification plan: a list of tuples, each containing
        the instance type, the source AZ, the destination AZ and the
        number of RIs to move from source AZ to destination AZ.
    modification_ids -- once the plan is translated into specific RI
        modifications and those are kicked off, the modification IDs
        returned by ModifyReservedInstances, so they can be tracked later.
    """
    # 1. get the inventory for on-demand instances running in all linked accounts
    i_inventory_by_account = get_i_inventory_by_account(all_accounts, region)
    i_inventory = aggregate_inventory(i_inventory_by_account)
    # 2. get the RI inventory in the RI holding account, supported RI zones and any previous RI modifications that are still being processed
    ri_inventory, supported_ri_zones, processing_modifications = get_ri_inventory(ri_account_credentials, region)
    modifications_inflight = len(processing_modifications) != 0
    # 3. compute On-demand/RI inventory mismatches per availability zone
    mismatch = compute_ri_mistmatch(ri_inventory, i_inventory)
    # 4. get rid of mismatches in zones that RIs do not cover in the RI holding account
    clean_mismatch, eliminated_i_inventory = eliminate_unsupported_zones(mismatch, supported_ri_zones)
    # 5. figure out what the RI surplus (or deficit) is for each instance type across all linked accounts
    ri_surplus = compute_ri_surplus(clean_mismatch)
    # get rid of entries where RIs and running instances are perfectly balanced
    ri_imbalance = {itype: diff for itype, diff in ri_surplus.items() if diff != 0}
    # 6. create recommendations for migrating instances to supported zones, purchasing more RIs and/or starting more instances
    recommendations = (eliminated_i_inventory, ri_imbalance)
    # 7. if RI redistributions are possible that would optimize RI utilization, generate a modification plan
    modification_ids = []
    # for now generate a "greedy" plan. Eventually, a smarter plan can be created, e.g. one that minimizes modifications
    plan = greedy_distribution(clean_mismatch)
    if len(plan) > 0:
        perform_optimization = optimize and not modifications_inflight
        # 8. execute the plan either for real or in a DRY-RUN mode
        modification_ids = execute_plan(ri_account_credentials, region, plan, perform_optimization)
    # 9. publish RI usage metrics to CloudWatch
    if publish_metrics:
        # TODO also publish RI utilization metrics, % of utilization
        publish_cw_metrics(ri_account_credentials, region, ri_surplus)
    # 10. finally, return all the collected information for generation of reports, logging, etc.
    return (i_inventory, i_inventory_by_account, ri_inventory, supported_ri_zones, processing_modifications, clean_mismatch, recommendations, plan, modification_ids)
# TODO The function currently simply kicks off the proposed modifications without verifying whether they actually succeeded. Since the kinds of modifications
# performed by this script are not likely to fail (no new RIs are purchased in the process), the modifications are extremely unlikely to fail, but nonetheless
# monitoring the success of such modifications would be a recommended addition to the logic
def get_i_inventory_by_account(all_accounts, region):
    """Collect the running-instance inventory of every linked account.

    :param all_accounts: dict mapping account id -> (access key, secret key)
    :param region: AWS region name, e.g. 'us-east-1'
    :return: dict mapping account id -> per-account inventory dict
    """
    return {
        account_id: get_account_i_inventory(credentials, region)
        for account_id, credentials in all_accounts.items()
    }
def get_account_i_inventory(credentials, region):
    """Count running on-demand instances of one account by type and AZ.

    :param credentials: (access_key_id, secret_access_key) tuple
    :param region: AWS region name, e.g. 'us-east-1'
    :return: dict keyed by (instance_type, availability_zone) -> count
    """
    access_key_id, secret_access_key = credentials
    conn = boto.ec2.connect_to_region(
        region, aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    inventory = {}
    # TODO should instances that are launching at this very moment be
    # included in this report? Probably...
    filters = {'instance-state-name': 'running'}
    for instance in conn.get_only_instances(filters=filters):
        key = instance.instance_type, instance.placement
        inventory[key] = inventory.get(key, 0) + 1
    conn.close()
    return inventory
def aggregate_inventory(inventory_by_account):
    """Merge per-account inventories into a single aggregate inventory.

    :param inventory_by_account: dict mapping account id -> inventory dict
        keyed by (instance_type, availability_zone) with integer counts
    :return: one dict keyed by (instance_type, availability_zone) whose
        counts are summed over all accounts
    """
    totals = {}
    for account_inventory in inventory_by_account.values():
        for itype_and_az, count in account_inventory.items():
            totals[itype_and_az] = totals.get(itype_and_az, 0) + count
    return totals
def get_ri_inventory(ri_account_credentials, region):
    """Gather the RI inventory of the RI holding account.

    :param ri_account_credentials: (access_key_id, secret_access_key) tuple
        for the RI holding account
    :param region: AWS region name, e.g. 'us-east-1'
    :return: a 3-tuple of:
        ri_inventory -- dict keyed by (instance_type, availability_zone)
            with counts of active RIs
        supported_ri_zones -- list of AZ names visible to the RI account
        processing_modifications -- previous RI modifications that are
            still in the 'processing' state
    :raises RuntimeError: if any availability zone is not 'available'
    """
    access_key_id, secret_access_key = ri_account_credentials
    conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
    # first, find out which availability zones are present in the RI account
    supported_ri_zones = []  # just zone names
    zones = conn.get_all_zones()
    for z in zones:
        if z.state != 'available':
            # BUG FIX: the format arguments must be a tuple. The original
            # code did `% z.name, z.state`, which applies the two-specifier
            # format to z.name alone (a TypeError) and passes z.state as a
            # second argument to RuntimeError.
            raise RuntimeError(
                "Zone %s state is not available, i.e. %s" % (z.name, z.state))
        else:
            supported_ri_zones.append(z.name)
    # second, determine if there are still modifications that are being processed
    mod_filters = {'status': 'processing'}
    processing_modifications = conn.describe_reserved_instances_modifications(filters=mod_filters)
    # and finally, compile the RI inventory for the RI account
    ri_inventory = {}
    # possible RI group states: active, retired, payment-pending, payment-failed
    ri_filters = {'state': 'active'}
    for ri_group in conn.get_all_reserved_instances(filters=ri_filters):
        itype_and_az = ri_group.instance_type, ri_group.availability_zone
        if itype_and_az in ri_inventory:
            ri_inventory[itype_and_az] += ri_group.instance_count
        else:
            ri_inventory[itype_and_az] = ri_group.instance_count
    conn.close()
    return ri_inventory, supported_ri_zones, processing_modifications
def compute_ri_mistmatch(ri_inventory, i_inventory):
    """Compute per-(type, AZ) differences between RIs and instances.

    Positive values indicate surplus RIs; negative values indicate an RI
    deficit. Combinations that balance out exactly are dropped.
    """
    diffs = dict(ri_inventory)
    for itype_and_az, count in i_inventory.items():
        diffs[itype_and_az] = diffs.get(itype_and_az, 0) - count
    return {key: diff for key, diff in diffs.items() if diff != 0}
def compute_ri_surplus(clean_mismatch):
    """Aggregate per-AZ RI/instance imbalances into a per-type surplus.

    :param clean_mismatch: dict keyed by (instance_type, availability_zone)
        with count differences as values
    :return: dict keyed by instance type with imbalances summed over AZs
    """
    surplus = {}
    for (itype, _az), diff in clean_mismatch.items():
        surplus[itype] = surplus.get(itype, 0) + diff
    return surplus
def greedy_distribution(mismatch):
    """Build a greedy RI redistribution plan from a per-(type, AZ) mismatch.

    :param mismatch: dict keyed by (instance_type, availability_zone) with
        non-zero count differences: negative values need RIs (recipients),
        positive values have spare RIs (donors)
    :return: list of (instance_type, source_az, dest_az, count) moves
    """
    # separate into recipients and donors
    recipients = {}
    donors = {}
    for itype_and_az, diff in mismatch.items():
        if diff < 0:
            recipients[itype_and_az] = diff
        elif diff > 0:
            donors[itype_and_az] = diff
    plan = []
    for (recipient_itype, recipient_az), deficit in recipients.items():
        # BUG FIX: iterate over a snapshot of the donors. The original code
        # deleted exhausted donors from the dict while iterating
        # donors.items(), which raises RuntimeError on Python 3.
        for donor_itype_and_az, count in list(donors.items()):
            donor_itype, donor_az = donor_itype_and_az
            if donor_itype == recipient_itype:
                # greedily compensate the deficit
                move_count = min(abs(deficit), count)
                # update the plan with a new modification action
                plan.append((donor_itype, donor_az, recipient_az, move_count))
                # update the donor available count
                if count == move_count:
                    del donors[donor_itype_and_az]
                else:
                    donors[donor_itype_and_az] -= move_count
                # update deficit
                deficit += move_count
                if deficit >= 0:
                    break
    return plan
def eliminate_unsupported_zones(mismatch, supported_ri_zones):
    """Split a mismatch dict by whether the AZ is usable by the RI account.

    :return: (clean_mismatch, eliminated_i_inventory) where clean_mismatch
        keeps entries in supported zones unchanged, and
        eliminated_i_inventory holds the negated counts for entries in
        unsupported zones
    """
    clean_mismatch = {}
    eliminated_i_inventory = {}
    for itype_and_az, diff in mismatch.items():
        if itype_and_az[1] in supported_ri_zones:
            clean_mismatch[itype_and_az] = diff
        else:
            eliminated_i_inventory[itype_and_az] = -diff
    return clean_mismatch, eliminated_i_inventory
def execute_plan(ri_account_credentials, region, plan, optimize):
    """Translate the high-level plan into RI modifications and launch them.

    :param ri_account_credentials: (access key, secret key) of RI account
    :param region: AWS region name
    :param plan: list of (instance_type, source_az, dest_az, count) moves
    :param optimize: when False the modifications run in DRY-RUN mode
    :return: list of modification ids (or DRY-RUN placeholders)
    """
    access_key_id, secret_access_key = ri_account_credentials
    conn = boto.ec2.connect_to_region(
        region, aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    ri_groups = conn.get_all_reserved_instances(filters={'state': 'active'})
    modifications = {}  # keyed by the source RI group id
    for itype, source_az, dest_az, count in plan:
        # necessary to check g.instance_count > 0 below because the
        # following code could decrement it down to 0
        donor_groups = [g for g in ri_groups
                        if g.instance_type == itype
                        and g.availability_zone == source_az
                        and g.instance_count > 0]
        for donor_group in donor_groups:
            if count <= 0:
                break
            move_count = min(count, donor_group.instance_count)
            modifications.setdefault(donor_group.id, []).append(
                (donor_group, dest_az, move_count))
            count -= move_count
            donor_group.instance_count -= move_count
    modification_ids = [
        move_reserved_instances(conn, modification, optimize)
        for modification in modifications.values()]
    conn.close()
    return modification_ids
def move_reserved_instances(conn, move_descriptor_list, optimize):
    """Issue (or dry-run) a single ModifyReservedInstances call.

    All tuples in move_descriptor_list must reference one and the same
    donor RI group; each tuple is (donor_group, dest_az, move_count).
    """
    assert len(move_descriptor_list) > 0
    # id of the donor group taken from the first tuple
    donor_group_id = move_descriptor_list[0][0].id
    target_configurations = []
    for donor_group, dest_az, move_count in move_descriptor_list:
        # every tuple must carry the same RI group as the first one
        assert donor_group.id == donor_group_id
        target_configurations.append(
            boto.ec2.reservedinstance.ReservedInstancesConfiguration(
                availability_zone=dest_az, instance_count=move_count,
                platform="EC2-VPC"))
    # whatever is left in the donor group stays in its original zone
    if donor_group.instance_count > 0:
        target_configurations.append(
            boto.ec2.reservedinstance.ReservedInstancesConfiguration(
                availability_zone=donor_group.availability_zone,
                instance_count=donor_group.instance_count,
                platform="EC2-VPC"))
    reserved_instance_ids = [donor_group_id]
    if optimize:
        return conn.modify_reserved_instances(
            client_token=str(time.time()),
            reserved_instance_ids=reserved_instance_ids,
            target_configurations=target_configurations)
    else:
        return 'rimod-<DRY-RUN>'
def publish_cw_metrics(ri_account_credentials, region, ri_surplus):
    """Push per-instance-type RI surplus numbers to CloudWatch."""
    access_key_id, secret_access_key = ri_account_credentials
    conn = boto.connect_cloudwatch(
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    namespace = "RI-usage-%s" % region
    for itype, surplus in ri_surplus.items():
        conn.put_metric_data(namespace, "%s-available-RIs" % itype, surplus)
    conn.close()
| {
"content_hash": "8a611c39ce2c3de8efd9ed7e458ab1d5",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 212,
"avg_line_length": 49.532934131736525,
"alnum_prop": 0.653227756286267,
"repo_name": "dbatalov/ri-optimizer",
"id": "acc34f6954595823b03da32b66e0eddaf3a2a5af",
"size": "16544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riptimize.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25576"
}
],
"symlink_target": ""
} |
class ClusterTask(object):
    """
    Stores the different kind of tasks being performed by a cluster.

    Each instance registers itself in a class-level lookup table keyed by
    its numeric code, so it can be recovered later via from_code().
    """
    # Registry of all created tasks, keyed by integer code.
    _lookup = {}

    def __init__(self, code, name, description):
        self._code = int(code)
        self._name = name
        self._description = description
        ClusterTask._lookup[self._code] = self

    @property
    def code(self):
        """Numeric code identifying this task."""
        return self._code

    @property
    def name(self):
        """Short symbolic name of the task."""
        return self._name

    @property
    def description(self):
        """Human-readable description of the task."""
        return self._description

    def __eq__(self, other):
        if not isinstance(other, ClusterTask):
            return False
        return self._code == other._code

    def __ne__(self, other):
        # BUG FIX (Python 2): __ne__ is not derived from __eq__ on
        # Python 2, so without it `a != b` fell back to identity.
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX (Python 3): defining __eq__ sets __hash__ to None and
        # makes instances unhashable; hash on the field used for equality.
        return hash(self._code)

    @classmethod
    def from_code(cls, code):
        """Return the registered task for *code*, or None if unknown."""
        if code not in cls._lookup:
            return None
        return cls._lookup[code]

    def __str__(self):
        return "(%d %s %s)" % (self._code, self._name,
                               self._description)

    def __repr__(self):
        return "ClusterTask.%s (%s)" % (self._name,
                                        self._description)
class ClusterTasks(object):
    """Registry of the well-known cluster task constants.

    The ClusterTask constructor records each instance in its class-level
    lookup table as a side effect of these class-body assignments.
    """
    NONE = ClusterTask(0x01, 'NONE', 'No tasks for the cluster.')
    BUILDING_INITIAL = ClusterTask(
        0x02, 'BUILDING', 'Building the initial cluster.')
    DELETING = ClusterTask(0x03, 'DELETING', 'Deleting the cluster.')
    ADDING_SHARD = ClusterTask(
        0x04, 'ADDING_SHARD', 'Adding a shard to the cluster.')
    GROWING_CLUSTER = ClusterTask(
        0x05, 'GROWING_CLUSTER', 'Increasing the size of the cluster.')
    SHRINKING_CLUSTER = ClusterTask(
        0x06, 'SHRINKING_CLUSTER', 'Decreasing the size of the cluster.')
    # Dissuade further additions at run-time by disabling the constructor
    # once all known tasks have been registered.
    ClusterTask.__init__ = None
| {
"content_hash": "7cbe1693ec0cd7f13203437a21994db0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 73,
"avg_line_length": 29.084745762711865,
"alnum_prop": 0.5856643356643356,
"repo_name": "redhat-openstack/trove",
"id": "b91fc1a5fb66eab8ee923f998f567f04f93be28e",
"size": "2357",
"binary": false,
"copies": "4",
"ref": "refs/heads/mitaka-patches",
"path": "trove/cluster/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2637613"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
import mock
import testscenarios
from oslo import messaging
from oslo.messaging import serializer as msg_serializer
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class _FakeEndpoint(object):
def __init__(self, target=None):
self.target = target
def foo(self, ctxt, **kwargs):
pass
def bar(self, ctxt, **kwargs):
pass
class TestDispatcher(test_utils.BaseTestCase):
    """Scenario-driven tests of RPCDispatcher endpoint/method selection.

    Each scenario describes the endpoint targets to register, the incoming
    message, which endpoint method (if any) should be dispatched to, and
    whether dispatch should succeed or raise a specific exception.
    """
    # Scenario fields:
    #   endpoints   -- list of Target kwargs, one per mock endpoint
    #   dispatch_to -- expected (endpoint index, method) or None on failure
    #   ctxt / msg  -- incoming request context and message dict
    #   success     -- whether the reply is expected to carry no failure
    #   ex          -- expected exception class on failure, else None
    scenarios = [
        ('no_endpoints',
         dict(endpoints=[],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo'),
              success=False, ex=messaging.UnsupportedVersion)),
        ('default_target',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo'),
              success=True, ex=None)),
        ('default_target_ctxt_and_args',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='bar'),
              ctxt=dict(user='bob'), msg=dict(method='bar',
                                              args=dict(blaa=True)),
              success=True, ex=None)),
        ('default_target_namespace',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace=None),
              success=True, ex=None)),
        ('default_target_version',
         dict(endpoints=[{}],
              dispatch_to=dict(endpoint=0, method='foo'),
              ctxt={}, msg=dict(method='foo', version='1.0'),
              success=True, ex=None)),
        ('default_target_no_such_method',
         dict(endpoints=[{}],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foobar'),
              success=False, ex=messaging.NoSuchMethod)),
        ('namespace',
         dict(endpoints=[{}, dict(namespace='testns')],
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', namespace='testns'),
              success=True, ex=None)),
        ('namespace_mismatch',
         dict(endpoints=[{}, dict(namespace='testns')],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', namespace='nstest'),
              success=False, ex=messaging.UnsupportedVersion)),
        ('version',
         dict(endpoints=[dict(version='1.5'), dict(version='3.4')],
              dispatch_to=dict(endpoint=1, method='foo'),
              ctxt={}, msg=dict(method='foo', version='3.2'),
              success=True, ex=None)),
        ('version_mismatch',
         dict(endpoints=[dict(version='1.5'), dict(version='3.0')],
              dispatch_to=None,
              ctxt={}, msg=dict(method='foo', version='3.2'),
              success=False, ex=messaging.UnsupportedVersion)),
    ]
    def test_dispatcher(self):
        # Build one mock endpoint per scenario entry, each with the
        # requested Target attributes.
        endpoints = [mock.Mock(spec=_FakeEndpoint,
                               target=messaging.Target(**e))
                     for e in self.endpoints]
        serializer = None
        target = messaging.Target()
        dispatcher = messaging.RPCDispatcher(target, endpoints, serializer)
        def check_reply(reply=None, failure=None, log_failure=True):
            # Called in place of a real reply; verifies that the dispatch
            # outcome (success vs. a specific exception) matches the
            # scenario's expectations.
            if self.ex and failure is not None:
                ex = failure[1]
                self.assertFalse(self.success, ex)
                self.assertIsNotNone(self.ex, ex)
                self.assertIsInstance(ex, self.ex, ex)
                if isinstance(ex, messaging.NoSuchMethod):
                    self.assertEqual(ex.method, self.msg.get('method'))
                elif isinstance(ex, messaging.UnsupportedVersion):
                    self.assertEqual(ex.version,
                                     self.msg.get('version', '1.0'))
            else:
                self.assertTrue(self.success, failure)
                self.assertIsNone(failure)
        incoming = mock.Mock(ctxt=self.ctxt, message=self.msg)
        incoming.reply.side_effect = check_reply
        with dispatcher(incoming) as callback:
            callback()
        # Exactly the expected endpoint method must have been invoked with
        # the message args; every other method must be untouched.
        for n, endpoint in enumerate(endpoints):
            for method_name in ['foo', 'bar']:
                method = getattr(endpoint, method_name)
                if self.dispatch_to and n == self.dispatch_to['endpoint'] and \
                        method_name == self.dispatch_to['method']:
                    method.assert_called_once_with(
                        self.ctxt, **self.msg.get('args', {}))
                else:
                    self.assertEqual(method.call_count, 0)
        # The dispatcher must reply exactly once, success or failure.
        self.assertEqual(incoming.reply.call_count, 1)
class TestSerializer(test_utils.BaseTestCase):
    """Verifies RPCDispatcher drives the serializer around a dispatch.

    The dispatcher must deserialize the context and each argument before
    invoking the endpoint, and serialize the return value afterwards.
    """
    # Scenario fields: ctxt/args are the wire-side values, dctxt is the
    # deserialized context, retval the endpoint's return value (or None).
    scenarios = [
        ('no_args_or_retval',
         dict(ctxt={}, dctxt={}, args={}, retval=None)),
        ('args_and_retval',
         dict(ctxt=dict(user='bob'),
              dctxt=dict(user='alice'),
              args=dict(a='a', b='b', c='c'),
              retval='d')),
    ]
    def test_serializer(self):
        endpoint = _FakeEndpoint()
        serializer = msg_serializer.NoOpSerializer()
        target = messaging.Target()
        dispatcher = messaging.RPCDispatcher(target, [endpoint], serializer)
        # The endpoint must be called with the deserialized ('d'-prefixed)
        # context and arguments.
        self.mox.StubOutWithMock(endpoint, 'foo')
        args = dict([(k, 'd' + v) for k, v in self.args.items()])
        endpoint.foo(self.dctxt, **args).AndReturn(self.retval)
        # Record the exact serializer calls the dispatcher is expected to
        # make, in order: context, each argument, then the return value.
        self.mox.StubOutWithMock(serializer, 'serialize_entity')
        self.mox.StubOutWithMock(serializer, 'deserialize_entity')
        self.mox.StubOutWithMock(serializer, 'deserialize_context')
        serializer.deserialize_context(self.ctxt).AndReturn(self.dctxt)
        for arg in self.args:
            serializer.deserialize_entity(self.dctxt, arg).AndReturn('d' + arg)
        serializer.serialize_entity(self.dctxt, self.retval).\
            AndReturn('s' + self.retval if self.retval else None)
        self.mox.ReplayAll()
        retval = dispatcher._dispatch(self.ctxt, dict(method='foo',
                                                      args=self.args))
        # The caller must receive the serialized ('s'-prefixed) result.
        if self.retval is not None:
            self.assertEqual(retval, 's' + self.retval)
| {
"content_hash": "0a1b42ac5f481866df3170e16bb57621",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 38.29192546583851,
"alnum_prop": 0.5506893755068938,
"repo_name": "JioCloud/oslo.messaging",
"id": "8d12278a26cb276d8ee4a96da9a65104533b72fb",
"size": "6772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rpc_dispatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "488745"
}
],
"symlink_target": ""
} |
from app.validation.abstract_validator import AbstractValidator
from app.validation.validation_result import ValidationResult
class MandatoryCheck(AbstractValidator):
    """Validator rejecting empty or whitespace-only mandatory answers."""

    def validate(self, user_answer):
        """
        Validate that a field is mandatory
        :param user_answer: The answer the user provided for the response
        :return: ValidationResult(): An object containing the result of the validation
        """
        result = ValidationResult()
        result.is_valid = False
        if isinstance(user_answer, list):
            self._validate_list(user_answer, result)
        else:
            self._validate_single(user_answer, result)
        # We only want ONE error message
        if not result.is_valid:
            result.errors.append(AbstractValidator.MANDATORY)
        return result

    def _validate_list(self, user_answers, validation_result):
        # Stop at the first entry that satisfies the mandatory check.
        for answer in user_answers:
            self._validate_single(answer, validation_result)
            if validation_result.is_valid:
                return

    @staticmethod
    def _validate_single(user_answer, validation_result):
        # Truthy and not purely whitespace counts as answered.
        validation_result.is_valid = user_answer and not str(user_answer).isspace()
| {
"content_hash": "2f99010c82fa7d705f7e088a43652f90",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 86,
"avg_line_length": 37.72222222222222,
"alnum_prop": 0.6723122238586157,
"repo_name": "qateam123/eq",
"id": "6184eeddcbdaa2e48cc1ba761b092956d2ef1452",
"size": "1358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/validation/mandatory_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56444"
},
{
"name": "HTML",
"bytes": "64720"
},
{
"name": "JavaScript",
"bytes": "752517"
},
{
"name": "Python",
"bytes": "735531"
},
{
"name": "Shell",
"bytes": "7685"
}
],
"symlink_target": ""
} |
"""
Created on Fri May 5 09:49:09 2017
@author: dhingratul
Create a softmax function to translate Scores into probabilities
as a part of Lesson1, Quiz 10
To Do: Add legend to the graph
"""
import numpy as np
import matplotlib.pyplot as plt
scores = [3.0, 1.0, 0.2]
def softmax(x):
    """Convert each column of scores in x into probabilities (axis 0)."""
    exps = np.exp(x)
    return exps / exps.sum(axis=0)
# Show the probabilities for the example scores defined above.
print(softmax(scores))
# Plot softmax curves: one varying score against two constant scores,
# demonstrating how the largest score dominates as it grows.
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()
| {
"content_hash": "90e444341a274a14f290ac0c3a2bc67d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 23.68,
"alnum_prop": 0.6858108108108109,
"repo_name": "dhingratul/Deep-Learning",
"id": "7aa7cd231f0dfacde6d728bf2d824305a3e688be",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Softmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62822"
}
],
"symlink_target": ""
} |
# Package entry point: delegate to the cg_trace CLI and exit with its
# return code.
import sys
from cg_trace.main import main
sys.exit(main())
| {
"content_hash": "ef63f925c4a8d071ce5a7c024b3536be",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 30,
"avg_line_length": 12.2,
"alnum_prop": 0.7377049180327869,
"repo_name": "github/codeql",
"id": "90cc057208a3e92b159aae9f527fed137a39ba8b",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/tools/recorded-call-graph-metrics/src/cg_trace/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
'''
Module for sending messages to Pushover (https://www.pushover.net)
.. versionadded:: 2016.3.0
:configuration: This module can be used by either passing an api key and version
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
.. code-block:: yaml
pushover:
token: abAHuZyCLtdH8P4zhmFZmgUHUsv1ei8
'''
# Import Python libs
from __future__ import absolute_import
import logging
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import salt libs
from salt.exceptions import SaltInvocationError
import salt.utils.pushover
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
# Name under which Salt exposes this module (returned by __virtual__()).
__virtualname__ = 'pushover'
def __virtual__():
    '''
    Return virtual name of the module.

    :return: The virtual name of the module.
    '''
    # Salt loads this execution module under the returned name
    # ('pushover') rather than under the file name.
    return __virtualname__
def post_message(user=None,
                 device=None,
                 message=None,
                 title=None,
                 priority=None,
                 expire=None,
                 retry=None,
                 sound=None,
                 api_version=1,
                 token=None):
    '''
    Send a message to a Pushover user or group.
    :param user: The user or group to send to, must be key of user or group not email address.
    :param device: The device to target; used when validating the user key.
    :param message: The message to send to the PushOver user or group.
    :param title: Specify who the message is from.
    :param priority: The priority of the message, defaults to 0.
    :param expire: The message should expire after N number of seconds.
    :param retry: The number of times the message should be retried.
    :param sound: The sound to associate with the message.
    :param api_version: The PushOver API version, if not specified in the configuration.
    :param token: The PushOver token, if not specified in the configuration.
    :return: True on success, otherwise the error dict from the API helper.
    CLI Example:
    .. code-block:: bash
        salt '*' pushover.post_message user='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' title='Message from Salt' message='Build is done'
        salt '*' pushover.post_message user='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' title='Message from Salt' message='Build is done' priority='2' expire='720' retry='5'
    '''
    def _setting(name):
        # Look the value up under both supported configuration notations
        # (``pushover.<name>`` and ``pushover:<name>``).
        return (__salt__['config.get']('pushover.' + name) or
                __salt__['config.get']('pushover:' + name))

    if not token:
        token = _setting('token')
    if not token:
        raise SaltInvocationError('Pushover token is unavailable.')

    if not user:
        user = _setting('user')
    if not user:
        raise SaltInvocationError('Pushover user key is unavailable.')

    if not message:
        raise SaltInvocationError('Required parameter "message" is missing.')

    # Bail out early (returning the validation payload) when the user/group
    # key does not validate against the Pushover API.
    user_validate = salt.utils.pushover.validate_user(user, device, token)
    if not user_validate['result']:
        return user_validate

    parameters = {
        'user': user,
        'device': device,
        'token': token,
        'title': title if title else 'Message from SaltStack',
        'priority': priority,
        'expire': expire,
        'retry': retry,
        'message': message,
    }
    # Only attach the sound when the API confirms it is a known sound name.
    if sound and salt.utils.pushover.validate_sound(sound, token)['res']:
        parameters['sound'] = sound

    result = salt.utils.pushover.query(function='message',
                                       method='POST',
                                       header_dict={'Content-Type': 'application/x-www-form-urlencoded'},
                                       data=_urlencode(parameters),
                                       opts=__opts__)

    if result['res']:
        return True
    return result
| {
"content_hash": "1355dd274e063020b2de904590f8326c",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 162,
"avg_line_length": 33.19672131147541,
"alnum_prop": 0.6207407407407407,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "868fbc581a6f36da7007e1df1e5be25fccd3e632",
"size": "4074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/modules/pushover_notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Used below to embed README.rst as the package long description.

    :param fname: file name relative to the directory containing setup.py
    :return: the file's contents as a string
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Package metadata; installing this project exposes the `custodian` CLI.
setup(
    name="c7n",
    version='0.8.25.1',
    description="Cloud Custodian - Policy Rules Engine",
    # embed the README as the PyPI long description
    long_description=read('README.rst'),
    classifiers=[
        "Topic :: System :: Systems Administration",
        "Topic :: System :: Distributed Computing"
    ],
    url="https://github.com/capitalone/cloud-custodian",
    license="Apache-2.0",
    packages=find_packages(),
    # console entry point: `custodian` runs c7n.cli:main
    entry_points={
        'console_scripts': [
            'custodian = c7n.cli:main']},
    install_requires=[
        "boto3>=1.4.4",
        "botocore>=1.5.73",
        "pyyaml",
        "jsonschema",
        # NOTE(review): `ipaddress` is a Python 2 backport; it is part of the
        # stdlib on Python 3 — presumably kept for py2 support, confirm.
        "ipaddress",
        "argcomplete",
        "tabulate",
    ],
)
| {
"content_hash": "cf47a9237b90a9eea3f4046c7944e6c6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 24.575757575757574,
"alnum_prop": 0.5795314426633785,
"repo_name": "jdubs/cloud-custodian",
"id": "629089d4b5dc96f7bf0f6377e58fe9e2787580bb",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "1557818"
}
],
"symlink_target": ""
} |
def Binary2Decimal(bin_num):
    """Convert a string of binary digits into its decimal (int) value.

    Delegates to the built-in int() with an explicit base; wrapped in a
    named function for symmetry with Decimal2Binary."""
    return int(bin_num, base=2)
def Decimal2Binary(dec_num):
    """Return the binary representation of dec_num.

    Note: returns the empty string (not '0') for an input of 0, which is
    what the surrounding concatenation code relies on."""
    if not dec_num:
        return ''
    quotient, remainder = divmod(dec_num, 2)
    return Decimal2Binary(quotient) + str(remainder)
#input code: converts input into string
# Read a run of decimal digits from the user (expects 9 digits: three
# 3-digit numbers in the 0-255 range).
s= raw_input("Give Numbers:")
n=0
st=[]
# copy the input string character by character into a list
# (equivalent to list(s); kept as written)
while n<>len(s):
    st.append(s[n])
    n=n+1
#decoding
# split the 9 input digits into three 3-digit decimal numbers
one = int("".join(st[:3]))
two = int("".join(st[3:6]))
three = int("".join(st[6:9]))
#Decimal two Binary
x = Decimal2Binary(one)
y = Decimal2Binary(two)
z = Decimal2Binary(three)
#Add the Binary together
# NOTE(review): Decimal2Binary does not zero-pad to 8 bits, so the 6-bit
# regrouping below only lines up when every number is exactly 8 bits wide
# (i.e. 128-255) — confirm the intended input range.
a = (x+""+y+""+z)
#get additional numbers from Binary
# regroup the 24-bit string into four 6-bit values (base64-style split)
b = a[:6]
c = Binary2Decimal(b)
d = a[6:12]
e = Binary2Decimal(d)
f = a[12:18]
g = Binary2Decimal(f)
h = a[18:24]
i = Binary2Decimal(h)
print c
print e
print g
print i
| {
"content_hash": "ea483b7a43eda4ade966e47e77b2513d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 56,
"avg_line_length": 22.022222222222222,
"alnum_prop": 0.6478304742684158,
"repo_name": "ActiveState/code",
"id": "c631a1d1a6b41b14553ec48794b4729e541ea188",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/425953_base64_encoding_prototype/recipe-425953.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""
This module contains the checker mode, a base class for code checker modes.
"""
import logging
from pyqode.core.api import TextBlockUserData
from pyqode.core.api.decoration import TextDecoration
from pyqode.core.api.mode import Mode
from pyqode.core.backend import NotRunning
from pyqode.core.api.utils import DelayJobRunner
from pyqode.qt import QtCore, QtGui
class CheckerMessages(object):
    """
    Enumerates the possible checker message types.
    """
    #: Status value for an information message.
    INFO = 0
    #: Status value for a warning message.
    WARNING = 1
    #: Status value for an error message.
    ERROR = 2


class CheckerMessage(object):
    """
    Holds the data of one message displayed by the
    :class:`pyqode.core.modes.CheckerMode`.
    """
    #: Default text decoration color for each message status.
    COLORS = {CheckerMessages.INFO: "#4040DD",
              CheckerMessages.WARNING: "#DDDD40",
              CheckerMessages.ERROR: "#DD4040"}

    #: Human readable label associated with each message status.
    _STATUS_STRINGS = {CheckerMessages.INFO: "Info",
                       CheckerMessages.WARNING: "Warning",
                       CheckerMessages.ERROR: "Error"}

    @classmethod
    def status_to_string(cls, status):
        """
        Converts a message status to a string.

        :param status: Status to convert (pyqode.core.modes.CheckerMessages)
        :return: The status string.
        :rtype: str
        """
        return cls._STATUS_STRINGS[status]

    @property
    def status_string(self):
        """
        Returns the message status as a string.

        :return: The status string.
        """
        return self.status_to_string(self.status)

    def __init__(self, description, status, line, col=None, icon=None,
                 color=None, path=None):
        """
        :param description: The message description (used as a tooltip)
        :param status: The status associated with the message.
        :param line: The message line number
        :param col: The message start column (at the moment the message ends
            at the end of the line).
        :param icon: Unused, kept for backward compatibility.
        :param color: Text decoration color; defaults to COLORS[status].
        :param path: file path. Optional
        """
        assert 0 <= status <= 2
        #: Tooltip text describing the message.
        self.description = description
        #: One of the :class:`CheckerMessages` constants.
        self.status = status
        #: Line number the message applies to.
        self.line = line
        #: Start column of the decoration; the whole line is highlighted
        #: when this is None.
        self.col = col
        #: Color of the text decoration.
        self.color = self.COLORS[status] if color is None else color
        #: Text decoration, created later by the checker mode.
        self.decoration = None
        #: Optional file path the message belongs to.
        self.path = path
        #: Reference to the associated QTextBlock, for quick access.
        self.block = None

    def __str__(self):
        return "%s l%s" % (self.description, self.line)

    def __eq__(self, other):
        if not (self.block == other.block):
            return False
        return self.description == other.description
def _logger(klass):
    """Return this module's logger, tagged with *klass*'s name."""
    return logging.getLogger('{} [{}]'.format(__name__, klass.__name__))
class CheckerMode(Mode, QtCore.QObject):
    """
    Performs a user defined code analysis job using the backend and
    display the results on the editor instance.

    The user defined code analysis job is a simple **function** with the
    following signature:

    .. code-block:: python

        def analysisProcess(data)

    where data is the request data:

    .. code-block:: python

        request_data = {
            'code': self.editor.toPlainText(),
            'path': self.editor.file.path,
            'encoding': self.editor.file.encoding
        }

    and the return value is a tuple made up of the following elements:

        (description, status, line, [col], [icon], [color], [path])

    The background process is ran when the text changed and the ide is an idle
    state for a few seconds.

    You can also request an analysis manually using
    :meth:`pyqode.core.modes.CheckerMode.request_analysis`

    Messages are displayed as text decorations on the editor. A checker panel
    will take care of display message icons next to each line.
    """
    @property
    def messages(self):
        """
        Returns the entire list of checker messages.
        """
        return self._messages

    def __init__(self, worker,
                 delay=500,
                 show_tooltip=True):
        """
        :param worker: The process function or class to call remotely.
        :param delay: The delay used before running the analysis process when
                      trigger is set to
                      :class:pyqode.core.modes.CheckerTriggers`
        :param show_tooltip: Specify if a tooltip must be displayed when the
                             mouse is over a checker message decoration.
        """
        Mode.__init__(self)
        QtCore.QObject.__init__(self)
        # max number of messages to keep good performances
        self.limit = 200
        # rules (strings) forwarded to the worker to filter out messages
        self.ignore_rules = []
        self._job_runner = DelayJobRunner(delay=delay)
        self._messages = []
        self._worker = worker
        self._mutex = QtCore.QMutex()
        self._show_tooltip = show_tooltip
        self._pending_msg = []
        # True when no add/remove batch is in flight; guards re-entrancy of
        # request_analysis
        self._finished = True

    def set_ignore_rules(self, rules):
        """
        Sets the ignore rules for the linter.

        Rules are a list of string that the actual linter function will check
        to reject some warnings/errors.
        """
        self.ignore_rules = rules

    def add_messages(self, messages):
        """
        Adds a list of messages to display.

        Messages are diffed against the currently displayed ones: obsolete
        messages are removed in small batches first, then the new ones are
        added (also in batches) to keep the UI responsive.

        :param messages: list of messages to display
        """
        # keep at most `limit` messages to preserve good performances
        if len(messages) > self.limit:
            messages = messages[:self.limit]
        _logger(self.__class__).log(5, 'adding %s messages', len(messages))
        self._finished = False
        self._new_messages = messages
        self._to_check = list(self._messages)
        self._pending_msg = messages
        # start removing messages, new messages won't be added until we
        # checked all messages that need to be removed
        QtCore.QTimer.singleShot(1, self._remove_batch)

    def _remove_batch(self):
        """Removes up to 100 obsolete messages, then re-schedules itself (or
        schedules :meth:`_add_batch` once every old message was checked)."""
        if self.editor is None:
            return
        for i in range(100):
            if not len(self._to_check):
                # all messages checked, start adding the new ones now
                QtCore.QTimer.singleShot(1, self._add_batch)
                self.editor.repaint()
                return False
            msg = self._to_check.pop(0)
            if msg.block is None:
                msg.block = self.editor.document().findBlockByNumber(msg.line)
            if msg not in self._new_messages:
                self.remove_message(msg)
        self.editor.repaint()
        QtCore.QTimer.singleShot(1, self._remove_batch)

    def _add_batch(self):
        """Adds up to 10 pending messages (and their text decorations), then
        re-schedules itself until the pending queue is empty."""
        if self.editor is None:
            return
        for i in range(10):
            if not len(self._pending_msg):
                # all pending messages added
                self._finished = True
                _logger(self.__class__).log(5, 'finished')
                self.editor.repaint()
                return False
            message = self._pending_msg.pop(0)
            if message.line >= 0:
                try:
                    usd = message.block.userData()
                except AttributeError:
                    # block not resolved yet, look it up from the line number
                    message.block = self.editor.document().findBlockByNumber(
                        message.line)
                    usd = message.block.userData()
                if usd is None:
                    usd = TextBlockUserData()
                    message.block.setUserData(usd)
                # check if the same message already exists
                if message in usd.messages:
                    continue
                self._messages.append(message)
                usd.messages.append(message)
                tooltip = None
                if self._show_tooltip:
                    tooltip = message.description
                message.decoration = TextDecoration(
                    self.editor.textCursor(), start_line=message.line,
                    tooltip=tooltip, draw_order=3)
                message.decoration.set_full_width()
                message.decoration.set_as_error(color=QtGui.QColor(
                    message.color))
                self.editor.decorations.append(message.decoration)
        QtCore.QTimer.singleShot(1, self._add_batch)
        self.editor.repaint()
        return True

    def remove_message(self, message):
        """
        Removes a message and its decoration from the editor.

        :param message: Message to remove
        """
        _logger(self.__class__).log(5, 'removing message %s', message)
        usd = message.block.userData()
        if usd:
            try:
                usd.messages.remove(message)
            except (AttributeError, ValueError):
                pass
        if message.decoration:
            self.editor.decorations.remove(message.decoration)
        self._messages.remove(message)

    def clear_messages(self):
        """
        Clears all messages and their decorations.
        """
        while len(self._messages):
            msg = self._messages.pop(0)
            usd = msg.block.userData()
            if usd and hasattr(usd, 'messages'):
                usd.messages[:] = []
            if msg.decoration:
                self.editor.decorations.remove(msg.decoration)

    def on_state_changed(self, state):
        """Connects/disconnects the editor signals when the mode is enabled
        or disabled, and triggers an initial analysis on enable."""
        if state:
            self.editor.textChanged.connect(self.request_analysis)
            self.editor.new_text_set.connect(self.clear_messages)
            self.request_analysis()
        else:
            self.editor.textChanged.disconnect(self.request_analysis)
            self.editor.new_text_set.disconnect(self.clear_messages)
            self._job_runner.cancel_requests()
            self.clear_messages()

    def _on_work_finished(self, results):
        """
        Displays the results of an analysis request.

        :param results: list of argument tuples used to rebuild each
            :class:`CheckerMessage` (description, status, line, ...).
        """
        messages = []
        for msg in results:
            msg = CheckerMessage(*msg)
            block = self.editor.document().findBlockByNumber(msg.line)
            msg.block = block
            messages.append(msg)
        self.add_messages(messages)

    def request_analysis(self):
        """
        Requests an analysis.

        If the previous results are still being displayed, the request is
        retried every 500 ms until the mode is ready.
        """
        if self._finished:
            _logger(self.__class__).log(5, 'running analysis')
            self._job_runner.request_job(self._request)
        elif self.editor:
            # retry later
            _logger(self.__class__).log(
                5, 'delaying analysis (previous analysis not finished)')
            QtCore.QTimer.singleShot(500, self.request_analysis)

    def _request(self):
        """ Requests a checking of the editor content. """
        try:
            self.editor.toPlainText()
        except (TypeError, RuntimeError):
            # editor has been deleted in the meantime
            return
        try:
            max_line_length = self.editor.modes.get(
                'RightMarginMode').position
        except KeyError:
            # no right margin mode installed, fall back to the PEP 8 default
            max_line_length = 79
        request_data = {
            'code': self.editor.toPlainText(),
            'path': self.editor.file.path,
            'encoding': self.editor.file.encoding,
            'ignore_rules': self.ignore_rules,
            'max_line_length': max_line_length,
        }
        try:
            self.editor.backend.send_request(
                self._worker, request_data, on_receive=self._on_work_finished)
            self._finished = False
        except NotRunning:
            # backend not ready yet, retry later
            QtCore.QTimer.singleShot(100, self._request)
| {
"content_hash": "ddbd39b05c77a9b3913fe5cf096d71c6",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 35.23495702005731,
"alnum_prop": 0.575668862324144,
"repo_name": "zwadar/pyqode.core",
"id": "d50837e1a6cd6fdfbad31059888957137207a65b",
"size": "12321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyqode/core/modes/checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2129763"
}
],
"symlink_target": ""
} |
"""
Compare volume average temperature profiles, Tv, from 1-D model and 3-D Comsol
simulation of white oak particles with Feret diameters of DF = 200 um to 20 mm.
Surface area to volume diameter, Dsv, is used for the 1-D model.
Requirements: Python 3, NumPy, SciPy, Matplotlib, funcHeatCond, funcOther
"""
import numpy as np
import matplotlib.pyplot as py
from funcHeatCond import hc2
from funcOther import vol, Tvol, dsv
# Parameters
# -----------------------------------------------------------------------------
Gb = 0.72 # basic specific gravity, Wood Handbook Table 4-7, (-)
k = 0.16 # thermal conductivity, W/mK
x = 0 # moisture content, %
h = 350 # heat transfer coefficient, W/m^2*K
Ti = 293 # initial particle temp, K
Tinf = 773 # ambient temp, K
# Comsol Data for Particle Geometry and Temperatures
# -----------------------------------------------------------------------------
# NOTE(review): each Comsol export is unpacked into 7 columns of which only
# the first two (time and volume-average temperature Tv, per the variable
# names) are used — confirm against the Comsol export settings.
# geometry and temperature data for DF = 200 um
sa200 = 5.355e-8 # surface area of Comsol particle, m^2
v200 = 8.895e-13 # volume of Comsol particle, m^3
file200 = 'comsol/200tempsOak.txt' # time and temperatures
t200, Tv200, _, _, _, _, _ = np.loadtxt(file200, skiprows=5, unpack=True)
# geometry and temperature data for DF = 400 um
sa400 = 1.879e-7 # surface area of Comsol particle, m^2
v400 = 5.553e-12 # volume of Comsol particle, m^3
file400 = 'comsol/400tempsOak.txt' # time and temperatures
t400, Tv400, _, _, _, _, _ = np.loadtxt(file400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 700 um
sa700 = 4.836e-7 # surface area of Comsol particle, m^2
v700 = 2.11e-11 # volume of Comsol particle, m^3
file700 = 'comsol/700tempsOak.txt' # time and temperatures
t700, Tv700, _, _, _, _, _ = np.loadtxt(file700, skiprows=5, unpack=True)
# geometry and temperature data for DF = 1400 um
sa1400 = 1.394e-6 # surface area of Comsol particle, m^2
v1400 = 8.442e-11 # volume of Comsol particle, m^3
file1400 = 'comsol/1400tempsOak.txt' # time and temperatures
t1400, Tv1400, _, _, _, _, _ = np.loadtxt(file1400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 2800 um
sa2800 = 4.614e-6 # surface area of Comsol particle, m^2
v2800 = 4.011e-10 # volume of Comsol particle, m^3
file2800 = 'comsol/2800tempsOak.txt' # time and temperatures
t2800, Tv2800, _, _, _, _, _ = np.loadtxt(file2800, skiprows=5, unpack=True)
# geometry and temperature data for DF = 5400 um
sa5400 = 1.716e-5 # surface area of Comsol particle, m^2
v5400 = 2.877e-9 # volume of Comsol particle, m^3
file5400 = 'comsol/5400tempsOak.txt' # time and temperatures
t5400, Tv5400, _, _, _, _, _ = np.loadtxt(file5400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 10000 um
sa10000 = 5.885e-5 # surface area of Comsol particle, m^2
v10000 = 1.827e-8 # volume of Comsol particle, m^3
file10000 = 'comsol/10000tempsOak.txt' # time and temperatures
t10000, Tv10000, _, _, _, _, _ = np.loadtxt(file10000, skiprows=5, unpack=True)
# geometry and temperature data for DF = 20000 um
sa20000 = 2.354e-4 # surface area of Comsol particle, m^2
v20000 = 1.462e-7 # volume of Comsol particle, m^3
file20000 = 'comsol/20000tempsOak.txt' # time and temperatures
t20000, Tv20000, _, _, _, _, _ = np.loadtxt(file20000, skiprows=5, unpack=True)
# 1-D Transient Heat Conduction using Dsv
# -----------------------------------------------------------------------------
# dsv/hc2/vol/Tvol below are provided by funcOther and funcHeatCond (see the
# module docstring); hc2 returns the intraparticle temperature array.
# number of nodes from center of particle (m=0) to surface (m)
m = 1000
# time vector from 0 to max time
tmax = 2.0 # max time, s
nt = 1000 # number of time steps
dt = tmax/nt # time step, s
t = np.arange(0, tmax+dt, dt) # time vector, s
tmax2 = 20.0 # max time for large particles, s
t2 = np.arange(0, tmax2+dt, dt) # time vector for large particles, s
# 1-D Transient Heat Conduction for DF = 200 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv200 = dsv(sa200, v200)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv200 = hc2(dsv200, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol200 = vol(dsv200, m) # volumes in the sphere
Tvol200 = Tvol(Tsv200, vol200) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv400 = dsv(sa400, v400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv400 = hc2(dsv400, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol400 = vol(dsv400, m) # volumes in the sphere
Tvol400 = Tvol(Tsv400, vol400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 700 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv700 = dsv(sa700, v700)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv700 = hc2(dsv700, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol700 = vol(dsv700, m) # volumes in the sphere
Tvol700 = Tvol(Tsv700, vol700) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 1400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv1400 = dsv(sa1400, v1400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv1400 = hc2(dsv1400, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol1400 = vol(dsv1400, m) # volumes in the sphere
Tvol1400 = Tvol(Tsv1400, vol1400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 2800 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv2800 = dsv(sa2800, v2800)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv2800 = hc2(dsv2800, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol2800 = vol(dsv2800, m) # volumes in the sphere
Tvol2800 = Tvol(Tsv2800, vol2800) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 5400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv5400 = dsv(sa5400, v5400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
# note: larger particles heat slowly, so the longer t2 vector is used
Tsv5400 = hc2(dsv5400, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol5400 = vol(dsv5400, m) # volumes in the sphere
Tvol5400 = Tvol(Tsv5400, vol5400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 10000 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv10000 = dsv(sa10000, v10000)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv10000 = hc2(dsv10000, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol10000 = vol(dsv10000, m) # volumes in the sphere
Tvol10000 = Tvol(Tsv10000, vol10000) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 20000 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv20000 = dsv(sa20000, v20000)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv20000 = hc2(dsv20000, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol20000 = vol(dsv20000, m) # volumes in the sphere
Tvol20000 = Tvol(Tsv20000, vol20000) # Dsv volume average temperature profile
# Plot Results
# -----------------------------------------------------------------------------
py.ion()
py.close('all')
def despine():
    """Hide the top/right spines and all tick marks on the current axes."""
    axes = py.gca()
    for side in ('top', 'right'):
        axes.spines[side].set_visible(False)
    py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')
# Figure 1: small particles (DF = 0.2-2.8 mm); Comsol Tv as markers,
# 1-D Dsv model as solid lines
py.figure(1)
py.plot(t200, Tv200, 'co', mec='c', mew=2, mfc='none', label='Tv')
py.plot(t400, Tv400, 'co', mec='c', mew=2, mfc='none')
py.plot(t700, Tv700, 'co', mec='c', mew=2, mfc='none')
py.plot(t1400, Tv1400, 'co', mec='c', mew=2, mfc='none')
py.plot(t2800, Tv2800, 'co', mec='c', mew=2, mfc='none')
py.plot(t, Tvol200, 'r', lw=2, label='0.2 mm')
py.plot(t, Tvol400, 'g', lw=2, label ='0.4 mm')
py.plot(t, Tvol700, 'b', lw=2, label='0.7 mm')
py.plot(t, Tvol1400, 'm', lw=2, label='1.4 mm')
py.plot(t, Tvol2800, 'y', lw=2, label='2.8 mm')
py.axhline(Tinf, c='k', ls='--') # ambient temperature reference line
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('White Oak with DF = 200-2800 um')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
# Figure 2: large particles (DF = 5.4-20 mm) over the longer 20 s window
py.figure(2)
py.plot(t5400, Tv5400, 'co', mec='c', mew=2, mfc='none', label='Tv')
py.plot(t10000, Tv10000, 'co', mec='c', mew=2, mfc='none')
py.plot(t20000, Tv20000, 'co', mec='c', mew=2, mfc='none')
py.plot(t2, Tvol5400, lw=2, label ='5.4 mm')
py.plot(t2, Tvol10000, lw=2, label='10 mm')
py.plot(t2, Tvol20000, lw=2, label='20 mm')
py.axhline(Tinf, c='k', ls='--') # ambient temperature reference line
py.ylim(250, 800)
py.xlim(0, tmax2)
py.title('White Oak with DF = 5.4-20 mm')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
| {
"content_hash": "0df2f4f9a96b646a075452b65d8c48b8",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 81,
"avg_line_length": 42.292,
"alnum_prop": 0.5996405939657619,
"repo_name": "pyrolysis/low-order-particle",
"id": "4de209eef7bd3495477d1be9ab96406f9ed1ff93",
"size": "10573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oak-200-20000.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108984"
}
],
"symlink_target": ""
} |
# standard library imports
from struct import unpack
# custom import
from .DataTypeConverters import readBew, readVar, varLen
class RawInstreamFile:
    """
    It parses and reads data from an input file. It takes care of big
    endianess, and keeps track of the cursor position. The midi parser
    only reads from this object. Never directly from the file.
    """

    def __init__(self, infile=''):
        """
        If 'infile' is a string we assume it is a path and read from
        that file.
        If it is a file descriptor we read from the file, but we don't
        close it.
        Midi files are usually pretty small, so it should be safe to
        copy them into memory.
        """
        if not infile:
            self.data = ''
        elif isinstance(infile, str):
            # path given: read the whole file and close it ourselves
            with open(infile, 'rb') as midi_file:
                self.data = midi_file.read()
        else:
            # file-like object supplied by the caller; don't close it
            self.data = infile.read()
        # reading always starts at the beginning of the data
        self.cursor = 0

    # setting up data manually

    def setData(self, data=''):
        "Sets the data from a string."
        self.data = data

    # cursor operations

    def setCursor(self, position=0):
        "Sets the absolute position if the cursor"
        self.cursor = position

    def getCursor(self):
        "Returns the value of the cursor"
        return self.cursor

    def moveCursor(self, relative_position=0):
        "Moves the cursor to a new relative position"
        self.cursor = self.cursor + relative_position

    # native data reading functions

    def nextSlice(self, length, move_cursor=1):
        "Reads the next slice of raw data, advancing the cursor if requested"
        start = self.cursor
        slc = self.data[start:start + length]
        if move_cursor:
            self.moveCursor(length)
        return slc

    def readBew(self, n_bytes=1, move_cursor=1):
        """
        Reads n_bytes of data (big endian word) from the current cursor
        position. Moves cursor if move_cursor is true.
        """
        return readBew(self.nextSlice(n_bytes, move_cursor))

    def readVarLen(self):
        """
        Reads a variable length value from the current cursor position and
        advances the cursor by the number of bytes the value occupied.
        """
        MAX_VARLEN = 4  # a varlen value is at most four bytes long
        var = readVar(self.nextSlice(MAX_VARLEN, 0))
        # only move cursor the actual bytes in varlen
        self.moveCursor(varLen(var))
        return var
if __name__ == '__main__':
    # Manual smoke test: print the raw content of two reference MIDI files.
    # Exercises both __init__ paths (path string vs. open file object).
    test_file = 'test/midifiles/minimal.mid'
    fis = RawInstreamFile(test_file)
    print(fis.nextSlice(len(fis.data)))
    test_file = 'test/midifiles/cubase-minimal.mid'
    cubase_minimal = open(test_file, 'rb')
    fis2 = RawInstreamFile(cubase_minimal)
    print(fis2.nextSlice(len(fis2.data)))
    cubase_minimal.close()
| {
"content_hash": "40b39a82b64a67d58e5d19c363c672b7",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 28.764150943396228,
"alnum_prop": 0.5651033125614956,
"repo_name": "nanotone/midihub",
"id": "637f71841bb4c19d8b8044b1192b98be4b532320",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonmidi/RawInstreamFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73091"
}
],
"symlink_target": ""
} |
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SimpleHttpOperator(BaseOperator):
    """
    Calls an endpoint on an HTTP system to execute an action

    :param http_conn_id: The connection to run the operator against
    :type http_conn_id: str
    :param endpoint: The relative part of the full url. (templated)
    :type endpoint: str
    :param method: The HTTP method to use, default = "POST"
    :type method: str
    :param data: The data to pass. POST-data in POST/PUT and params
        in the URL for a GET request. (templated)
    :type data: For POST/PUT, depends on the content-type parameter,
        for GET a dictionary of key/value string pairs
    :param headers: The HTTP headers to be added to the GET request
    :type headers: a dictionary of string key/value pairs
    :param response_check: A check against the 'requests' response object.
        Returns True for 'pass' and False otherwise.
    :type response_check: A lambda or defined function.
    :param extra_options: Extra options for the 'requests' library, see the
        'requests' documentation (options to modify timeout, ssl, etc.)
    :type extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    :param xcom_push: Push the response to Xcom (default: False).
        If xcom_push is True, response of an HTTP request will also
        be pushed to an XCom.
    :type xcom_push: bool
    :param log_response: Log the response (default: False)
    :type log_response: bool
    """

    template_fields = ['endpoint', 'data', 'headers', ]
    template_ext = ()
    ui_color = '#f4a460'

    @apply_defaults
    def __init__(self,
                 endpoint,
                 method='POST',
                 data=None,
                 headers=None,
                 response_check=None,
                 extra_options=None,
                 xcom_push=False,
                 http_conn_id='http_default',
                 log_response=False,
                 *args, **kwargs):
        super(SimpleHttpOperator, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.method = method
        self.http_conn_id = http_conn_id
        # fall back to empty containers so the hook always receives dicts
        self.headers = headers or {}
        self.data = data or {}
        self.extra_options = extra_options or {}
        self.response_check = response_check
        self.xcom_push_flag = xcom_push
        self.log_response = log_response

    def execute(self, context):
        """Issues the HTTP request and optionally logs, validates and
        XCom-pushes the response text."""
        hook = HttpHook(self.method, http_conn_id=self.http_conn_id)

        self.log.info("Calling HTTP method")
        response = hook.run(self.endpoint,
                            self.data,
                            self.headers,
                            self.extra_options)
        if self.log_response:
            self.log.info(response.text)
        if self.response_check and not self.response_check(response):
            raise AirflowException("Response check returned False.")
        if self.xcom_push_flag:
            return response.text
| {
"content_hash": "558a182dc15103722652dc6c90b144f3",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 40.0375,
"alnum_prop": 0.621604745551046,
"repo_name": "owlabs/incubator-airflow",
"id": "8b8532bff2185fdfaf32e332a9cd97105772ffd9",
"size": "4015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/operators/http_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
from .qidian_ranking import QidianRankingSpider
from .zh_ranking import ZHRankingSpider | {
"content_hash": "8e61ed566dd3b17f94aceb434dd4aa76",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 47,
"avg_line_length": 43.5,
"alnum_prop": 0.8735632183908046,
"repo_name": "howie6879/owllook",
"id": "8e0a99b82438e74cda6b352ace4af89536c9e564",
"size": "87",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "owllook/spiders/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19520"
},
{
"name": "Dockerfile",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "73656"
},
{
"name": "JavaScript",
"bytes": "38344"
},
{
"name": "Python",
"bytes": "184011"
}
],
"symlink_target": ""
} |
"""
magrathea.core.feed.entry
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import base64
import calendar
import time
from ...utils.convert import to_str, to_bytes
from .info import FeedInfo
def get_entry_id(entry):
    """
    Retrieve the unique identifier of a :py:mod:`feedparser` entry object.
    Magrathea uses this internally for identifying entry objects.

    Prefers the entry's ``id`` attribute, falling back to ``link``; returns
    ``None`` when neither is present.

    :param entry: :py:mod:`feedparser` entry object
    """
    for attribute in ('id', 'link'):
        if hasattr(entry, attribute):
            return base64.b64encode(to_bytes(getattr(entry, attribute)))
    return None
class Entry(object):
    """
    Class representing a feed entry. To ease sorting of entries,
    each entry offers a sort key (``key`` property) constructed
    from its update date. If the feed does not provide the updated
    date, the publish date or the creation date are used.

    :param entry: A :py:mod:`feedparser` entry object
    """

    def __init__(self, entry):
        self._id = get_entry_id(entry)
        self._key = None         # sort key, formatted '%Y%m%d%H%M%S'
        self._updated = None     # struct_time: updated/published/created date
        self._expired = None     # struct_time: expiration date, if the feed has one
        self._link = None
        self._content = None
        self._description = None
        self._title = None
        self._author = None
        self._feed = None
        self._parse_entry(entry)

    def update(self, entry):
        """
        Update feed entry with new information.

        :param entry: A :py:mod:`feedparser` entry object
        """
        self._parse_entry(entry)

    def _parse_entry(self, entry):
        # Prefer the updated date; fall back to published, then created.
        if hasattr(entry, 'updated_parsed'):
            self._updated = entry.updated_parsed
        if hasattr(entry, 'published_parsed') and not self._updated:
            self._updated = entry.published_parsed
        if hasattr(entry, 'created_parsed') and not self._updated:
            self._updated = entry.created_parsed
        if hasattr(entry, 'expired_parsed'):
            # Bug fix: this previously assigned to self._updated, clobbering
            # the real timestamp (and hence the sort key) with the expiration
            # date. self._expired was initialized but never written.
            self._expired = entry.expired_parsed
        if hasattr(entry, 'link'):
            self._link = entry.link
        if hasattr(entry, 'content'):
            self._content = []
            for element in entry.content:
                self._content.append(element.value)
        if hasattr(entry, 'description'):
            self._description = entry.description
        if hasattr(entry, 'title'):
            self._title = entry.title
        if hasattr(entry, 'author'):
            self._author = entry.author
        if self._updated:
            self._key = time.strftime('%Y%m%d%H%M%S', self._updated)

    @property
    def id(self):
        """
        Unique identifier of the entry
        """
        return self._id

    @property
    def key(self):
        """
        Time-based sorting key
        """
        return self._key

    @property
    def body(self):
        """
        Content body of the entry
        """
        if self._content:
            # NOTE(review): to_str() is applied to the whole list here, not
            # per element -- presumably it handles sequences; verify against
            # magrathea.utils.convert.to_str.
            # noinspection PyTypeChecker
            return " ".join(to_str(self._content))
        if self._description:
            return to_str(self._description)
        return ""

    @property
    def title(self):
        """
        Title of the entry
        """
        return to_str(self._title)

    @property
    def pubdate_gmt(self):
        """
        Date when the entry was last updated, published or otherwise changed in GMT
        """
        return self._updated

    @property
    def pubdate_local(self):
        """
        Date when the entry was last updated, published or otherwise changed converted to local time
        """
        return time.localtime(calendar.timegm(self._updated))

    @property
    def author(self):
        """
        Author of the entry
        """
        return to_str(self._author)

    @property
    def feed(self):
        """
        Feed the entry comes from.
        Available sub-attributes: :py:attr:`~magrathea.core.feed.feed.FeedInfo.author`,
        :py:attr:`~magrathea.core.feed.feed.FeedInfo.title`, :py:attr:`~magrathea.core.feed.feed.FeedInfo.uri` and
        :py:attr:`~magrathea.core.feed.feed.FeedInfo.type`.
        """
        return self._feed

    @feed.setter
    def feed(self, feed):
        # Silently ignores anything that is not a FeedInfo instance.
        if isinstance(feed, FeedInfo):
            self._feed = feed

    def get_pubdate_gmt(self, format):
        """
        Get the :py:attr:`~magrathea.core.feed.entry.Entry.pubdate_gmt` (GMT) formatted via :py:func:`time.strftime`.

        :param str format: format string understood by :py:func:`time.strftime`
        """
        return time.strftime(format, self._updated)

    def get_pubdate_local(self, format):
        """
        Get the :py:attr:`~magrathea.core.feed.entry.Entry.pubdate_local` (local) formatted via
        :py:func:`time.strftime`.

        :param str format: format string understood by :py:func:`time.strftime`
        """
        return time.strftime(format, time.localtime(calendar.timegm(self._updated)))
| {
"content_hash": "39845a1c4ca7bf4623c38660d636ac66",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 117,
"avg_line_length": 29.745562130177515,
"alnum_prop": 0.5872289635965785,
"repo_name": "RootForum/magrathea",
"id": "79469de5422df8b871a2ff04e92c210bc18fff0c",
"size": "5051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magrathea/core/feed/entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200494"
},
{
"name": "Shell",
"bytes": "6474"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from twisted.internet import defer
from twisted.internet import reactor
class SoftwareModule:
    """Base class for software-only modules attached to a hardware module.

    Subclasses must override :attr:`module` with a non-empty name; the base
    class refuses to instantiate otherwise.
    """

    # Name under which this module broadcasts; '' forces subclasses to set it.
    module = ''

    def __init__(self, module):
        self._module = module
        if not self.module:
            raise ValueError("Must set a module name")

    def log(self, level, message):
        """Forward a log message to the owning project."""
        self._module.project.log(level, message)

    def broadcast(self, data):
        """Publish *data* to the project, keyed by this module's name."""
        self._module.project.message({self.module: data})
class TimeBased(SoftwareModule):
    """Software module driven by a shared (synchronised) clock tick."""

    # Synchronised: ticks arrive from the project's clock.
    sync = True
    # Subclasses must set the tick interval in seconds.
    tick_rate = None

    def __init__(self, module):
        super().__init__(module)
        rate = self.tick_rate
        if rate is None:
            raise ValueError("Must set a tick_rate for a time based module")
        # Very fast (but still legal) tick rates get a performance warning.
        if 0.0001 < rate < 0.01:
            msg = ("Setting `tick` of a timer to less than 1/100th of a second "
                   "is likely to impact the performance of other modules "
                   "connected to your Flotilla. Doesn't mean you can't try "
                   "though!")
            self._module.project.log('WARNING', msg)
        if rate < 0.0001:
            raise ValueError("Can't cope with a tick rate below 0.0001")

    def tick(self, tm):
        # Override this with whatever you want to happen every tick.
        # NOTE(review): relies on self.status, which this class does not
        # define -- subclasses are expected to provide it.
        self.broadcast({'status': self.status,
                        'time': tm})
class TimeBasedUnsynced(SoftwareModule):
    """Software module that runs its own clock on the Twisted reactor."""
    # Unsynced: this module schedules its own ticks instead of sharing a clock.
    sync = False
    # Elapsed time in seconds, advanced by tick_rate each iteration.
    time = 0
    # Start the clock immediately on construction.
    auto_start = True
    # One of "STOPPED", "RUNNING" or "PAUSED" (see start/stop/pause).
    status = "STOPPED"
    def __init__(self, module):
        super().__init__(module)
        if self.auto_start:
            self.start()
    def tick(self, tm):
        # Override this with whatever you want to happen every tick
        data = {'status': self.status,
                'time': tm,
                }
        self.broadcast(data)
    def check(self):
        # True while the clock loop should keep running.
        return self.status == "RUNNING"
    @defer.deferredGenerator
    def start(self):
        """Define our own clock: tick every ``tick_rate`` seconds until stopped.

        NOTE(review): uses ``self.tick_rate``, which this class does not
        define -- subclasses presumably must set it (cf. TimeBased); confirm.
        """
        self.status = "RUNNING"
        while self.check():
            d = defer.Deferred()
            self.tick(self.time)
            # Fire `d` after tick_rate seconds; yielding wfd suspends until then.
            reactor.callLater(self.tick_rate, d.callback, None)
            wfd = defer.waitForDeferred(d)
            self.time += self.tick_rate
            # Quantize elapsed time to the tick_rate's precision to
            # avoid accumulating float rounding drift.
            self.time = float(Decimal(self.time).quantize(Decimal(str(self.tick_rate))))
            yield wfd
        self.timer_stopped()
    def timer_stopped(self):
        # Hook invoked once the clock loop exits; override as needed.
        pass
    def reset(self):
        # Stop the clock and rewind elapsed time to zero.
        self.status = 'STOPPED'
        self.time = 0
    def stop(self):
        self.status = 'STOPPED'
    def pause(self):
        # Pausing flips status so the running loop exits; pausing again
        # restarts the loop via start().
        if self.status == 'RUNNING':
            self.status = 'PAUSED'
        elif self.status == 'PAUSED':
            self.start()
| {
"content_hash": "859fc755531974906a2d7b567559c99e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 100,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.5335640138408304,
"repo_name": "offmessage/blackpearl",
"id": "784256a12422f334508e7a08b3da721d29633791",
"size": "2890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blackpearl/things/software/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35493"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views.generic.base import TemplateView
from expdj import settings
from expdj.apps.experiments.views import (add_battery, add_experiment,
add_experiment_template,
add_game_template,
add_survey_template, batteries_view,
battery_results_dashboard,
change_experiment_order,
delete_battery,
delete_experiment_template,
dummy_battery, edit_battery,
edit_experiment,
edit_experiment_template,
enable_cookie_view,
experiment_results_dashboard,
experiments_view,
generate_battery_user, intro_battery,
modify_experiment, preview_battery,
preview_experiment, remove_condition,
remove_experiment, save_experiment,
save_experiment_template,
save_game_template,
save_survey_template, serve_battery,
serve_battery_anon,
serve_battery_gmail,
subject_management, sync,
update_experiment_template,
view_battery, view_experiment)
# URL routes for experiment templates, surveys, games, batteries and battery
# deployment. Order matters: Django resolves against the first matching
# pattern, so specific routes must stay above the catch-all ones.
urlpatterns = [
    # Experiment Templates
    url(r'^experiments$', experiments_view, name="experiments"),
    url(r'^experiments/save$',
        save_experiment_template,
        name='save_experiment_template'),
    url(r'^experiments/add$',
        add_experiment_template,
        name='add_experiment_template'),
    url(r'^experiments/(?P<eid>.+?)/update$',
        update_experiment_template,
        name='update_experiment_template'),
    url(r'^experiments/(?P<eid>.+?)/edit$',
        edit_experiment_template,
        name='edit_experiment_template'),
    url(r'^experiments/(?P<eid>.+?)/$',
        view_experiment, name='experiment_details'),
    url(r'^experiments/(?P<eid>.+?)/delete$',
        delete_experiment_template,
        name='delete_experiment'),
    url(r'^experiments/(?P<bid>\d+|[A-Z]{8})/(?P<eid>.+?)/remove$',
        remove_experiment, name='remove_experiment'),
    url(r'^experiments/(?P<eid>.+?)/preview$',
        preview_experiment, name='preview_experiment'),
    # Surveys
    url(r'^surveys/save$', save_survey_template, name='save_survey_template'),
    url(r'^surveys/add$', add_survey_template, name='add_survey_template'),
    # Games
    url(r'^games/save$', save_game_template, name='save_game_template'),
    url(r'^games/add$', add_game_template, name='add_game_template'),
    # Experiments in Batteries
    url(r'^experiments/(?P<bid>\d+|[A-Z]{8})/add$',
        add_experiment, name='add_experiment'),
    url(r'^experiments/(?P<bid>\d+|[A-Z]{8})/modify$',
        modify_experiment, name='modify_experiment'),
    url(r'^experiments/(?P<bid>\d+|[A-Z]{8})/save$',
        save_experiment, name='save_experiment'),
    url(
        r'^experiments/(?P<bid>\d+|[A-Z]{8})/(?P<eid>\d+|[A-Z]{8})/customize$',
        edit_experiment,
        name='edit_experiment'),
    # url(r'^experiments/(?P<bid>\d+|[A-Z]{8})/results$',experiment_results_dashboard,name='experiment_results_dashboard'),
    url(
        r'^experiments/(?P<bid>\d+|[A-Z]{8})/(?P<eid>\d+|[A-Z]{8})/view$',
        view_experiment,
        name='experiment_details'),
    url(
        r'^experiments/(?P<bid>\d+|[A-Z]{8})/(?P<eid>\d+|[A-Z]{8})/order$',
        change_experiment_order,
        name='change_experiment_order'),
    url(
        r'^experiments/(?P<bid>\d+|[A-Z]{8})/(?P<eid>\d+|[A-Z]{8})/remove$',
        remove_experiment,
        name='remove_experiment'),
    url(
        r'^conditions/(?P<bid>\d+|[A-Z]{8})/(?P<eid>\d+|[A-Z]{8})/(?P<cid>\d+|[A-Z]{8})/remove$',
        remove_condition,
        name='remove_condition'),
    # Batteries
    url(r'^batteries/$', batteries_view, name="batteries"),
    url(r'^my-batteries/(?P<uid>\d+|[A-Z]{8})/$',
        batteries_view, name="batteries"),
    url(r'^batteries/new$', edit_battery, name='new_battery'),
    url(r'^batteries/add$', add_battery, name='add_battery'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/edit$',
        edit_battery, name='edit_battery'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/subjects$',
        subject_management, name='subject_management'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/user$',
        generate_battery_user,
        name='generate_battery_user'),
    # url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/results$',battery_results_dashboard,name='battery_results_dashboard'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/$',
        view_battery, name='battery_details'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/delete$',
        delete_battery, name='delete_battery'),
    # Deployment
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/preview$',
        preview_battery,
        name='preview_battery'),
    # intro preview without subid
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/dummy$',
        dummy_battery,
        name='dummy_battery'),
    # running without subid
    url(
        r'^batteries/(?P<bid>\d+|[A-Z]{8})/(?P<userid>\d+|[A-Za-z0-9-]{30,36})/serve$',
        intro_battery,
        name='intro_battery'),
    url(
        r'^batteries/(?P<bid>\d+|[A-Z]{8})/(?P<userid>\d+|[A-Za-z0-9-]{30,36})/accept$',
        serve_battery,
        name='serve_battery'),
    url(
        r'^batteries/(?P<bid>\d+|[A-Z]{8})/(?P<keyid>\d+|[A-Za-z0-9-]{32})/anon$',
        serve_battery_anon,
        name='serve_battery_anon'),
    url(r'^batteries/(?P<bid>\d+|[A-Z]{8})/serve/gmail$',
        serve_battery_gmail, name='serve_battery_gmail'),
    url(r'^local/(?P<rid>\d+|[A-Z]{8})/$', sync, name='local'),
    url(r'^local/$', sync, name='local'), # local sync of data
    url(r'^cookie/$', enable_cookie_view, name='enable_cookie_view')
]
if settings.DEBUG:
    # Dead code: the body below is only a bare string literal (commented-out
    # static-file serving config), so this branch is effectively a no-op.
    '''
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
    ]
    '''
| {
"content_hash": "19b20933d3a383c3404fa7428f4ff50d",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 123,
"avg_line_length": 44.46,
"alnum_prop": 0.5107212475633528,
"repo_name": "expfactory/expfactory-docker",
"id": "fe230d40b7b0e9ce209f1ade064833009f37b323",
"size": "6669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "expdj/apps/experiments/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "102596"
},
{
"name": "Dockerfile",
"bytes": "478"
},
{
"name": "HTML",
"bytes": "138097"
},
{
"name": "JavaScript",
"bytes": "23536"
},
{
"name": "Python",
"bytes": "203703"
},
{
"name": "Shell",
"bytes": "2758"
}
],
"symlink_target": ""
} |
# Exercise oslc's Make-style dependency-file flags. `oslc` and `outputs`
# are conventions of the surrounding testsuite harness (presumably it
# defines oslc() and consumes `command`/`outputs`; confirm in runtest.py).
command = oslc ("-q -MMD test.osl")
# Test deps to custom file location
command += oslc ("-q -MMD -MFmydep.d test.osl")
# Test deps to stdout
command += oslc ("-MM test.osl")
# Test deps with custom target
command += oslc ("-q -MMD -MF mycustom.d -MT customtarget test.osl")
# Files whose contents the harness should compare against references.
outputs = [ "test.d", "mydep.d", "mycustom.d", "out.txt" ]
| {
"content_hash": "b4ac1e9a4d37fec6fd3822aebb0c1aeb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.6489675516224189,
"repo_name": "imageworks/OpenShadingLanguage",
"id": "88d7b8f5ce3c39ddf454ecbd2faf121054fc567f",
"size": "722",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "testsuite/oslc-M/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26710"
},
{
"name": "C++",
"bytes": "2857272"
},
{
"name": "CMake",
"bytes": "141512"
},
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "Cuda",
"bytes": "35730"
},
{
"name": "GLSL",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "43631"
},
{
"name": "Lex",
"bytes": "27746"
},
{
"name": "Makefile",
"bytes": "16279"
},
{
"name": "Objective-C",
"bytes": "15551"
},
{
"name": "Python",
"bytes": "74310"
},
{
"name": "Shell",
"bytes": "20247"
},
{
"name": "TeX",
"bytes": "247947"
},
{
"name": "Yacc",
"bytes": "52694"
}
],
"symlink_target": ""
} |
"""Common types for gRPC Async API"""
from typing import (Any, AnyStr, AsyncIterable, Callable, Iterable, Sequence,
Tuple, TypeVar, Union)
from grpc._cython.cygrpc import EOF
# Type variables for the request/response messages of an RPC method.
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')
# Converts an application-level message to wire bytes, and back.
SerializingFunction = Callable[[Any], bytes]
DeserializingFunction = Callable[[bytes], Any]
# A single metadata entry is a (key, value) pair; value may be str or bytes.
MetadatumType = Tuple[str, AnyStr]
MetadataType = Sequence[MetadatumType]
# Channel options expressed as (name, value) pairs.
ChannelArgumentType = Sequence[Tuple[str, Any]]
# The type of the cygrpc.EOF sentinel, for isinstance-style narrowing.
EOFType = type(EOF)
# Callback taking a single argument and returning nothing.
DoneCallbackType = Callable[[Any], None]
# Request streams may be plain or async iterables.
RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]]
| {
"content_hash": "28d764d509baf80ad62672a00b0c5362",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 36,
"alnum_prop": 0.7516339869281046,
"repo_name": "jboeuf/grpc",
"id": "205f6dc622766075b3e91d1479642f0591cf563c",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/experimental/aio/_typing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34391"
},
{
"name": "C",
"bytes": "2506485"
},
{
"name": "C#",
"bytes": "2056447"
},
{
"name": "C++",
"bytes": "31737951"
},
{
"name": "CMake",
"bytes": "678007"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "157798"
},
{
"name": "Go",
"bytes": "34791"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "61459"
},
{
"name": "M4",
"bytes": "50995"
},
{
"name": "Makefile",
"bytes": "1003022"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "597466"
},
{
"name": "Objective-C++",
"bytes": "77713"
},
{
"name": "PHP",
"bytes": "474525"
},
{
"name": "PowerShell",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2949502"
},
{
"name": "Ruby",
"bytes": "1029888"
},
{
"name": "Shell",
"bytes": "472145"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
import os
from argparse import ArgumentParser
from typing import Any, Set
import orjson
from django.conf import settings
from requests.packages.urllib3.util.retry import Retry
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.outgoing_http import OutgoingSession
class TorDataSession(OutgoingSession):
    """Outgoing HTTP session for fetching TOR exit-node data, with retries."""

    def __init__(self, max_retries: int) -> None:
        # NOTE: this mutates a class attribute on urllib3's Retry, so the
        # 64-second backoff cap applies process-wide, not just here.
        Retry.BACKOFF_MAX = 64
        retry_policy = Retry(
            total=max_retries,
            backoff_factor=2.0,
            # Retry on rate limiting and transient server-side failures.
            status_forcelist={
                429,  # The formal rate-limiting response code
                500,  # Server error
                502,  # Bad gateway
                503,  # Service unavailable
            },
        )
        super().__init__(role="tor_data", timeout=3, max_retries=retry_policy)
class Command(ZulipBaseCommand):
    help = """Fetch the list of TOR exit nodes, and write the list of IP addresses
to a file for access from Django for rate-limiting purposes.
Does nothing unless RATE_LIMIT_TOR_TOGETHER is enabled.
"""

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register the --max-retries option."""
        parser.add_argument(
            "--max-retries",
            type=int,
            default=10,
            help="Number of times to retry fetching data from TOR",
        )

    def handle(self, *args: Any, **options: Any) -> None:
        """Download the exit-node list and atomically publish it as JSON."""
        if not settings.RATE_LIMIT_TOR_TOGETHER:
            return

        ca_bundle = settings and os.environ.get("CUSTOM_CA_CERTIFICATES")
        fetcher = TorDataSession(max_retries=options["max_retries"])
        response = fetcher.get(
            "https://check.torproject.org/exit-addresses",
            verify=ca_bundle,
        )
        response.raise_for_status()

        # Records look like:
        #  ExitNode 4273E6D162ED2717A1CF4207A254004CD3F5307B
        #  Published 2021-11-02 11:01:07
        #  LastStatus 2021-11-02 23:00:00
        #  ExitAddress 176.10.99.200 2021-11-02 23:17:02
        # Only the ExitAddress lines carry the IPs we need.
        exit_nodes: Set[str] = {
            line.split()[1]
            for line in response.text.splitlines()
            if line.startswith("ExitAddress ")
        }

        # Write to a tmpfile then rename, so readers never see a partial file.
        temp_path = settings.TOR_EXIT_NODE_FILE_PATH + ".tmp"
        with open(temp_path, "wb") as f:
            f.write(orjson.dumps(list(exit_nodes)))
        os.rename(temp_path, settings.TOR_EXIT_NODE_FILE_PATH)
| {
"content_hash": "919f7a83f3a7089f72171b2c7bd974db",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 82,
"avg_line_length": 34.054054054054056,
"alnum_prop": 0.6083333333333333,
"repo_name": "eeshangarg/zulip",
"id": "b5299ff48c16f0eac26c4774c43f2849998b3093",
"size": "2520",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/management/commands/fetch_tor_exit_nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
from _pydevd_bundle.pydevd_constants import RETURN_VALUES_DICT, dict_iter_items, \
GET_FRAME_NORMAL_GROUP, GET_FRAME_SPECIAL_GROUP
# Type names treated as "hidden" variables (see is_hidden_var below).
HIDDEN_TYPES = ('function', 'type', 'classobj', 'module', 'typing')
DOUBLE_UNDERSCORE = '__'
# Exception pseudo-variable name, explicitly excluded from dunder filtering.
DOUBLE_EX = '__exception__'
# Placeholder names emitted once per variable group (see DummyVarHandler).
DUMMY_RET_VAL = '_dummy_ret_val'
DUMMY_IPYTHON_HIDDEN = '_dummy_ipython_val'
DUMMY_SPECIAL_VAR = '_dummy_special_var'
# Placeholder names that consumers should skip when processing variables.
DO_NOT_PROCESS_VARS = (DUMMY_SPECIAL_VAR, DUMMY_RET_VAL, DUMMY_IPYTHON_HIDDEN)
class Handler(object):
    """Link in a chain-of-responsibility over frame variables.

    Each handler collects into ``self.lst`` the rendered form (via ``fun``)
    of the variables its group matches, and forwards everything else to the
    next handler in the chain.
    """

    # Next link in the chain; None terminates the chain.
    _next_handler = None

    def __init__(self, fun):
        self.fun = fun
        self.lst = []

    def set_next(self, handler):
        """Attach *handler* after this one and return it, enabling chaining."""
        self._next_handler = handler
        return handler

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers=None):
        # Base behavior: claim nothing, just pass the variable along.
        self.give_to_next(key, value, hidden_ns, evaluate_full_value, user_type_renderers)

    def regular_handle(self, key, value, hidden_ns, evaluate_full_value, is_dict_iter_items=False, user_type_renderers=None):
        """Collect the variable if it belongs to this group, else forward it."""
        if not self.is_belong_to_group(key, value, hidden_ns):
            self.give_to_next(key, value, hidden_ns, evaluate_full_value, user_type_renderers)
            return
        if is_dict_iter_items:
            # `value` is a dict-like container; render each item separately.
            for inner_key, inner_value in dict_iter_items(value):
                self.lst.append(self.fun(inner_key, inner_value, hidden_ns, evaluate_full_value, user_type_renderers))
        else:
            self.lst.append(self.fun(key, value, hidden_ns, evaluate_full_value, user_type_renderers))

    def give_to_next(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        if self._next_handler:
            self._next_handler.handle(key, value, hidden_ns, evaluate_full_value, user_type_renderers)

    @staticmethod
    def is_belong_to_group(key, value, hidden_ns):
        # Base handler matches nothing; subclasses override.
        return False

    def _iter_chain(self):
        # Walk this handler and all of its successors.
        node = self
        while node is not None:
            yield node
            node = node._next_handler

    def get_xml(self):
        """Concatenate the collected fragments of the whole chain."""
        return ''.join(fragment for node in self._iter_chain() for fragment in node.lst)

    def get_list(self):
        """Collect every rendered entry of the whole chain into one list."""
        collected = []
        for node in self._iter_chain():
            collected.extend(node.lst)
        return collected

    def update_handlers(self):
        """Reset the collected state of every handler in the chain."""
        for node in self._iter_chain():
            node.lst = []
            if hasattr(node, 'added_var'):
                node.added_var = False
class DunderVarsHandler(Handler):
    """Collects dunder variables (``__name__``-style, see is_dunder_var)."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        # Bug fix: renderers must be passed by keyword -- positionally the
        # fifth argument of regular_handle() is ``is_dict_iter_items``, so
        # the old call mis-set that flag and dropped the renderers.
        self.regular_handle(key, value, hidden_ns, evaluate_full_value,
                            user_type_renderers=user_type_renderers)

    @staticmethod
    def is_belong_to_group(key, value, hidden_ns):
        return is_dunder_var(str(key))
class SpecialVarsHandler(Handler):
    """Collects variables whose type is in HIDDEN_TYPES (see is_hidden_var)."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        # Bug fix: renderers must be passed by keyword -- positionally the
        # fifth argument of regular_handle() is ``is_dict_iter_items``, so
        # the old call mis-set that flag and dropped the renderers.
        self.regular_handle(key, value, hidden_ns, evaluate_full_value,
                            user_type_renderers=user_type_renderers)

    @staticmethod
    def is_belong_to_group(key, value, hidden_ns):
        return is_hidden_var(value)
class IpythonVarsHandler(Handler):
    """Collects variables that belong to the IPython hidden namespace."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        # Bug fix: renderers must be passed by keyword -- positionally the
        # fifth argument of regular_handle() is ``is_dict_iter_items``, so
        # the old call mis-set that flag and dropped the renderers.
        self.regular_handle(key, value, hidden_ns, evaluate_full_value,
                            user_type_renderers=user_type_renderers)

    @staticmethod
    def is_belong_to_group(key, value, hidden_ns):
        return hidden_ns is not None and key in hidden_ns
class ReturnVarsHandler(Handler):
    """Collects function return values stored under RETURN_VALUES_DICT."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        # The value is a dict of name -> return value; iterate its items.
        self.regular_handle(key, value, hidden_ns, evaluate_full_value,
                            is_dict_iter_items=True,
                            user_type_renderers=user_type_renderers)

    @staticmethod
    def is_belong_to_group(key, value, hidden_ns):
        return key == RETURN_VALUES_DICT
class AnotherVarsHandler(Handler):
    """Terminal handler that records any remaining special/dunder variable."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        if not is_special_var(key, value):
            return
        self.lst.append(self.fun(key, value, hidden_ns, evaluate_full_value, user_type_renderers))
class DefaultVarHandler(Handler):
    """Catch-all handler: renders every variable that reaches it."""

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        # Note the narrower callback signature: hidden_ns is intentionally
        # not forwarded to the rendering function here.
        rendered = self.fun(key, value, evaluate_full_value, user_type_renderers)
        self.lst.append(rendered)
class DummyVarHandler(Handler):
    """Emits a single placeholder entry for a group, instead of its members.

    ``cls`` supplies the group membership test; the first matching variable
    produces one placeholder (via ``fun``, called with no arguments) and any
    further matches are ignored. Non-matching variables are forwarded.
    """

    def __init__(self, fun, cls):
        super(DummyVarHandler, self).__init__(fun)
        self.cls = cls
        self.added_var = False

    def handle(self, key, value, hidden_ns, evaluate_full_value, user_type_renderers):
        if not self.cls.is_belong_to_group(key, value, hidden_ns):
            super(DummyVarHandler, self).handle(key, value, hidden_ns, evaluate_full_value, user_type_renderers)
            return
        if not self.added_var:
            self.lst.append(self.fun())
            self.added_var = True
def is_special_var(key, value):
    """True when the variable is hidden by type or has a dunder name."""
    if is_hidden_var(value):
        return True
    return is_dunder_var(str(key))
def is_dunder_var(key_str):
    """True for ``__name__``-style names, excluding the exception
    pseudo-variable and the bare ``__`` / ``___`` names."""
    if not (key_str.startswith(DOUBLE_UNDERSCORE) and key_str.endswith(DOUBLE_UNDERSCORE)):
        return False
    return key_str not in (DOUBLE_EX, '__', '___')
def is_hidden_var(value):
    """True when the value's type name is one of HIDDEN_TYPES."""
    type_name = get_type(value)
    return type_name in HIDDEN_TYPES
def get_type(o):
    """Return the name of *o*'s type.

    Prefers ``o.__class__`` and falls back to ``type(o)`` if the attribute
    lookup raises (e.g. proxy objects with hostile ``__getattr__``).
    """
    try:
        type_object = o.__class__
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        type_object = type(o)
    return type_object.__name__
# Protocol discriminators for get_vars_handler()/VarsHandler: the XML-based
# debugger protocol vs. the Thrift-based one.
XML_COMMUNICATION_VARS_HANDLER = 0
THRIFT_COMMUNICATION_VARS_HANDLER = 1
class VarsHandlerContainer:
    # Lazily-created singleton VarsHandler per communication protocol;
    # populated on first use by get_vars_handler().
    _instance_xml_handler = None
    _instance_thrift_handler = None
def get_vars_handler(func, handler_type, group_type):
    """Return the cached VarsHandler chain for the given protocol and group.

    The VarsHandler is created on first use and cached on
    VarsHandlerContainer; note that ``func`` is only consulted at creation
    time, so later calls reuse the originally supplied callback.
    Returns None for an unknown handler_type (as before).
    """
    if handler_type == XML_COMMUNICATION_VARS_HANDLER:
        cache_attr = '_instance_xml_handler'
    elif handler_type == THRIFT_COMMUNICATION_VARS_HANDLER:
        cache_attr = '_instance_thrift_handler'
    else:
        return None
    if getattr(VarsHandlerContainer, cache_attr) is None:
        setattr(VarsHandlerContainer, cache_attr, VarsHandler(func, handler_type))
    return getattr(VarsHandlerContainer, cache_attr).get_instance(group_type)
class VarsHandler:
    """Factory/cache of handler chains for the three frame-variable groups.

    ``func`` is the protocol-specific rendering callback; ``handler_type``
    selects between the XML and Thrift calling conventions for it. A separate
    handler chain is built lazily per group (normal / special / return).
    """
    def __init__(self, func, handler_type):
        self.func = func
        # One lazily-built chain per group type.
        self._instance_normal = None
        self._instance_special = None
        self._instance_return = None
        self.handler_type = handler_type
    def get_instance(self, group_type):
        # Build the requested chain on first use, then reset its collected
        # state (update_handlers) so it can be reused for a fresh frame.
        if group_type == GET_FRAME_NORMAL_GROUP:
            if self._instance_normal is None:
                self._init_normal()
            instance = self._instance_normal
        elif group_type == GET_FRAME_SPECIAL_GROUP:
            if self._instance_special is None:
                self._init_special()
            instance = self._instance_special
        else:
            # Anything else is treated as the return-values group.
            if self._instance_return is None:
                self._init_return()
            instance = self._instance_return
        instance.update_handlers()
        return instance
    def _init_normal(self):
        # Normal view: variables belonging to the special groups are replaced
        # by a single dummy placeholder each; everything else is rendered by
        # the DefaultVarHandler at the end of the chain.
        self._instance_normal = DummyVarHandler(
            lambda: self.func(DUMMY_RET_VAL, DUMMY_RET_VAL), ReturnVarsHandler
        )
        self._instance_normal.set_next(
            DummyVarHandler(
                lambda: self.func(DUMMY_IPYTHON_HIDDEN, DUMMY_IPYTHON_HIDDEN),
                IpythonVarsHandler
            )
        ).set_next(
            DummyVarHandler(
                lambda: self.func(DUMMY_SPECIAL_VAR, DUMMY_SPECIAL_VAR),
                SpecialVarsHandler
            )
        ).set_next(
            DummyVarHandler(
                lambda: self.func(DUMMY_SPECIAL_VAR, DUMMY_SPECIAL_VAR),
                DunderVarsHandler
            )
        ).set_next(
            DefaultVarHandler(
                lambda key, var, eval, type_renderers: self.func(var,
                                                                 str(key),
                                                                 evaluate_full_value=eval,
                                                                 user_type_renderers=type_renderers)
            )
        )
    def _get_lambda_for_special(self, is_special_lambda=False):
        # Adapt the handler callback signature (key, var, hidden_ns,
        # eval_full, type_renderers) to the protocol-specific self.func.
        # Only the XML protocol carries the extra isSpecialVal /
        # isIPythonHidden attribute.
        if self.handler_type == XML_COMMUNICATION_VARS_HANDLER:
            if is_special_lambda:
                additional_in_xml = ' isSpecialVal="True"'
            else:
                additional_in_xml = ' isIPythonHidden="True"'
            return lambda key, var, hidden_ns, eval_full, type_renderers: self.func(var,
                                                                                    str(key),
                                                                                    additional_in_xml=additional_in_xml,
                                                                                    evaluate_full_value=eval_full,
                                                                                    user_type_renderers=type_renderers)
        elif self.handler_type == THRIFT_COMMUNICATION_VARS_HANDLER:
            return lambda key, var, hidden_ns, eval_full, type_renderers: self.func(var,
                                                                                    str(key),
                                                                                    evaluate_full_value=eval_full,
                                                                                    user_type_renderers=type_renderers)
        else:
            raise ValueError('Handler type is incorrect')
    def _init_special(self):
        # Special view: dunder, hidden-type and IPython variables are all
        # rendered (no dummies). NOTE(review): the dunder handler gets the
        # isIPythonHidden lambda while the others get isSpecialVal -- this
        # mapping looks intentional upstream but is worth confirming.
        initial_lambda = self._get_lambda_for_special(is_special_lambda=False)
        special_lambda = self._get_lambda_for_special(is_special_lambda=True)
        self._instance_special = DunderVarsHandler(initial_lambda)
        self._instance_special.set_next(
            SpecialVarsHandler(special_lambda)
        ).set_next(
            IpythonVarsHandler(special_lambda)
        ).set_next(
            AnotherVarsHandler(special_lambda)
        )
    def _get_lambda_for_return(self):
        # Return-values group: entries arrive as (name, value) pairs from
        # the RETURN_VALUES_DICT; the XML protocol tags them isRetVal.
        if self.handler_type == XML_COMMUNICATION_VARS_HANDLER:
            return lambda name, val, hidden_ns, eval_full, type_renderers: self.func(val,
                                                                                     name,
                                                                                     additional_in_xml=' isRetVal="True"',
                                                                                     user_type_renderers=type_renderers)
        elif self.handler_type == THRIFT_COMMUNICATION_VARS_HANDLER:
            return lambda name, val, hidden_ns, eval_full, type_renderers: self.func(val, name)
        else:
            # Unlike _get_lambda_for_special, unknown types yield None here.
            return None
    def _init_return(self):
        self._instance_return = ReturnVarsHandler(self._get_lambda_for_return())
| {
"content_hash": "5b149877c87f61132a6869f18757fe0a",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 125,
"avg_line_length": 39.12544802867384,
"alnum_prop": 0.575302308537926,
"repo_name": "GunoH/intellij-community",
"id": "a7df8067ff570cf17d590013fe34e8867443aa17",
"size": "11037",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/_pydevd_bundle/pydevd_frame_type_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Common classes for automatic speech recognition (ASR) datasets.
The audio import uses sox to generate normalized waveforms, please install
it as appropriate (e.g. using apt-get or yum).
"""
import numpy as np
from tensor2tensor.data_generators import audio_encoder
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_audio
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
import tensorflow as tf
class ByteTextEncoderWithEos(text_encoder.ByteTextEncoder):
  """Encodes each byte to an id and appends the EOS token."""

  def encode(self, s):
    """Byte-encode *s*, then terminate the id sequence with EOS_ID."""
    byte_ids = super(ByteTextEncoderWithEos, self).encode(s)
    return byte_ids + [text_encoder.EOS_ID]
class SpeechRecognitionProblem(problem.Problem):
"""Base class for speech recognition problems."""
  def hparams(self, defaults, model_hparams):
    """Adds audio-frontend hparams to the model and sets problem modalities.

    Args:
      defaults: problem hparams; receives modality and vocab-size settings.
      model_hparams: model hparams; audio preprocessing knobs are added here.
    """
    p = model_hparams
    # Filterbank extraction in bottom instead of preprocess_example is faster.
    p.add_hparam("audio_preproc_in_bottom", False)
    # The trainer seems to reserve memory for all members of the input dict
    p.add_hparam("audio_keep_example_waveforms", False)
    p.add_hparam("audio_sample_rate", 16000)
    p.add_hparam("audio_preemphasis", 0.97)
    # Dither amplitude: one least-significant bit of int16 full scale.
    p.add_hparam("audio_dither", 1.0 / np.iinfo(np.int16).max)
    # Frame length/step -- presumably milliseconds; confirm in common_audio.
    p.add_hparam("audio_frame_length", 25.0)
    p.add_hparam("audio_frame_step", 10.0)
    p.add_hparam("audio_lower_edge_hertz", 20.0)
    p.add_hparam("audio_upper_edge_hertz", 8000.0)
    p.add_hparam("audio_num_mel_bins", 80)
    p.add_hparam("audio_add_delta_deltas", True)
    p.add_hparam("num_zeropad_frames", 250)
    # From here on, `p` refers to the problem defaults, not model_hparams.
    p = defaults
    p.modality = {"inputs": modalities.SpeechRecognitionModality,
                  "targets": modalities.SymbolModality}
    # inputs are continuous features (no vocab); targets are raw bytes (256).
    p.vocab_size = {"inputs": None,
                    "targets": 256}
  @property
  def is_character_level(self):
    # Targets are raw bytes (see ByteTextEncoderWithEos in feature_encoders).
    return True
  @property
  def input_space_id(self):
    # Inputs are spectral audio features.
    return problem.SpaceID.AUDIO_SPECTRAL
  @property
  def target_space_id(self):
    # Targets are English characters.
    return problem.SpaceID.EN_CHR
def feature_encoders(self, _):
return {
"inputs": None, # Put None to make sure that the logic in
# decoding.py doesn't try to convert the floats
# into text...
"waveforms": audio_encoder.AudioEncoder(),
"targets": ByteTextEncoderWithEos(),
}
def example_reading_spec(self):
data_fields = {
"waveforms": tf.VarLenFeature(tf.float32),
"targets": tf.VarLenFeature(tf.int64),
}
data_items_to_decoders = None
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
p = hparams
if p.audio_preproc_in_bottom:
example["inputs"] = tf.expand_dims(
tf.expand_dims(example["waveforms"], -1), -1)
else:
waveforms = tf.expand_dims(example["waveforms"], 0)
mel_fbanks = common_audio.compute_mel_filterbank_features(
waveforms,
sample_rate=p.audio_sample_rate,
dither=p.audio_dither,
preemphasis=p.audio_preemphasis,
frame_length=p.audio_frame_length,
frame_step=p.audio_frame_step,
lower_edge_hertz=p.audio_lower_edge_hertz,
upper_edge_hertz=p.audio_upper_edge_hertz,
num_mel_bins=p.audio_num_mel_bins,
apply_mask=False)
if p.audio_add_delta_deltas:
mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)
fbank_size = common_layers.shape_list(mel_fbanks)
assert fbank_size[0] == 1
# This replaces CMVN estimation on data
var_epsilon = 1e-09
mean = tf.reduce_mean(mel_fbanks, keepdims=True, axis=1)
variance = tf.reduce_mean(tf.square(mel_fbanks - mean),
keepdims=True, axis=1)
mel_fbanks = (mel_fbanks - mean) * tf.rsqrt(variance + var_epsilon)
# Later models like to flatten the two spatial dims. Instead, we add a
# unit spatial dim and flatten the frequencies and channels.
example["inputs"] = tf.concat([
tf.reshape(mel_fbanks, [fbank_size[1], fbank_size[2], fbank_size[3]]),
tf.zeros((p.num_zeropad_frames, fbank_size[2], fbank_size[3]))], 0)
if not p.audio_keep_example_waveforms:
del example["waveforms"]
return super(SpeechRecognitionProblem, self
).preprocess_example(example, mode, hparams)
def eval_metrics(self):
defaults = super(SpeechRecognitionProblem, self).eval_metrics()
return defaults + [
metrics.Metrics.EDIT_DISTANCE,
metrics.Metrics.WORD_ERROR_RATE
]
| {
"content_hash": "95340df3d2ad80f7ae4835a136261e34",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 80,
"avg_line_length": 36.2,
"alnum_prop": 0.6663833404164896,
"repo_name": "mlperf/training_results_v0.5",
"id": "04891d50e8eca52f07337b378297b80208821b95",
"size": "5312",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/data_generators/speech_recognition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from geocoder.base import Base
from geocoder.keys import here_app_id, here_app_code
class Here(Base):
    """
    HERE Geocoding REST API
    =======================
    Send a request to the geocode endpoint to find an address
    using a combination of country, state, county, city,
    postal code, district, street and house number.

    API Reference
    -------------
    https://developer.here.com/rest-apis/documentation/geocoder
    """
    provider = 'here'
    method = 'geocode'
    # Optional kwargs that are forwarded verbatim as request qualifiers.
    qualified_address = ['city', 'district', 'postal', 'state', 'country']

    def __init__(self, location, **kwargs):
        """Builds the request parameters and triggers the geocode call.

        Raises ValueError unless both app_id and app_code credentials are
        available (either via kwargs or the module-level keys).
        """
        self.url = kwargs.get('url', 'http://geocoder.cit.api.here.com/6.2/geocode.json')
        self.location = location
        # HERE Credentials
        app_id = kwargs.get('app_id', here_app_id)
        app_code = kwargs.get('app_code', here_app_code)
        if not bool(app_id and app_code):
            raise ValueError("Provide app_id & app_code")
        # URL Params
        self.params = {
            'searchtext': location,
            'app_id': app_id,
            'app_code': app_code,
            'gen': 9,
            'language': kwargs.get('language', 'en')
        }
        for value in Here.qualified_address:
            if kwargs.get(value) is not None:
                self.params[value] = kwargs.get(value)
        # Base-class hook that performs the request and parses the response.
        self._initialize(**kwargs)

    def _exceptions(self):
        # Build intial Tree with results
        view = self.parse['Response']['View']
        if view:
            result = view[0]['Result']
            if result:
                self._build_tree(result[0])
                # Flatten AdditionalData key/value pairs into the parse dict.
                for item in self.parse['Location']['Address']['AdditionalData']:
                    self.parse[item['key']] = item['value']

    def _catch_errors(self):
        # Anything other than an 'OK' type is surfaced as the error.
        status = self.parse.get('type')
        if not status == 'OK':
            self.error = status

    @property
    def lat(self):
        return self.parse['DisplayPosition'].get('Latitude')

    @property
    def lng(self):
        return self.parse['DisplayPosition'].get('Longitude')

    @property
    def address(self):
        return self.parse['Address'].get('Label')

    @property
    def postal(self):
        return self.parse['Address'].get('PostalCode')

    @property
    def housenumber(self):
        return self.parse['Address'].get('HouseNumber')

    @property
    def street(self):
        return self.parse['Address'].get('Street')

    @property
    def neighborhood(self):
        # HERE has no separate neighborhood field; alias the district.
        return self.district

    @property
    def district(self):
        return self.parse['Address'].get('District')

    @property
    def city(self):
        return self.parse['Address'].get('City')

    @property
    def county(self):
        return self.parse['Address'].get('County')

    @property
    def state(self):
        return self.parse['Address'].get('State')

    @property
    def country(self):
        return self.parse['Address'].get('Country')

    @property
    def quality(self):
        return self.parse.get('MatchLevel')

    @property
    def accuracy(self):
        return self.parse.get('MatchType')

    @property
    def bbox(self):
        """Bounding box derived from the TopLeft/BottomRight corners."""
        south = self.parse['BottomRight'].get('Latitude')
        north = self.parse['TopLeft'].get('Latitude')
        west = self.parse['TopLeft'].get('Longitude')
        east = self.parse['BottomRight'].get('Longitude')
        return self._get_bbox(south, west, north, east)
if __name__ == '__main__':
    # Ad-hoc smoke test: geocode a well-known location and dump debug output.
    g = Here("New York City")
    g.debug()
| {
"content_hash": "73e298a5b06c1a186116dfd84fc68b32",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 89,
"avg_line_length": 27.95275590551181,
"alnum_prop": 0.58,
"repo_name": "akittas/geocoder",
"id": "d5d43cb025573f6d5418288ad1c11842665542ba",
"size": "3584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocoder/here.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "614"
},
{
"name": "Python",
"bytes": "177426"
}
],
"symlink_target": ""
} |
from libcloud.common.types import LibcloudError
from libcloud.common.aws import SignedAWSConnection
from libcloud.storage.drivers.s3 import BaseS3Connection
from libcloud.storage.drivers.s3 import BaseS3StorageDriver
from libcloud.storage.drivers.s3 import API_VERSION
# Public API of this module.
__all__ = [
    "MinIOStorageDriver"
]
class MinIOConnectionAWS4(SignedAWSConnection, BaseS3Connection):
    """S3-compatible connection that always signs requests with AWS V4."""
    service_name = 's3'
    version = API_VERSION

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None, token=None,
                 retry_delay=None, backoff=None, **kwargs):
        # Forwards all arguments to SignedAWSConnection, pinning the final
        # signature_version positional argument to 4.
        super(MinIOConnectionAWS4, self).__init__(user_id, key,
                                                  secure, host,
                                                  port, url,
                                                  timeout,
                                                  proxy_url, token,
                                                  retry_delay,
                                                  backoff,
                                                  4)  # force aws4
class MinIOStorageDriver(BaseS3StorageDriver):
    """Storage driver for MinIO, an S3-compatible object store."""
    name = 'MinIO Storage Driver'
    website = 'https://min.io/'
    connectionCls = MinIOConnectionAWS4
    region_name = ""

    def __init__(self, key, secret=None, secure=True, host=None, port=None):
        """Instantiate the driver.

        Raises LibcloudError when ``host`` is omitted, since MinIO has no
        default public endpoint.
        """
        if host is None:
            raise LibcloudError('host argument is required', driver=self)
        # NOTE(review): this assigns to the *class* attribute, so the host is
        # shared by every connection of this class — confirm this is intended.
        self.connectionCls.host = host
        super(MinIOStorageDriver, self).__init__(key=key,
                                                 secret=secret,
                                                 secure=secure,
                                                 host=host,
                                                 port=port)
| {
"content_hash": "af04dee114fd93b462a4a0e512a88f79",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 76,
"avg_line_length": 39.891304347826086,
"alnum_prop": 0.49209809264305177,
"repo_name": "Kami/libcloud",
"id": "9a496562b4d0ac4a4e0c044ebbd0c1ae755c4ffe",
"size": "2617",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/storage/drivers/minio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9122888"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
from django.dispatch import receiver
from django.db.models import signals
from schedule.models import ScheduleEntry, ScheduleTransaction
@receiver(signals.post_save, sender=ScheduleEntry)
def create_add_transaction(sender, instance, **kwargs):
    """Record an ADD transaction whenever a ScheduleEntry is saved.

    NOTE(review): post_save also fires on updates, not only on creation —
    confirm that recording an ADD on every save is intended.
    """
    # Removed the dead trailing `pass` statement from the original.
    ScheduleTransaction(user=instance.user, term=instance.term,
                        course_crn=instance.course_crn,
                        action=ScheduleTransaction.ADD).save()
@receiver(signals.pre_delete, sender=ScheduleEntry)
def create_drop_transaction(sender, instance, **kwargs):
    """Record a DROP transaction whenever a ScheduleEntry is deleted."""
    # Removed the dead trailing `pass` statement from the original.
    ScheduleTransaction(user=instance.user, term=instance.term,
                        course_crn=instance.course_crn,
                        action=ScheduleTransaction.DROP).save()
| {
"content_hash": "c13a10e9433f0ceb900a488804bf833e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 135,
"avg_line_length": 49.53846153846154,
"alnum_prop": 0.8043478260869565,
"repo_name": "gravitylow/OpenCourse",
"id": "fcfd4ebd7326bef897835e3e9355305bee155b20",
"size": "644",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "schedule/signals/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68920"
},
{
"name": "HTML",
"bytes": "458887"
},
{
"name": "JavaScript",
"bytes": "137559"
},
{
"name": "Python",
"bytes": "198465"
}
],
"symlink_target": ""
} |
from .calc import Mass_eff
| {
"content_hash": "36c20a2702a7e66d5f384c7f695326c3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 27,
"alnum_prop": 0.7777777777777778,
"repo_name": "wayn3/masseff",
"id": "03159b924573e3bb76ab4e63b4f9d2c88f8eed6e",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "masseff/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6607"
}
],
"symlink_target": ""
} |
import os
from threading import RLock
from weavelib.exceptions import ObjectAlreadyExists
class SynonymRegistry(object):
    """Thread-safe registry mapping synonym paths to target identifiers."""

    def __init__(self):
        # RLock so nested calls within one thread cannot deadlock.
        self.synonym_lock = RLock()
        self.synonyms = {}

    def register(self, synonym, target):
        """Register `synonym` (normalized under /synonyms) to point at `target`.

        Returns the normalized synonym path.
        Raises ObjectAlreadyExists if the synonym is already registered.
        """
        synonym = os.path.join("/synonyms", synonym.lstrip('/'))
        with self.synonym_lock:
            if synonym in self.synonyms:
                # BUG FIX: original raised `ObjecObjectAlreadyExists`, a typo
                # that produced a NameError instead of the imported exception.
                raise ObjectAlreadyExists(synonym)
            self.synonyms[synonym] = target
            return synonym

    def translate(self, synonym):
        """Return the registered target for `synonym`, or `synonym` itself."""
        with self.synonym_lock:
            return self.synonyms.get(synonym, synonym)
| {
"content_hash": "1fb025ca3f21a6bc46bb6042cc751cc2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 64,
"avg_line_length": 26.75,
"alnum_prop": 0.6339563862928349,
"repo_name": "supersaiyanmode/HomePiServer",
"id": "3936713b058e893ba4a6cbb3d140db92fac4b2a5",
"size": "642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messaging/synonyms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3436"
},
{
"name": "HTML",
"bytes": "9965"
},
{
"name": "JavaScript",
"bytes": "45427"
},
{
"name": "Python",
"bytes": "132028"
}
],
"symlink_target": ""
} |
"""The implementation of the SocketOptionsAdapter."""
import socket
import warnings
import sys
import requests
from requests import adapters
from .._compat import connection
from .._compat import poolmanager
from .. import exceptions as exc
class SocketOptionsAdapter(adapters.HTTPAdapter):
    """An adapter for requests that allows users to specify socket options.

    Since version 2.4.0 of requests, it is possible to specify a custom list
    of socket options that need to be set before establishing the connection.

    Example usage::

        >>> import socket
        >>> import requests
        >>> from requests_toolbelt.adapters import socket_options
        >>> s = requests.Session()
        >>> opts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)]
        >>> adapter = socket_options.SocketOptionsAdapter(socket_options=opts)
        >>> s.mount('http://', adapter)

    You can also take advantage of the list of default options on this class
    to keep using the original options in addition to your custom options. In
    that case, ``opts`` might look like::

        >>> opts = socket_options.SocketOptionsAdapter.default_options + opts

    """
    if connection is not None:
        default_options = getattr(
            connection.HTTPConnection,
            'default_socket_options',
            [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
        )
    else:
        default_options = []
        # BUG FIX: warnings.warn takes (message, category); the original
        # passed the warning class first and the message string second,
        # which raises TypeError ("category must be a Warning subclass").
        warnings.warn(
            "This version of Requests is only compatible with a "
            "version of urllib3 which is too old to support "
            "setting options on a socket. This adapter is "
            "functionally useless.",
            exc.RequestsVersionTooOld)

    def __init__(self, **kwargs):
        # Pop our option so the remaining kwargs pass through untouched.
        self.socket_options = kwargs.pop('socket_options',
                                         self.default_options)
        super(SocketOptionsAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        """Build the pool manager, forwarding socket options when supported."""
        if requests.__build__ >= 0x020400:
            # NOTE(Ian): Perhaps we should raise a warning
            self.poolmanager = poolmanager.PoolManager(
                num_pools=connections,
                maxsize=maxsize,
                block=block,
                socket_options=self.socket_options
            )
        else:
            # Older requests cannot plumb socket_options through; fall back.
            super(SocketOptionsAdapter, self).init_poolmanager(
                connections, maxsize, block
            )
class TCPKeepAliveAdapter(SocketOptionsAdapter):
    """An adapter for requests that turns on TCP Keep-Alive by default.

    The adapter sets 4 socket options:

    - ``SOL_SOCKET`` ``SO_KEEPALIVE`` - This turns on TCP Keep-Alive
    - ``IPPROTO_TCP`` ``TCP_KEEPINTVL`` 20 - Sets the keep alive interval
    - ``IPPROTO_TCP`` ``TCP_KEEPCNT`` 5 - Sets the number of keep alive probes
    - ``IPPROTO_TCP`` ``TCP_KEEPIDLE`` 60 - Sets the keep alive time if the
      socket library has the ``TCP_KEEPIDLE`` constant

    The latter three can be overridden by keyword arguments (respectively):

    - ``idle``
    - ``interval``
    - ``count``

    You can use this adapter like so::

        >>> from requests_toolbelt.adapters import socket_options
        >>> tcp = socket_options.TCPKeepAliveAdapter(idle=120, interval=10)
        >>> s = requests.Session()
        >>> s.mount('http://', tcp)

    """
    def __init__(self, **kwargs):
        """Accepts ``idle``, ``interval``, ``count`` and ``socket_options``
        keyword overrides; everything else is passed to the parent adapter."""
        socket_options = kwargs.pop('socket_options',
                                    SocketOptionsAdapter.default_options)
        idle = kwargs.pop('idle', 60)
        interval = kwargs.pop('interval', 20)
        count = kwargs.pop('count', 5)
        socket_options = socket_options + [
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        ]
        # NOTE(Ian): OSX does not have these constants defined, so we
        # set them conditionally.
        if getattr(socket, 'TCP_KEEPINTVL', None) is not None:
            socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                                interval)]
        elif sys.platform == 'darwin':
            # On OSX, TCP_KEEPALIVE from netinet/tcp.h is not exported
            # by python's socket module
            TCP_KEEPALIVE = getattr(socket, 'TCP_KEEPALIVE', 0x10)
            socket_options += [(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval)]
        if getattr(socket, 'TCP_KEEPCNT', None) is not None:
            socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count)]
        if getattr(socket, 'TCP_KEEPIDLE', None) is not None:
            socket_options += [(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)]
        super(TCPKeepAliveAdapter, self).__init__(
            socket_options=socket_options, **kwargs
        )
| {
"content_hash": "c2635b3a2370e3e352b2b9287673be60",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 37.2265625,
"alnum_prop": 0.6100734522560336,
"repo_name": "endlessm/chromium-browser",
"id": "f8aef5dd5e99db598248df22223ce7ee67ab0824",
"size": "4789",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "third_party/catapult/third_party/requests_toolbelt/requests_toolbelt/adapters/socket_options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from tempfile import mkstemp
from os import getcwd, remove, close
from unittest import TestCase, main
from functools import partial
from os.path import join
from skbio.util import get_data_path
from burrito.util import ApplicationError
from micronota.bfillings.hmmer import (HMMScan, hmmscan_fasta,
hmmpress_hmm)
class HMMERTests(TestCase):
    """Shared fixtures for the HMMER wrapper tests."""

    def setUp(self):
        # Helper resolving paths inside the bundled data/hmmer directory.
        self.get_hmmer_path = partial(
            get_data_path, subfolder=join('data', 'hmmer'))
        self.hmm_fp = self.get_hmmer_path('Pfam_B_1.hmm')
        # Inputs that should be accepted by the tools.
        self.positive_fps = list(map(self.get_hmmer_path,
                                     ['Pfam_B_1.fasta']))
        # Inputs that should be rejected.
        self.negative_fps = list(map(get_data_path, [
            'empty',
            'whitespace_only']))
        # Scratch output file, cleaned up in tearDown.
        self.temp_fd, self.temp_fp = mkstemp()

    def tearDown(self):
        close(self.temp_fd)
        remove(self.temp_fp)
class HMMScanTests(HMMERTests):
    """Tests for the HMMScan application controller and helpers."""

    def test_base_command(self):
        """BaseCommand should reflect cwd, executable and active parameters."""
        c = HMMScan()
        self.assertEqual(
            c.BaseCommand,
            'cd "%s/"; %s' % (getcwd(), c._command))
        params = {'--cpu': 2}
        for i in params:
            # Flag parameters (value None) render without a value.
            if params[i] is None:
                c.Parameters[i].on()
                cmd = 'cd "{d}/"; {cmd} {option}'.format(
                    d=getcwd(), cmd=c._command, option=i)
            else:
                c.Parameters[i].on(params[i])
                cmd = 'cd "{d}/"; {cmd} {option} {value}'.format(
                    d=getcwd(), cmd=c._command,
                    option=i, value=params[i])
            self.assertEqual(c.BaseCommand, cmd)
            c.Parameters[i].off()

    def test_hmmscan_fasta_wrong_input(self):
        """Empty/whitespace-only inputs should raise an ApplicationError."""
        for fp in self.negative_fps:
            with self.assertRaisesRegex(
                    ApplicationError,
                    r'Error: Sequence file .* is empty or misformatted'):
                hmmscan_fasta(self.hmm_fp, fp, 'foo')

    def test_hmmscan_fasta(self):
        """Scan output should match the stored .tblout reference files."""
        params = {'--noali': None}
        for f in self.positive_fps:
            res = hmmscan_fasta(self.hmm_fp, f, self.temp_fp, 0.1, 1, params)
            res['StdOut'].close()
            res['StdErr'].close()
            obs = res['--tblout']
            out_fp = '.'.join([f, 'tblout'])
            with open(out_fp) as exp:
                # skip comment lines as some contain running time info
                self.assertListEqual(
                    [i for i in exp.readlines() if not i.startswith('#')],
                    [j for j in obs.readlines() if not j.startswith('#')])
            obs.close()
class HMMPressTests(HMMERTests):
    """Tests for the hmmpress_hmm helper."""

    def test_compress_hmm(self):
        """Pressing should reproduce the .h3f index; re-pressing should fail."""
        # .i1i file is different from run to run. skip it.
        suffix = 'h3f'
        with open('.'.join([self.hmm_fp, suffix]), 'rb') as f:
            exp = f.read()
        res = hmmpress_hmm(self.hmm_fp, True)
        res['StdOut'].close()
        res['StdErr'].close()
        with open('.'.join([self.hmm_fp, suffix]), 'rb') as f:
            self.assertEqual(f.read(), exp)
        # Pressing an already-pressed profile must raise.
        with self.assertRaisesRegex(
                ApplicationError,
                r'Error: Looks like .* is already pressed'):
            hmmpress_hmm(self.hmm_fp)
if __name__ == "__main__":
    # Allow running this test module directly.
    main()
| {
"content_hash": "caf479b2ec7431c4d99048264e0b6de5",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 33.72164948453608,
"alnum_prop": 0.5261387954753898,
"repo_name": "mortonjt/micronota",
"id": "a11acadebfb00eeb594f8c1b0457cf98e8659ea4",
"size": "3624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "micronota/bfillings/tests/test_hmmer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Python",
"bytes": "161450"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
} |
"""Mujoco environment from OpenAI gym."""
from absl import flags
import gym
from gym import spaces
import numpy as np
from seed_rl.common import common_flags
from seed_rl.common import env_wrappers
from seed_rl.mujoco import toy_env
FLAGS = flags.FLAGS
class SinglePrecisionWrapper(gym.Wrapper):
  """Single precision Wrapper for Mujoco environments."""

  def __init__(self, env):
    """Initialize the wrapper.

    Args:
      env: MujocoEnv to be wrapped.
    """
    super().__init__(env)
    # Re-declare the observation space with float32 bounds/dtype.
    self.observation_space = spaces.Box(
        self.observation_space.low,
        self.observation_space.high,
        dtype=np.float32)
    self.num_steps = 0
    self.max_episode_steps = self.env.spec.max_episode_steps

  def reset(self):
    # Restart the per-episode step counter; cast the observation to float32.
    self.num_steps = 0
    return self.env.reset().astype(np.float32)

  def step(self, action):
    self.num_steps += 1
    obs, reward, done, info = self.env.step(action)
    # Force episode termination once the spec's step limit is reached.
    if self.num_steps >= self.max_episode_steps:
      done = True
    # Normalize the reward to a float32 array or a Python float.
    if isinstance(reward, np.ndarray):
      reward = reward.astype(np.float32)
    else:
      reward = float(reward)
    return obs.astype(np.float32), reward, done, info
def create_environment(env_name,
                       discretization='none',
                       n_actions_per_dim=11,
                       action_ratio=30.,
                       gym_kwargs=None):
  """Create environment from OpenAI Gym.

  Actions are rescaled to the range [-1, 1] and optionally discretized.

  Args:
    env_name: environment name from OpenAI Gym. You can also use 'toy_env' or
      'toy_memory_env' to get very simple environments which can be used for
      sanity testing RL algorithms.
    discretization: 'none', 'lin' or 'log'. Values other than 'none' cause
      action coordinates to be discretized into n_actions_per_dim buckets.
      Buckets are spaced linearly between the bounds if 'lin' mode is used and
      logarithmically for 'log' mode.
    n_actions_per_dim: the number of buckets per action coordinate if
      discretization is used.
    action_ratio: the ratio between the highest and the lowest positive action
      for logarithmic action discretization.
    gym_kwargs: Kwargs to pass to the gym environment contructor.

  Returns:
    wrapped environment
  """
  assert FLAGS.num_action_repeats == 1, 'Only action repeat of 1 is supported.'
  if env_name == 'toy_env':
    env = toy_env.ToyEnv()
  elif env_name == 'toy_memory_env':
    env = toy_env.ToyMemoryEnv()
  elif env_name == 'bit_flip':
    # Returns directly, skipping the rescale/discretize wrappers below —
    # presumably intentional; verify BitFlippingEnv needs no action rescaling.
    return toy_env.BitFlippingEnv()
  else:  # mujoco
    gym_kwargs = gym_kwargs if gym_kwargs else {}
    gym_spec = gym.spec(env_name)
    env = gym_spec.make(**gym_kwargs)
    env = SinglePrecisionWrapper(env)
  # rescale actions so that all bounds are [-1, 1]
  env = env_wrappers.UniformBoundActionSpaceWrapper(env)
  # optionally discretize actions
  if discretization != 'none':
    env = env_wrappers.DiscretizeEnvWrapper(env, n_actions_per_dim,
                                            discretization, action_ratio)
  return env
| {
"content_hash": "7baddf7bc1e4221548d574895a223fb6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 31.885416666666668,
"alnum_prop": 0.6625285854295981,
"repo_name": "google-research/seed_rl",
"id": "7852609fd6a1edf9187ca582316a03e0b8a8fb4d",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mujoco/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41131"
},
{
"name": "Jupyter Notebook",
"bytes": "72883"
},
{
"name": "Python",
"bytes": "614110"
},
{
"name": "Shell",
"bytes": "31284"
},
{
"name": "Starlark",
"bytes": "932"
}
],
"symlink_target": ""
} |
"""payu.models.mom6
================
Driver interface to the MOM6 ocean model.
:copyright: Copyright 2011 Marshall Ward, see AUTHORS for details
:license: Apache License, Version 2.0, see LICENSE for details
"""
# Standard library
import os
# Extensions
import f90nml
# Local
from payu.models.fms import Fms
class Mom6(Fms):
    """Interface to GFDL's MOM6 ocean model."""

    def __init__(self, expt, name, config):
        # FMS initialisation
        super(Mom6, self).__init__(expt, name, config)
        self.model_type = 'mom6'
        self.default_exec = 'MOM6'
        # Configuration files that must be present for every run.
        self.config_files = [
            'input.nml',
            'MOM_input',
            'diag_table',
        ]
        # TODO: Need to figure out what's going on here with MOM6
        self.optional_config_files = [
            'data_table',
            'data_table.MOM6',
            'data_table.OM4',
            'data_table.SIS',
            'data_table.icebergs',
            'field_table',
            'MOM_override',
            'MOM_layout',
            'MOM_saltrestore',
            'SIS_input',
            'SIS_override',
            'SIS_layout',
        ]

    def setup(self):
        """Stage the work directory (via Fms), then patch input.nml."""
        # FMS initialisation
        super(Mom6, self).setup()
        self.init_config()

    def init_config(self):
        """Patch input.nml as a new or restart run."""
        input_fpath = os.path.join(self.work_path, 'input.nml')
        input_nml = f90nml.read(input_fpath)
        # 'n' = new (cold-start) run; 'r' = restart from the previous segment.
        if self.expt.counter == 0 or self.expt.repeat_run:
            input_type = 'n'
        else:
            input_type = 'r'
        input_nml['MOM_input_nml']['input_filename'] = input_type
        f90nml.write(input_nml, input_fpath, force=True)
| {
"content_hash": "12e0e5d7570e1461b359e3c1631fbb49",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 68,
"avg_line_length": 23.106666666666666,
"alnum_prop": 0.5401038661281016,
"repo_name": "marshallward/payu",
"id": "77fbf0584bbc7c492cf51600d199cd21e4ff4ed0",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payu/models/mom6.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "206065"
},
{
"name": "Shell",
"bytes": "7164"
}
],
"symlink_target": ""
} |
"""
Provides testing capabilities and customisations specific to tephi.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
import codecs
import collections
import io
import json
import os
import sys
import filelock
import matplotlib
import numpy as np
import pytest
import requests
from tephi import DATA_DIR
#: Basepath for test data.
_DATA_PATH = DATA_DIR
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), "results")
#: Default perceptual hash size.
_HASH_SIZE = 16
#: Default maximum perceptual hash hamming distance.
_HAMMING_DISTANCE = 4
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
# Test images URL.
BASE_URL = "https://scitools.github.io/test-tephi-imagehash/images"
# Probe for internet access once at import time; tests marked with
# `requires_inet` are skipped when the probe fails.
try:
    # Added a timeout to stop the call to requests.get hanging when running
    # on a platform which has restricted/no internet access.
    requests.get("https://github.com/SciTools/tephi", timeout=5.0)
    INET_AVAILABLE = True
except requests.exceptions.ConnectionError:
    INET_AVAILABLE = False
requires_inet = pytest.mark.skipif(
    not INET_AVAILABLE,
    reason=('Test requires an "internet connection", which is not available.'),
)
# The "-d" flag switches to an interactive backend so graphical test
# results can be inspected visually (see the module docstring).
if "-d" in sys.argv:
    sys.argv.remove("-d")
    matplotlib.use("tkagg")
    _DISPLAY_FIGURES = True
else:
    matplotlib.use("agg")
# Imported now so that matplotlib.use can work
import matplotlib.pyplot as plt
def get_data_path(relative_path):
    """
    Returns the absolute path to a data file when given the relative path
    as a string, or sequence of strings.

    """
    parts = relative_path
    if isinstance(parts, (list, tuple)):
        parts = os.path.join(*parts)
    full_path = os.path.join(_DATA_PATH, parts)
    return os.path.abspath(full_path)
def get_result_path(relative_path):
    """
    Returns the absolute path to a result file when given the relative path
    as a string, or sequence of strings.

    """
    parts = relative_path
    if isinstance(parts, (list, tuple)):
        parts = os.path.join(*parts)
    full_path = os.path.join(_RESULT_PATH, parts)
    return os.path.abspath(full_path)
class TephiTest:
    """
    Utility class providing the shared assertion helpers used by the
    tephi testing framework.

    """

    def assertArrayEqual(self, a, b):
        """Fail unless ``a`` and ``b`` are exactly equal element-wise."""
        __tracebackhide__ = True
        np.testing.assert_array_equal(a, b)

    def assertArrayAlmostEqual(self, a, b, *args, **kwargs):
        """Fail unless ``a`` and ``b`` are equal to the requested precision."""
        __tracebackhide__ = True
        np.testing.assert_array_almost_equal(a, b, *args, **kwargs)
class GraphicsTest(TephiTest):
    """Base class for tests that compare matplotlib output by perceptual hash."""

    # Per-test call counter, shared across instances so repeated
    # check_graphic calls within one test get distinct ids.
    _assertion_count = collections.defaultdict(int)

    def _unique_id(self, nodeid):
        """Create a hashable key to represent the unique test invocation.

        Construct the hashable key from the provided nodeid and a sequential
        counter specific to the current test, that is incremented on each call.

        Parameters
        ----------
        nodeid : str
            Unique identifier for the current test. See :func:`nodeid` fixture.

        Returns
        -------
        str
            The nodeid with sequential counter.

        """
        count = self._assertion_count[nodeid]
        self._assertion_count[nodeid] += 1
        return f"{nodeid}.{count}"

    def check_graphic(self, nodeid):
        """
        Check the hash of the current matplotlib figure matches the expected
        image hash for the current graphic test.

        To create missing image test results, set the TEPHI_TEST_CREATE_MISSING
        environment variable before running the tests. This will result in new
        and appropriately "<hash>.png" image files being generated in the image
        output directory, and the imagerepo.json file being updated.

        """
        __tracebackhide__ = True
        # Imported lazily so the module imports without these test-only deps.
        import imagehash
        from PIL import Image
        dev_mode = os.environ.get("TEPHI_TEST_CREATE_MISSING")
        unique_id = self._unique_id(nodeid)
        repo_fname = os.path.join(_RESULT_PATH, "imagerepo.json")
        repo = {}
        if os.path.isfile(repo_fname):
            with open(repo_fname, "rb") as fi:
                repo = json.load(codecs.getreader("utf-8")(fi))
        try:
            #: The path where the images generated by the tests should go.
            image_output_directory = os.path.join(
                os.path.dirname(__file__), "result_image_comparison"
            )
            if not os.access(image_output_directory, os.W_OK):
                if not os.access(os.getcwd(), os.W_OK):
                    raise IOError(
                        "Write access to a local disk is required "
                        "to run image tests. Run the tests from a "
                        "current working directory you have write "
                        "access to to avoid this issue."
                    )
                else:
                    image_output_directory = os.path.join(
                        os.getcwd(), "tephi_image_test_output"
                    )
            result_fname = os.path.join(
                image_output_directory, "result-" + unique_id + ".png"
            )
            if not os.path.isdir(image_output_directory):
                # Handle race-condition where the directories are
                # created sometime between the check above and the
                # creation attempt below.
                try:
                    os.makedirs(image_output_directory)
                except OSError as err:
                    # Don't care about "File exists"
                    if err.errno != 17:
                        raise

            def _create_missing():
                # Dev-mode helper: save the figure under its hash name and
                # append the corresponding URI to the (locked) repo file.
                fname = f"{phash}.png"
                uri = os.path.join(BASE_URL, fname)
                hash_fname = os.path.join(image_output_directory, fname)
                uris = repo.setdefault(unique_id, [])
                uris.append(uri)
                print(f"Creating image file: {hash_fname}")
                figure.savefig(hash_fname)
                msg = "Creating imagerepo entry: {} -> {}"
                print(msg.format(unique_id, uri))
                lock = filelock.FileLock(
                    os.path.join(_RESULT_PATH, "imagerepo.lock")
                )
                # The imagerepo.json file is a critical resource, so ensure
                # thread safe read/write behaviour via platform independent
                # file locking.
                with lock.acquire(timeout=600):
                    with open(repo_fname, "wb") as fo:
                        json.dump(
                            repo,
                            codecs.getwriter("utf-8")(fo),
                            indent=4,
                            sort_keys=True,
                        )

            # Calculate the test result perceptual image hash.
            buffer = io.BytesIO()
            figure = plt.gcf()
            figure.savefig(buffer, format="png")
            buffer.seek(0)
            phash = imagehash.phash(Image.open(buffer), hash_size=_HASH_SIZE)
            if unique_id not in repo:
                if dev_mode:
                    _create_missing()
                else:
                    figure.savefig(result_fname)
                    emsg = "Missing image test result: {}."
                    raise AssertionError(emsg.format(unique_id))
            else:
                uris = repo[unique_id]
                # Extract the hex basename strings from the uris.
                hexes = [
                    os.path.splitext(os.path.basename(uri))[0] for uri in uris
                ]
                # Create the expected perceptual image hashes from the uris.
                to_hash = imagehash.hex_to_hash
                expected = [to_hash(uri_hex) for uri_hex in hexes]
                # Calculate hamming distance vector for the result hash.
                distances = [e - phash for e in expected]
                # Fail only if *every* accepted hash is too far away.
                if np.all([hd > _HAMMING_DISTANCE for hd in distances]):
                    if dev_mode:
                        _create_missing()
                    else:
                        figure.savefig(result_fname)
                        msg = (
                            "Bad phash {} with hamming distance {} "
                            "for test {}."
                        )
                        msg = msg.format(phash, distances, unique_id)
                        if _DISPLAY_FIGURES:
                            emsg = "Image comparison would have failed: {}"
                            print(emsg.format(msg))
                        else:
                            emsg = "Image comparison failed: {}"
                            raise AssertionError(emsg.format(msg))
            if _DISPLAY_FIGURES:
                plt.show()
        finally:
            # Always release the current figure, pass or fail.
            plt.close()
| {
"content_hash": "01aef76583c93099eb75157c68beb099",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 79,
"avg_line_length": 34.61132075471698,
"alnum_prop": 0.5604012211077192,
"repo_name": "SciTools/tephi",
"id": "478c858c83a9091adf6e25f47acbe50bdc0bf0dd",
"size": "9345",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tephi/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "734702"
},
{
"name": "Python",
"bytes": "83624"
}
],
"symlink_target": ""
} |
def displayInventory(inv):
    """Return a printable summary of the inventory dict `inv`.

    Items are listed alphabetically as "<count> <name>", followed by a
    total line. Removed the dead commented-out sorting code from the
    original.

    Args:
        inv: dict mapping item name (str) to count (int).

    Returns:
        The formatted multi-line summary string.
    """
    output = "Inventory:\n"
    item_count = 0
    for name in sorted(inv):  # sorted keys give a deterministic listing
        output += "{val} {k}\n".format(val=inv[name], k=name)
        item_count += inv[name]
    output += "\nTotal number of items: {count}\n".format(count=item_count)
    return output
def addToInventory(inv, items):
    """Add one of each entry in *items* to the dict *inv*, in place.

    Unseen items are created with a count of 1; existing counts increment.
    Returns None (mutates *inv*).
    """
    for item in items:
        # dict.get avoids the redundant membership test on inv.keys()
        inv[item] = inv.get(item, 0) + 1
"content_hash": "b3844425390b2b91be867c84e0f76756",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.6517412935323383,
"repo_name": "joeryan/pytest-practice",
"id": "b98b143780a8b3bd7aac6b95c0ea3f46a1e176a1",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10799"
}
],
"symlink_target": ""
} |
"""
toposort.py
Sorts dictionary keys based on lists of dependencies.
"""
class MissingDependency(Exception):
    """Raised when a listed dependency has no entry of its own in the dictionary."""
class Sorter(object):
    """Depth-first topological sorter over a dependency mapping.

    Emits each key only after all of its listed dependencies, accumulating
    the result as a tuple.
    """

    def __init__(self, dependencies):
        self.dependencies = dependencies
        self.visited = set()
        self.sorted = ()

    def sort(self):
        """Visit every key and return the dependency-ordered tuple."""
        for node in self.dependencies:
            self._visit(node)
        return self.sorted

    def _visit(self, node):
        """Recursively emit *node*'s dependencies before *node* itself."""
        if node in self.visited:
            return
        self.visited.add(node)
        if node not in self.dependencies:
            raise MissingDependency(node)
        for requirement in self.dependencies[node]:
            self._visit(requirement)
        self.sorted += (node,)
def toposort(dependencies):
    """Return the keys of *dependencies* as a tuple ordered so that every key
    appears after the entries in its dependency list. Circular dependencies
    are resolved by imposing an arbitrary visit order. Raises
    MissingDependency if a listed dependency is not itself a key.
    """
    return Sorter(dependencies).sort()
| {
"content_hash": "0a14d1270e8afe93833620d1f5aa01c5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 30.62857142857143,
"alnum_prop": 0.6324626865671642,
"repo_name": "flavour/eden",
"id": "d4d67ca8488c22ddeb6d14672322eca1db6f901b",
"size": "1072",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "static/scripts/tools/toposort.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3351335"
},
{
"name": "HTML",
"bytes": "1367727"
},
{
"name": "JavaScript",
"bytes": "20109418"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31407527"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3274119"
}
],
"symlink_target": ""
} |
class Credits():
    """Holds the lines of text shown on the game's credits screen."""

    def __init__(self):
        # One entry per on-screen line; empty strings render as blank lines.
        self.creditsText = [
            "Oregon Trail - A PyGame clone",
            "",
            "Created by:",
            "Zak - Programming",
            "David - Programming + Graphics",
            "",
            "Thanks for playing!",
        ]
| {
"content_hash": "5c1aca66501076da8c849cfafad93c4e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 21.90909090909091,
"alnum_prop": 0.5145228215767634,
"repo_name": "ZakDoesGaming/OregonTrail",
"id": "82eedf99229008690755a04513d5ac79bbcc779d",
"size": "241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/credits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118172"
}
],
"symlink_target": ""
} |
import os
import sys
import numpy as np
from django.conf import settings
import django
django.setup()
import logging
logger = logging.getLogger("neurodata")
class HistStats():
    """Summary statistics computed directly from a histogram, without the
    raw samples.

    ``histogram`` is a numpy array of per-bin counts and ``bins`` the
    corresponding bin edges (assumes len(bins) == len(histogram) + 1 --
    numpy.histogram convention; TODO confirm against callers).
    """

    def mean(self, histogram, bins):
        """Return the count-weighted mean of the bin midpoints."""
        weighted_sum = 0
        total_count = 0
        for count, lo, hi in zip(histogram, bins, bins[1:]):
            weighted_sum += count * ((hi + lo) / 2)
            total_count += count
        return weighted_sum / total_count

    def stddev(self, histogram, bins):
        """Return the standard deviation of the histogram around its mean."""
        center = self.mean(histogram, bins)
        var_sum = 0
        total_count = 0
        for count, lo, hi in zip(histogram, bins, bins[1:]):
            var_sum += count * ((((hi + lo) / 2) - center) ** 2)
            total_count += count
        return np.sqrt(var_sum / total_count)

    def percentile(self, histogram, bins, percent):
        """Return the bin edge below which roughly *percent* of the mass falls."""
        # Clamp out-of-range requests.
        if float(percent) <= 0:
            return 0
        elif float(percent) >= 100:
            return bins[-1]

        # An all-zero histogram has no well-defined percentile.
        if len(np.nonzero(histogram)[0]) == 0:
            return 0

        # Normalize so the bin values sum to 1 (float accumulator on purpose,
        # so the division below is true division even for integer counts).
        total = 0.0
        for count in histogram:
            total += count
        normalized = np.zeros(histogram.shape)
        for idx in range(len(histogram)):
            normalized[idx] = histogram[idx] / total

        # Walk bins until the accumulated mass reaches the target fraction.
        target = float(percent) * 0.01
        accumulated = 0.0
        idx = 0
        while accumulated < target and idx < normalized.shape[0]:
            accumulated += normalized[idx]
            idx += 1
        # Guard against running off the end of the edges array.
        if idx + 1 >= len(bins):
            return bins[-1]
        return bins[idx + 1]

    def min(self, histogram, bins):
        """Return the lower edge of the first non-empty bin (0 if all empty)."""
        for edge, count in zip(bins, histogram):
            if count > 0:
                return edge
        return 0

    def max(self, histogram, bins):
        """Return the lower edge of the last non-empty bin (0 if all empty)."""
        last_edge = 0
        for edge, count in zip(bins, histogram):
            if count > 0:
                last_edge = edge
        return last_edge
| {
"content_hash": "4b1e8f39e6cf4db0b8e4bf3c3c7131cb",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 89,
"avg_line_length": 26.518072289156628,
"alnum_prop": 0.6160835983643799,
"repo_name": "openconnectome/open-connectome",
"id": "802f9b398ebba356a52b6e6c9005eae09f5c3cee",
"size": "2800",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ndstats/histstats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43100"
},
{
"name": "C++",
"bytes": "23724"
},
{
"name": "CSS",
"bytes": "53255"
},
{
"name": "HTML",
"bytes": "142332"
},
{
"name": "JavaScript",
"bytes": "303249"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "1409968"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
import django
import os
from distutils.version import LooseVersion
from django.utils.functional import LazyObject
from django.core.files.storage import get_storage_class
#The following class is taken from https://github.com/jezdez/django/compare/feature/staticfiles-templatetag
#and should be removed and replaced by the django-core version in 1.4
# The staticfiles storage backend lived in its own package before Django 1.3.
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
    default_storage = 'staticfiles.storage.StaticFilesStorage'
else:
    default_storage = 'django.contrib.staticfiles.storage.StaticFilesStorage'


class ConfiguredStorage(LazyObject):
    """Lazily instantiates the storage class named by
    settings.STATICFILES_STORAGE (falling back to *default_storage*) on
    first attribute access."""

    def _setup(self):
        from django.conf import settings
        backend_path = getattr(settings, 'STATICFILES_STORAGE', default_storage)
        self._wrapped = get_storage_class(backend_path)()

configured_storage = ConfiguredStorage()
def static_url(path):
    '''
    Helper that prefixes a URL with STATIC_URL and cms.

    Returns the storage URL for *path* under the ``table`` directory, or an
    empty string when *path* is falsy.
    '''
    return configured_storage.url(os.path.join('table', path)) if path else ''
| {
"content_hash": "c00236dccab12d042abef4e60fe4f262",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 107,
"avg_line_length": 34.06896551724138,
"alnum_prop": 0.7530364372469636,
"repo_name": "divio/djangocms-table",
"id": "170a3ecaa11248e41e8b5e89ed51c9d19b28b238",
"size": "988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djangocms_table/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8858"
},
{
"name": "HTML",
"bytes": "2624"
},
{
"name": "JavaScript",
"bytes": "228204"
},
{
"name": "Python",
"bytes": "16082"
}
],
"symlink_target": ""
} |
__source__ = 'https://github.com/kamyu104/LeetCode/blob/master/Python/ternary-expression-parser.py'
# https://leetcode.com/problems/ternary-expression-parser/#/description
# Time: O(n)
# Space: O(1)
#
# Description: 439. Ternary Expression Parser
#
# Given a string representing arbitrarily nested ternary expressions,
# calculate the result of the expression.
# You can always assume that the given expression is valid and only consists of digits 0-9, ?, :,
# T and F (T and F represent True and False respectively).
#
# Note:
#
# The length of the given string is <= 10000.
# Each number will contain only one digit.
# The conditional expressions group right-to-left (as usual in most languages).
# The condition will always be either T or F. That is, the condition will never be a digit.
# The result of the expression will always evaluate to either a digit 0-9, T or F.
# Example 1:
#
# Input: "T?2:3"
#
# Output: "2"
#
# Explanation: If true, then result is 2; otherwise result is 3.
# Example 2:
#
# Input: "F?1:T?4:5"
#
# Output: "4"
#
# Explanation: The conditional expressions group right-to-left. Using parenthesis, it is read/evaluated as:
#
# "(F ? 1 : (T ? 4 : 5))" "(F ? 1 : (T ? 4 : 5))"
# -> "(F ? 1 : 4)" or -> "(T ? 4 : 5)"
# -> "4" -> "4"
# Example 3:
#
# Input: "T?T?F:5:3"
#
# Output: "F"
#
# Explanation: The conditional expressions group right-to-left. Using parenthesis, it is read/evaluated as:
#
# "(T ? (T ? F : 5) : 3)" "(T ? (T ? F : 5) : 3)"
# -> "(T ? F : 3)" or -> "(T ? F : 5)"
# -> "F" -> "F"
# Hide Company Tags Snapchat
# Hide Tags Depth-first Search Stack
# Hide Similar Problems (M) Mini Parser
import unittest
# 52ms 82.09%
class Solution(object):
    def parseTernary(self, expression):
        """
        :type expression: str
        :rtype: str

        Scan right-to-left with a stack. Whenever the character just pushed
        is a condition sitting on top of '?', collapse the five items
        ``cond ? true_branch : false_branch`` into the chosen branch.
        """
        if not expression:
            return ""
        stack = []
        for token in reversed(expression):
            if stack and stack[-1] == '?':
                stack.pop()                     # discard '?'
                true_branch = stack.pop()
                stack.pop()                     # discard ':'
                false_branch = stack.pop()
                stack.append(true_branch if token == 'T' else false_branch)
            else:
                stack.append(token)
        return str(stack[-1])

    def parseTernary2(self, expression):
        # Same idea: reduce as soon as a condition lands on top of a '?'.
        stack = []
        for token in reversed(expression):
            stack.append(token)
            if len(stack) >= 2 and stack[-2] == '?':
                condition = stack.pop()         # 'T' or 'F'
                stack.pop()                     # '?'
                true_branch = stack.pop()
                stack.pop()                     # ':'
                false_branch = stack.pop()
                stack.append(true_branch if condition == 'T' else false_branch)
        return stack[0]
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # Placeholder sanity test; the worked examples live in the comments above.
        self.assertEqual(1, 1)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# Reference Java implementation, kept as a documentation string (never executed).
Java = '''
# Thought:
# Iterate the expression from tail, whenever encounter a character before '?',
# calculate the right value and push back to stack.
#
# P.S. this code is guaranteed only if "the given expression is valid" base on the requirement.
# 15ms 41.57%
class Solution {
    public String parseTernary(String expression) {
        if (expression == null || expression.length() == 0) return "";
        Deque<Character> stack = new LinkedList<>();

        for ( int i = expression.length() - 1; i >= 0; i--) {
            char c = expression.charAt(i);
            if (!stack.isEmpty() && stack.peek() == '?') {
                stack.pop(); //'?'
                char first = stack.pop();
                stack.pop(); //':'
                char second = stack.pop();

                if (c == 'T') stack.push(first);
                else stack.push(second); //'F'
            } else {
                stack.push(c);
            }
        }
        return String.valueOf(stack.peek());
    }
}
'''
"content_hash": "f325bf03a87b4e4f780bc9c074ad8833",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 107,
"avg_line_length": 31.27777777777778,
"alnum_prop": 0.5257548845470693,
"repo_name": "JulyKikuAkita/PythonPrac",
"id": "bd947c849be5f79652edd1ac341c54ddb5f00749",
"size": "3941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs15211/TernaryExpressionParser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "5429558"
}
],
"symlink_target": ""
} |
import datetime as dt
from dateutil.relativedelta import relativedelta
from datetime import datetime, timedelta
from calendar import isleap
import pandas as pd
def convert_datetime(df):
    """Convert every column of *df* whose name contains 'DATE' to datetime,
    in place.

    Columns that ``pd.to_datetime`` cannot parse are left unchanged
    (best-effort, matching the original behaviour).

    :param df: pandas DataFrame to modify in place.
    :return: None
    """
    # Plain substring test replaces .str.contains(...) == True; guard against
    # non-string column labels.
    date_cols = [col for col in df.columns if isinstance(col, str) and 'DATE' in col]
    for col in date_cols:
        try:
            df[col] = pd.to_datetime(df[col])
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); unparseable columns stay as-is.
            pass
| {
"content_hash": "26365d33dae0cf45e32b6fcc8ebe0fe4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 30.846153846153847,
"alnum_prop": 0.6932668329177057,
"repo_name": "igotcharts/charts_and_more_charts",
"id": "0cb1ad7b2a1e4fdd3cbf9bf3c06698b65f0e1de1",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functions/date_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4805689"
},
{
"name": "Python",
"bytes": "9534"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class SecurityGroupNetworkInterface(Model):
    """A network interface together with all of its associated security rules.

    :param id: ID of the network interface.
    :type id: str
    :param security_rule_associations:
    :type security_rule_associations:
     ~azure.mgmt.network.v2017_03_01.models.SecurityRuleAssociations
    """

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rule_associations': {
            'key': 'securityRuleAssociations',
            'type': 'SecurityRuleAssociations',
        },
    }

    def __init__(self, id=None, security_rule_associations=None):
        super(SecurityGroupNetworkInterface, self).__init__()
        self.id = id
        self.security_rule_associations = security_rule_associations
| {
"content_hash": "4254f24efda1100e1f1775de7591fd66",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 110,
"avg_line_length": 35,
"alnum_prop": 0.6805194805194805,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "b087e727a80cc9a0f7c86958d6b00536c8ad83a8",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/security_group_network_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Follow-up to 0015: metadata-only changes (verbose names, field defaults,
    # dropping a max_length) plus the Galerie -> GalleryPicture model rename.

    dependencies = [
        ("ddcz", "0015_auto_20180614_2308"),
    ]

    operations = [
        # Czech admin labels for the Monster model.
        migrations.AlterModelOptions(
            name="monster",
            options={"verbose_name": "Nestvůra", "verbose_name_plural": "Bestiář"},
        ),
        migrations.AlterField(
            model_name="monster",
            name="tisknuto",
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name="creativepageconcept",
            name="text",
            field=models.TextField(),
        ),
        migrations.RenameModel("Galerie", "GalleryPicture"),
    ]
| {
"content_hash": "304aa96183cd9ee83129f1378ef59321",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 27.26923076923077,
"alnum_prop": 0.5613540197461213,
"repo_name": "dracidoupe/graveyard",
"id": "d8fdcd330f7ec7f0e3b7eba68a6137190fc32a73",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcz/migrations/0016_auto_20180617_1602.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "4273"
},
{
"name": "CSS",
"bytes": "37578"
},
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "101149"
},
{
"name": "JavaScript",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "766548"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
} |
from eiger import EigerImages
from edf import EDFImages
| {
"content_hash": "6570b193b98cc397f0f0c11a8451fbec",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 28,
"alnum_prop": 0.8571428571428571,
"repo_name": "brunoseivam/chxtools",
"id": "5e011ebeab165839dde288ce35ddc40a041ef29c",
"size": "56",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chxtools/pims_readers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "102907"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
} |
"""
news_data.pipeline.metric_writer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[Pipeline Step 4 of 4]
This module subscribes to the 'analyzed_articles' queue and listens
for jobs. When it receives a job, it reads the analyzed article
results and creates metric data. This is then written to the DB.
:license: MIT, see LICENSE for more details.
"""
import argparse
from datetime import datetime
from datetime import timedelta
import date_util
from db import mongo
import queue
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#   Temp -- used for monitoring performance
# Daily write/read/create time accumulators
dw_time = timedelta(days=0)
dr_time = timedelta(days=0)
dc_time = timedelta(days=0)

# Monthly write/read/create time accumulators
mw_time = timedelta(days=0)
mr_time = timedelta(days=0)
mc_time = timedelta(days=0)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

verbose = False          # set from -v; currently informational only
updt_freq = 1000         # print a status update every N articles (-u)

# Running counters updated by create_metrics_for_article and helpers.
metricized_articles = 0
terms_processed = 0
docs_created_daily = 0
docs_created_monthly = 0

# Connection/collection handles, populated by init_db_and_queue().
connection = None
consume_channel = None
db_analyzed_articles = None
db_metric_data_daily = None
db_metric_data_monthly = None
def init_db_and_queue():
    """Initialize the MongoDB collection handles and the RabbitMQ
    connection/channel used by this module (stored in module globals).
    """
    global db_analyzed_articles, db_metric_data_daily, db_metric_data_monthly,\
        connection, consume_channel

    # Init DB
    db_analyzed_articles = mongo.get_analyzed_articles()
    db_metric_data_daily = mongo.get_metric_data_daily()
    db_metric_data_monthly = mongo.get_metric_data_monthly()

    # Init connection and channels to RabbitMQ
    connection, consume_channel = queue.init_connection()
    queue.init_analyzed_articles(consume_channel)
def analyzed_articles_consumer(channel, method, header, body):
    """Queue callback: *body* is an analyzed-article id. Create its metric
    data, then ack the message so the broker removes it from the queue.
    """
    create_metrics_for_article(body)
    channel.basic_ack(delivery_tag = method.delivery_tag)
def start_consuming_analyzed_articles():
    """Block forever consuming jobs from the analyzed_articles queue.

    The RabbitMQ connection is always closed when consumption stops
    (normally or via an exception).
    """
    try:
        print " Metric Writer Started..."
        queue.consume_analyzed_articles(consume_channel,
            analyzed_articles_consumer)
    finally:
        queue.close_connection(connection)
def create_metrics_for_article(article_id, preview=False):
    """Read the analyzed article *article_id* and upsert daily and monthly
    metric documents for every term in its term histogram.

    :param article_id: _id of a document in the analyzed_articles collection
    :param preview: when True, count/report only -- nothing is persisted
    """
    global metricized_articles, terms_processed

    # Get article from the DB...
    analyzed_article = read_analyzed_article_from_db(article_id)

    # Create metrics...
    if analyzed_article:

        # There are a few different approaches to consider when writing
        # metric data.
        #   (1) Ensure documents are allocated, and then upsert data for
        #       the change
        #   (2) Upsert an entire doc each time, where all values are zero
        #       except one.
        #   (3) Upsert daily term docs one at a time, then aggregate into
        #       higher level data later
        #
        # >> Currently selecting to do the former approach. Results in
        #    more small reads to the DB, but smaller writes.

        # Get needed date values
        published = analyzed_article["published"]
        yyyy = published.year
        mm = published.month
        dd = published.day
        first_of_month = datetime(yyyy, mm, 1)
        days_in_curr_month = date_util.get_days_in_month(yyyy, published.month)

        # Iterate over each term in the term histogram
        term_histogram = analyzed_article["term_histogram"]
        for term in term_histogram:
            terms_processed += 1
            if not preview:
                update_daily_metrics(term, yyyy, mm, dd, first_of_month,
                    days_in_curr_month, term_histogram[term])
                update_monthly_metrics(term, yyyy, mm, term_histogram[term])

        # Increase count and update status after each article...
        metricized_articles += 1
        if preview or metricized_articles % updt_freq == 0:
            print " * Articles Metricized: %d..." % metricized_articles
            print " Terms: %d Daily Docs %d Monthly Docs %d" % \
                (terms_processed, docs_created_daily, docs_created_monthly)
            print " Monthly: Read: %s, Create: %s, Write: %s" % \
                (mr_time, mc_time, mw_time)
            print " Daily: Read: %s, Create: %s, Write: %s" % \
                (dr_time, dc_time, dw_time)
    else:
        print " ERROR: No document with id of '%s' in DB" % article_id
def read_analyzed_article_from_db(article_id):
    """Fetch the analyzed-article document with the given _id, or None."""
    analyzed_article = db_analyzed_articles.find_one({ "_id" : article_id})
    return analyzed_article
def update_daily_metrics(term, yyyy, mm, dd, first_of_month,
        days_in_curr_month, term_count):
    """Upsert the daily metric document for (*term*, *yyyy*, *mm*),
    incrementing day *dd*'s counter by *term_count*.

    On first sight of a (term, month) pair, a document with every day of
    the month zeroed is allocated so later $inc updates stay small.
    Read/create/write durations are accumulated in the d*_time globals.
    """
    global docs_created_daily, dr_time, dw_time, dc_time

    # Create the metric identifier
    id_daily_metric = {
        "_id" : {
            "term" : term,
            "yyyy" : yyyy,
            "mm" : date_util.pad_month_day_value(mm)
        }
    }

    # Check if a doc for this identifier already exists, if not
    # allocate the doc
    r_time = datetime.now()
    if (db_metric_data_daily.find(id_daily_metric).count() == 0):
        dr_time += (datetime.now() - r_time)
        c_time = datetime.now()
        docs_created_daily += 1
        metric_doc_daily = {
            "_id" : id_daily_metric["_id"],
            "term" : term,
            "date" : first_of_month,
            "daily": {}
        }
        # Pre-zero a slot for every day of this month.
        for day in range(1, days_in_curr_month + 1):
            metric_doc_daily["daily"][str(day)] = 0
        db_metric_data_daily.insert(metric_doc_daily)
        dc_time += (datetime.now() - c_time)
    else:
        dr_time += (datetime.now() - r_time)

    # Update the daily metric data with this value
    w_time = datetime.now()
    metric_update_daily = {"$inc" : {"daily." + str(dd) : term_count}}
    db_metric_data_daily.update(id_daily_metric, metric_update_daily,
        True) # True for upsert
    dw_time += (datetime.now() - w_time)
def update_monthly_metrics(term, yyyy, mm, term_count):
    """Upsert the monthly metric document for *term*, incrementing the
    counter for month *mm* of year *yyyy* by *term_count*.

    BUG FIX: the allocation loops previously reused the names ``yyyy`` and
    ``mm`` as loop variables, clobbering the caller's date -- so the very
    first increment for any new term always landed on 2013/12 instead of
    the article's actual month. The loops now use distinct names.
    """
    global docs_created_monthly, mr_time, mw_time, mc_time

    # Create the metric identifier
    id_monthly_metric = {
        "_id" : {
            "term" : term
        }
    }

    # Check if a doc for this identifier already exists; if not,
    # pre-allocate it with every year/month counter zeroed.
    r_time = datetime.now()
    if (db_metric_data_monthly.find(id_monthly_metric).count() == 0):
        mr_time += (datetime.now() - r_time)
        c_time = datetime.now()
        docs_created_monthly += 1
        metric_doc_monthly = {
            "_id" : id_monthly_metric["_id"],
            "term" : term
        }
        # NOTE(review): supported year range is hard-coded to 2000-2013;
        # articles outside it will upsert sparse fields instead.
        for alloc_year in range(2000, 2014):
            metric_doc_monthly[str(alloc_year)] = {}
            for alloc_month in range(1, 13):
                metric_doc_monthly[str(alloc_year)][str(alloc_month)] = 0
        db_metric_data_monthly.insert(metric_doc_monthly)
        mc_time += (datetime.now() - c_time)
    else:
        mr_time += (datetime.now() - r_time)

    # Update the monthly metric data with this value
    w_time = datetime.now()
    metric_update_monthly = {"$inc" : {str(yyyy) + "." + str(mm) : term_count}}
    db_metric_data_monthly.update(id_monthly_metric,
        metric_update_monthly, True) # True for upsert
    mw_time += (datetime.now() - w_time)
def parse_args():
    """ Parse the command line arguments

    Side effects: updates the module globals ``verbose`` and ``updt_freq``.
    Returns the parsed argparse namespace.
    """
    global verbose, updt_freq

    parser = argparse.ArgumentParser(description="Listens to queue for\
        analyzed articles to create metrics for, or optionally\
        create metrics for a given article id argument")
    parser.add_argument("-v", "--verbose", action='store_true',
        help="Make the operation talkative")
    parser.add_argument("-p", "--preview", action='store_true',
        help="Preview only, don't persist results.")
    parser.add_argument("-u", "--updt_freq", type=int, default=1000,
        help="Frequency to print an update")
    parser.add_argument("-i", "--id", help="Id of article to parse")

    args = parser.parse_args()
    verbose = args.verbose
    updt_freq = args.updt_freq
    return args
if __name__ == "__main__":
    # Entry point: parse CLI args, set up DB/queue handles, then either
    # process a single article id (-i) or consume jobs indefinitely.
    args = parse_args()
    print "----------------------------------------------< metric_writer >----"
    init_db_and_queue()

    # If an article id is provided as an argument, create metrics for
    # it. Otherwise, start consuming msgs from the queue.
    if args.id:
        create_metrics_for_article(args.id.strip(), args.preview)
    else:
        start_consuming_analyzed_articles()
"content_hash": "113673302b2b618f957d32f48362726c",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 32.58914728682171,
"alnum_prop": 0.5972882968601332,
"repo_name": "lbracken/news_data",
"id": "e890d12036a7c87f26da8b35a349236cd09371a1",
"size": "8432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/metric_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3696"
},
{
"name": "JavaScript",
"bytes": "20984"
},
{
"name": "Python",
"bytes": "57955"
},
{
"name": "Shell",
"bytes": "359"
}
],
"symlink_target": ""
} |
'''
Module for gathering disk information
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
from salt.exceptions import CommandExecutionError
from salt.ext.six.moves import zip
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # Windows has no df; load this module everywhere else.
    return not salt.utils.is_windows()
def _clean_flags(args, caller):
'''
Sanitize flags passed into df
'''
flags = ''
if args is None:
return flags
allowed = ('a', 'B', 'h', 'H', 'i', 'k', 'l', 'P', 't', 'T', 'x', 'v')
for flag in args:
if flag in allowed:
flags += flag
else:
raise CommandExecutionError(
'Invalid flag passed to {0}'.format(caller)
)
return flags
def usage(args=None):
    '''
    Return usage information for volumes mounted on this minion

    CLI Example:

    .. code-block:: bash

        salt '*' disk.usage
    '''
    flags = _clean_flags(args, 'disk.usage')
    # On Linux, df reads /etc/mtab; without it (common in containers) bail out.
    if not os.path.isfile('/etc/mtab') and __grains__['kernel'] == 'Linux':
        log.error('df cannot run without /etc/mtab')
        if __grains__.get('virtual_subtype') == 'LXC':
            log.error('df command failed and LXC detected. If you are running '
                'a Docker container, consider linking /proc/mounts to '
                '/etc/mtab or consider running Docker with -privileged')
        return {}
    # Pick the platform-appropriate df invocation (-P = POSIX output format).
    if __grains__['kernel'] == 'Linux':
        cmd = 'df -P'
    elif __grains__['kernel'] == 'OpenBSD':
        cmd = 'df -kP'
    elif __grains__['kernel'] == 'AIX':
        cmd = 'df -kP'
    else:
        cmd = 'df'
    if flags:
        cmd += ' -{0}'.format(flags)
    ret = {}
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    # df wraps very long device names onto their own line; 'oldline' stitches
    # a wrapped name back onto the following statistics line.
    oldline = None
    for line in out:
        if not line:
            continue
        if line.startswith('Filesystem'):
            continue
        if oldline:
            line = oldline + " " + line
        comps = line.split()
        if len(comps) == 1:
            oldline = line
            continue
        else:
            oldline = None
        # Re-join filesystem names that themselves contain spaces until the
        # second token is the numeric blocks column.
        while len(comps) >= 2 and not comps[1].isdigit():
            comps[0] = '{0} {1}'.format(comps[0], comps[1])
            comps.pop(1)
        if len(comps) < 2:
            continue
        try:
            # Darwin's default df prints 512-byte blocks plus inode columns.
            if __grains__['kernel'] == 'Darwin':
                ret[comps[8]] = {
                    'filesystem': comps[0],
                    '512-blocks': comps[1],
                    'used': comps[2],
                    'available': comps[3],
                    'capacity': comps[4],
                    'iused': comps[5],
                    'ifree': comps[6],
                    '%iused': comps[7],
                }
            else:
                ret[comps[5]] = {
                    'filesystem': comps[0],
                    '1K-blocks': comps[1],
                    'used': comps[2],
                    'available': comps[3],
                    'capacity': comps[4],
                }
        except IndexError:
            log.error('Problem parsing disk usage information')
            ret = {}
    return ret
def inodeusage(args=None):
    '''
    Return inode usage information for volumes mounted on this minion

    CLI Example:

    .. code-block:: bash

        salt '*' disk.inodeusage
    '''
    flags = _clean_flags(args, 'disk.inodeusage')
    # -i = inode mode, -P = POSIX output format.
    cmd = 'df -iP'
    if flags:
        cmd += ' -{0}'.format(flags)
    ret = {}
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in out:
        if line.startswith('Filesystem'):
            continue
        comps = line.split()
        # Don't choke on empty lines
        if not comps:
            continue
        try:
            # OpenBSD's df -i column layout differs from the POSIX one, and
            # the total inode count must be derived from used + free.
            if __grains__['kernel'] == 'OpenBSD':
                ret[comps[8]] = {
                    'inodes': int(comps[5]) + int(comps[6]),
                    'used': comps[5],
                    'free': comps[6],
                    'use': comps[7],
                    'filesystem': comps[0],
                }
            else:
                ret[comps[5]] = {
                    'inodes': comps[1],
                    'used': comps[2],
                    'free': comps[3],
                    'use': comps[4],
                    'filesystem': comps[0],
                }
        except (IndexError, ValueError):
            log.error('Problem parsing inode usage information')
            ret = {}
    return ret
def percent(args=None):
    '''
    Return partition information for volumes mounted on this minion

    CLI Example:

    .. code-block:: bash

        salt '*' disk.percent /var
    '''
    if __grains__['kernel'] == 'Linux':
        cmd = 'df -P'
    elif __grains__['kernel'] == 'OpenBSD':
        cmd = 'df -kP'
    else:
        cmd = 'df'
    ret = {}
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in out:
        if not line:
            continue
        if line.startswith('Filesystem'):
            continue
        comps = line.split()
        # Re-join filesystem names containing spaces, mirroring usage().
        # FIX: guard on len(comps) so a malformed df line cannot raise
        # IndexError or exhaust the list (usage() already had this guard).
        while len(comps) >= 2 and not comps[1].isdigit():
            comps[0] = '{0} {1}'.format(comps[0], comps[1])
            comps.pop(1)
        if len(comps) < 2:
            continue
        try:
            if __grains__['kernel'] == 'Darwin':
                ret[comps[8]] = comps[4]
            else:
                ret[comps[5]] = comps[4]
        except IndexError:
            log.error('Problem parsing disk usage information')
            ret = {}
    if args and args not in ret:
        log.error('Problem parsing disk usage information: Partition \'{0}\' does not exist!'.format(args))
        ret = {}
    elif args:
        return ret[args]
    return ret
@decorators.which('blkid')
def blkid(device=None):
    '''
    Return block device attributes: UUID, LABEL, etc. This function only works
    on systems where blkid is available.

    CLI Example:

    .. code-block:: bash

        salt '*' disk.blkid
        salt '*' disk.blkid /dev/sda
    '''
    args = ""
    if device:
        args = " " + device

    ret = {}
    blkid_result = __salt__['cmd.run_all']('blkid' + args, python_shell=False)

    # Non-zero retcode: blkid found nothing or failed; return empty mapping.
    if blkid_result['retcode'] > 0:
        return ret

    for line in blkid_result['stdout'].splitlines():
        if not line:
            continue
        comps = line.split()
        # First token is the device path followed by ':'; strip the colon.
        device = comps[0][:-1]
        info = {}
        # Split the 'KEY="value" KEY="value"' tail into alternating key/value
        # tokens, then pair them up two at a time via the zip-on-one-iterator
        # idiom.
        device_attributes = re.split(('\"*\"'), line.partition(' ')[2])
        for key, value in zip(*[iter(device_attributes)]*2):
            key = key.strip('=').strip(' ')
            info[key] = value.strip('"')
        ret[device] = info

    return ret
| {
"content_hash": "39e6d66c20de0a73bc13bf07a4cb45d6",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 107,
"avg_line_length": 27.48995983935743,
"alnum_prop": 0.4821037253469686,
"repo_name": "smallyear/linuxLearn",
"id": "3c4ca3162e0e3819bfcbb333e23e64e43be2e0d7",
"size": "6869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/salt/modules/disk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
} |
import unittest
from recipeformats import mmf
class TestParseRecipe(unittest.TestCase):
    def assert_equal(self, actual, expected_title, expected_categories,
            expected_yield, expected_servings, expected_ingredients,
            expected_directions):
        # Compare a parsed recipe against the expected field values;
        # ingredients are compared via their repr() strings.
        actual_ingredients = [repr(i) for i in actual.ingredients]
        self.assertEqual(actual.title, expected_title)
        self.assertEqual(actual.categories, expected_categories)
        self.assertEqual(actual.yield_, expected_yield)
        self.assertEqual(actual.servings, expected_servings)
        self.assertEqual(actual_ingredients, expected_ingredients)
        self.assertEqual(actual.directions, expected_directions)
    # Variations on this recipe follow it.
    def test_when_one_column(self):
        # Baseline: a complete single-column Meal-Master recipe parses fully.
        lines = [
            '---------- Recipe via Meal-Master (tm) v8.05',
            ' ',
            ' Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            ' Yield: 8 Servings',
            ' ',
            ' 2 lb Frozen hash brown potatoes',
            ' 1 c Onions; diced',
            ' 1 cn Cream of chicken soup',
            ' 16 oz Sour cream',
            ' 1/2 c Melted margarine',
            ' 8 oz Grated sharp cheese',
            ' Salt and pepper to taste',
            '',
            ' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            ' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            ' Recipe by: From recipe files of Martha',
            ' ',
            '-----',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
            expected_yield, expected_servings,
            expected_ingredients, expected_directions)
    def test_when_no_ingredients_or_directions(self):
        # Header/footer only: ingredient and direction lists come back empty.
        lines = [
            '---------- Recipe via Meal-Master (tm) v8.05',
            ' ',
            ' Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            ' Yield: 8 Servings',
            ' ',
            '-----',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = []
        expected_directions = []
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
            expected_yield, expected_servings,
            expected_ingredients, expected_directions)
    def test_when_extra_empty_lines(self):
        # Extra blank lines between every section must not change the result.
        lines = [
            ' ',
            '---------- Recipe via Meal-Master (tm) v8.05',
            ' ',
            ' Title: Potato Casserole',
            ' ',
            ' ',
            ' ',
            ' Categories: Casserole, Potato',
            ' ',
            ' ',
            ' Yield: 8 Servings',
            ' ',
            ' ',
            ' 2 lb Frozen hash brown potatoes',
            ' 1 c Onions; diced',
            ' 1 cn Cream of chicken soup',
            ' 16 oz Sour cream',
            ' ',
            ' 1/2 c Melted margarine',
            ' 8 oz Grated sharp cheese',
            ' Salt and pepper to taste',
            '',
            ' Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            ' Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            ' ',
            ' ',
            ' Recipe by: From recipe files of Martha',
            ' ',
            '-----',
            ' ',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
            expected_yield, expected_servings,
            expected_ingredients, expected_directions)
    def test_when_missing_typical_empty_lines(self):
        """The parser copes when the usual blank separator lines are absent."""
        lines = [
            '---------- Recipe via Meal-Master (tm) v8.05',
            '      Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            '      Yield: 8 Servings',
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
            '-----',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_missing_header_and_footer(self):
        """A recipe still parses when the MMF header and footer lines are missing."""
        lines = [
            '      Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            '      Yield: 8 Servings',
            ' ',
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_missing_header_footer_and_title(self):
        """With no Title line, the parsed title is the empty string."""
        lines = [
            ' Categories: Casserole, Potato',
            '      Yield: 8 Servings',
            ' ',
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
        ]
        expected_title = ''
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_missing_header_footer_and_categories(self):
        """With no Categories line, the parsed category list is empty."""
        lines = [
            '      Title: Potato Casserole',
            '      Yield: 8 Servings',
            ' ',
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = []
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_missing_header_footer_and_yield(self):
        """With no Yield line, servings defaults to 0 and yield to ''."""
        lines = [
            '      Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            ' ',
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 0
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_only_ingredients_and_directions(self):
        """With no metadata at all, only ingredients and directions are parsed."""
        lines = [
            '      2 lb Frozen hash brown potatoes',
            '      1 c  Onions; diced',
            '      1 cn Cream of chicken soup',
            '     16 oz Sour cream',
            '    1/2 c  Melted margarine',
            '      8 oz Grated sharp cheese',
            '           Salt and pepper to taste',
            '',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
        ]
        expected_title = ''
        expected_categories = []
        expected_yield = ''
        expected_servings = 0
        expected_ingredients = [
            '{2} {lb} {Frozen hash brown potatoes}',
            '{1} {c} {Onions; diced}',
            '{1} {cn} {Cream of chicken soup}',
            '{16} {oz} {Sour cream}',
            '{1/2} {c} {Melted margarine}',
            '{8} {oz} {Grated sharp cheese}',
            '{} {} {Salt and pepper to taste}',
        ]
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_no_ingredients(self):
        """Directions following the metadata parse correctly when there is no ingredient section."""
        lines = [
            '---------- Recipe via Meal-Master (tm) v8.05',
            ' ',
            '      Title: Potato Casserole',
            ' Categories: Casserole, Potato',
            '      Yield: 8 Servings',
            ' ',
            '  Thaw potatoes about 30 min., then mix all ingredients in a large bowl.',
            '  Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            ' ',
            '  Recipe by: From recipe files of Martha',
            ' ',
            '-----',
        ]
        expected_title = 'Potato Casserole'
        expected_categories = ['Casserole', 'Potato']
        expected_yield = ''
        expected_servings = 8
        expected_ingredients = []
        expected_directions = [
            'Thaw potatoes about 30 min., then mix all ingredients in a large bowl. Place in a 9 X 13 baking dish. Bake at 350 for one hour. Serves 8',
            'Recipe by: From recipe files of Martha',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
    def test_when_two_column(self):
        """Two-column ingredient layout reads column-major, joining '-' continuation lines.

        NOTE(review): the second column appears to start at a fixed character
        offset (~column 41) per the MMF two-column convention — the exact
        fixture spacing below is what the parser keys on; confirm against the
        original fixture file if this test misbehaves.
        """
        lines = [
            'MMMMM----- Recipe via Meal-Master (tm) v8.02',
            ' ',
            '      Title: Potato-Sorrel Soup',
            ' Categories: Soup/stew, Vegetarian',
            '      Yield: 6 servings',
            ' ',
            '      4 tb Butter                           1/2 ts Salt (to taste)',
            '      7 c  Water                          1 1/2 lb Red potatoes; quartered',
            '      3 md Leeks; the white parts only              - lengthwise & thinly sliced',
            '           - chopped or cut                         Freshly ground pepper',
            '           - into 1/4-inch rounds                   Creme fraiche',
            'MMMMM--------------------------HEADING-------------------------------',
            '      6 c  Loosely packed sorrel leaves        1 tb Chives',
            '           -the stems removed and                   - thinly sliced or snipped',
            '           - leaves roughly chopped ',
            'MMMMM--------------------------HEADING-------------------------------',
            ' ',
            '  This is a rather rustic soup. For a more refined version, pass it',
            '  through a food mill before serving.',
            ' ',
            'MMMMM'
        ]
        expected_title = 'Potato-Sorrel Soup'
        expected_categories = ['Soup/stew', 'Vegetarian']
        expected_yield = ''
        expected_servings = 6
        expected_ingredients = [
            '{4} {tb} {Butter}',
            '{7} {c} {Water}',
            '{3} {md} {Leeks; the white parts only chopped or cut into 1/4-inch rounds}',
            '{1/2} {ts} {Salt (to taste)}',
            '{1 1/2} {lb} {Red potatoes; quartered lengthwise & thinly sliced}',
            '{} {} {Freshly ground pepper}',
            '{} {} {Creme fraiche}',
            '----- HEADING -----',
            '{6} {c} {Loosely packed sorrel leaves the stems removed and leaves roughly chopped}',
            '{1} {tb} {Chives thinly sliced or snipped}',
            '----- HEADING -----',
        ]
        expected_directions = [
            'This is a rather rustic soup. For a more refined version, pass it through a food mill before serving.',
        ]
        actual = mmf.parse_recipe(lines)
        self.assert_equal(actual, expected_title, expected_categories,
                          expected_yield, expected_servings,
                          expected_ingredients, expected_directions)
def test_when_empty(self):
lines = []
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_only_header(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
]
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
def test_when_only_header_and_footer(self):
lines = [
'---------- Recipe via Meal-Master (tm) v8.05',
'-----',
'Extra text that should not be included',
]
expected_title = ''
expected_categories = []
expected_yield = ''
expected_servings = 0
expected_ingredients = []
expected_directions = []
actual = mmf.parse_recipe(lines)
self.assert_equal(actual, expected_title, expected_categories,
expected_yield, expected_servings,
expected_ingredients, expected_directions)
class TestIsMmfHeader(unittest.TestCase):
    """Tests for mmf._is_mmf_header, which recognizes MMF recipe header lines.

    A header is exactly ten dashes, optionally prefixed by exactly five
    capital M's, followed by a space (possibly with trailing text).
    """
    def test_when_empty(self):
        self.assertEqual(mmf._is_mmf_header(''), False)
    def test_when_normal(self):
        self.assertEqual(
            mmf._is_mmf_header('---------- Recipe via Meal-Master (tm) v8.05'),
            True)
    def test_when_MMMMM(self):
        self.assertEqual(
            mmf._is_mmf_header('MMMMM----- Recipe via Meal-Master (tm) v8.05'),
            True)
    def test_when_mmmmm(self):
        # Lowercase m-prefix is not a valid header.
        self.assertEqual(
            mmf._is_mmf_header('mmmmm----- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_extra_dash(self):
        self.assertEqual(
            mmf._is_mmf_header('----------- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_extra_M(self):
        self.assertEqual(
            mmf._is_mmf_header('MMMMMM----- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_missing_dash(self):
        self.assertEqual(
            mmf._is_mmf_header('--------- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_missing_M(self):
        self.assertEqual(
            mmf._is_mmf_header('MMMM----- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_leading_space(self):
        self.assertEqual(
            mmf._is_mmf_header(' ---------- Recipe via Meal-Master (tm) v8.05'),
            False)
    def test_when_trailing_space(self):
        self.assertEqual(
            mmf._is_mmf_header('---------- Recipe via Meal-Master (tm) v8.05 '),
            True)
    def test_when_only_dashes(self):
        # Dashes alone (no trailing space) are not a header.
        self.assertEqual(mmf._is_mmf_header('----------'), False)
    def test_when_only_dashes_and_space(self):
        self.assertEqual(mmf._is_mmf_header('---------- '), True)
    def test_when_other_text(self):
        self.assertEqual(mmf._is_mmf_header('---------- Anything goes here'), True)
    def test_when_only_MMMMM_and_dashes(self):
        self.assertEqual(mmf._is_mmf_header('MMMMM-----'), False)
    def test_when_only_MMMMM_and_dashes_and_space(self):
        self.assertEqual(mmf._is_mmf_header('MMMMM----- '), True)
    def test_when_MMMMM_other_text(self):
        self.assertEqual(mmf._is_mmf_header('MMMMM----- Anything goes here'), True)
class TestIsMmfFooter(unittest.TestCase):
    """Tests for mmf._is_mmf_footer.

    Only the exact strings '-----' and 'MMMMM' — no leading/trailing
    whitespace, no extra characters — should be considered valid footers.
    """
    def test_when_normal(self):
        self.assertEqual(mmf._is_mmf_footer('-----'), True)
    def test_when_MMMMM(self):
        self.assertEqual(mmf._is_mmf_footer('MMMMM'), True)
    def test_when_empty(self):
        self.assertEqual(mmf._is_mmf_footer(''), False)
    def test_when_leading_space(self):
        self.assertEqual(mmf._is_mmf_footer(' -----'), False)
    def test_when_trailing_space(self):
        self.assertEqual(mmf._is_mmf_footer('----- '), False)
    def test_when_extra_dash(self):
        self.assertEqual(mmf._is_mmf_footer('------'), False)
    def test_when_missing_dash(self):
        self.assertEqual(mmf._is_mmf_footer('----'), False)
    def test_when_trailing_text(self):
        self.assertEqual(mmf._is_mmf_footer('-----TEXT'), False)
    def test_when_MMMMM_leading_space(self):
        self.assertEqual(mmf._is_mmf_footer(' MMMMM'), False)
    def test_when_MMMMM_trailing_space(self):
        self.assertEqual(mmf._is_mmf_footer('MMMMM '), False)
    def test_when_MMMMM_extra_M(self):
        self.assertEqual(mmf._is_mmf_footer('MMMMMM'), False)
    def test_when_MMMMM_missing_M(self):
        self.assertEqual(mmf._is_mmf_footer('MMMM'), False)
    def test_when_MMMMM_trailing_text(self):
        self.assertEqual(mmf._is_mmf_footer('MMMMMTEXT'), False)
class TestTestMetadata(unittest.TestCase):
    """Tests for mmf._test_metadata, which detects and splits 'Name: value'
    metadata lines into an (is_metadata, name, value) tuple."""
    def test_when_empty(self):
        self.assertEqual(mmf._test_metadata(''), (False, '', ''))
    def test_when_colon(self):
        # A bare colon has no attribute name, so it is not metadata.
        self.assertEqual(mmf._test_metadata(':'), (False, '', ''))
    def test_when_no_attribute_name(self):
        self.assertEqual(mmf._test_metadata('   : value'), (False, '', ''))
    def test_when_text_without_colon(self):
        self.assertEqual(mmf._test_metadata('  Chill before serving. '),
                         (False, '', ''))
    def test_when_no_value(self):
        # Name with an empty value still counts as metadata.
        self.assertEqual(mmf._test_metadata(' Categories: '),
                         (True, 'Categories', ''))
    def test_when_normal(self):
        self.assertEqual(mmf._test_metadata('      Title: 21 Club Rice Pudding'),
                         (True, 'Title', '21 Club Rice Pudding'))
    def test_when_extra_spaces(self):
        # Surrounding whitespace is stripped from both name and value.
        self.assertEqual(mmf._test_metadata('  Recipe By : Aunt Salli  '),
                         (True, 'Recipe By', 'Aunt Salli'))
class TestSplitCategories(unittest.TestCase):
    """Tests for mmf._split_categories, which splits the Categories metadata
    value into a list, treating the word 'None' (any case) as empty."""
    def test_when_none(self):
        self.assertEqual(mmf._split_categories('None'), [])
    def test_when_none_mixed_caps(self):
        # 'None' is recognized case-insensitively.
        self.assertEqual(mmf._split_categories('noNE'), [])
    def test_when_one_category(self):
        self.assertEqual(mmf._split_categories('Dessert'), ['Dessert'])
    def test_when_multiple_categories(self):
        self.assertEqual(mmf._split_categories('Dessert,Italian,Easy'),
                         ['Dessert', 'Italian', 'Easy'])
    def test_when_multiple_categories_with_space(self):
        self.assertEqual(mmf._split_categories('Dessert, Italian, Easy'),
                         ['Dessert', 'Italian', 'Easy'])
    def test_when_multiple_categories_with_more_space(self):
        # Extra whitespace around names and commas is stripped.
        self.assertEqual(mmf._split_categories(' Dessert ,  Italian ,  Easy '),
                         ['Dessert', 'Italian', 'Easy'])
class TestGetYieldAndServings(unittest.TestCase):
    """Tests for mmf._get_yield_and_servings, which interprets the Yield
    metadata value as either a servings count or a free-form yield string."""
    def test_when_empty(self):
        self.assertEqual(mmf._get_yield_and_servings(''), ('', 0))
    def test_when_number(self):
        # A bare number becomes the servings count.
        self.assertEqual(mmf._get_yield_and_servings('10'), ('', 10))
    def test_when_number_and_unit(self):
        # A number plus a unit stays a yield description; servings is 0.
        self.assertEqual(mmf._get_yield_and_servings('24 cookies'),
                         ('24 cookies', 0))
class TestGetIngredientHeading(unittest.TestCase):
    """Tests for mmf._get_ingredient_heading, which extracts the heading text
    from a dashed separator line (optionally prefixed by 'MMMMM')."""
    def test_when_empty(self):
        self.assertEqual(mmf._get_ingredient_heading(''), '')
    def test_when_not_marked_up(self):
        # Plain text is not a heading line.
        self.assertEqual(mmf._get_ingredient_heading('This is some text.'), '')
    def test_when_heading(self):
        line = '---------------------------------SPAM---------------------------------'
        self.assertEqual(mmf._get_ingredient_heading(line), 'SPAM')
    def test_when_minimum(self):
        # Five dashes on each side is the minimum markup.
        self.assertEqual(mmf._get_ingredient_heading('-----BAR-----'), 'BAR')
    def test_when_MMMMM_heading(self):
        line = 'MMMMM---------------------------QUICK OATS--------------------------------'
        self.assertEqual(mmf._get_ingredient_heading(line), 'QUICK OATS')
    def test_when_MMMMM_minimum(self):
        self.assertEqual(mmf._get_ingredient_heading('MMMMM-----JARS-----'), 'JARS')
    def test_when_spaces(self):
        # Whitespace around the heading text is stripped.
        line = '------------------------- This is a heading. --------------------------------'
        self.assertEqual(mmf._get_ingredient_heading(line), 'This is a heading.')
class TestGetIngredient(unittest.TestCase):
    """Tests for mmf._get_ingredient, which parses one fixed-column ingredient
    line into quantity, unit, and text fields."""
    def _check(self, line, quantity, unit, text):
        # Shared assertion helper: every case here expects a plain
        # ingredient (never a heading), checked in the same order as before.
        ingredient = mmf._get_ingredient(line)
        self.assertEqual(ingredient.quantity, quantity)
        self.assertEqual(ingredient.unit, unit)
        self.assertEqual(ingredient.text, text)
        self.assertEqual(ingredient.is_heading, False)
    def test_when_empty(self):
        self._check('', '', '', '')
    def test_when_whitespace(self):
        self._check('        ', '', '', '')
    def test_1(self):
        self._check('      1 qt Milk', '1', 'qt', 'Milk')
    def test_2(self):
        self._check('    1/2 qt Milk', '1/2', 'qt', 'Milk')
    def test_3(self):
        # Mixed-number quantity.
        self._check('  3 1/2 qt Milk', '3 1/2', 'qt', 'Milk')
    def test_4(self):
        # Decimal quantity.
        self._check('    1.5 qt Milk', '1.5', 'qt', 'Milk')
    def test_5(self):
        # Decimal quantity with no integer part.
        self._check('     .5 qt Milk', '.5', 'qt', 'Milk')
    def test_6(self):
        self._check('    3/4 c  Long-grained rice', '3/4', 'c', 'Long-grained rice')
    def test_7(self):
        # No quantity and no unit: the whole line is text.
        self._check('           Raisins (optional)', '', '', 'Raisins (optional)')
    def test_8(self):
        # Quantity with an empty unit field.
        self._check('      1    Egg yolk', '1', '', 'Egg yolk')
class TestIsIngredient(unittest.TestCase):
    """Tests for mmf._is_ingredient, which decides whether a line matches the
    fixed-column MMF ingredient layout."""
    def test_when_empty(self):
        self.assertEqual(mmf._is_ingredient(''), False)
    def test_when_direction(self):
        # Prose direction text must not be mistaken for an ingredient.
        line = 'In large bowl, blend oil and sugars on low until well mixed. Add'
        self.assertEqual(mmf._is_ingredient(line), False)
    def test_when_invalid_quantity(self):
        self.assertEqual(mmf._is_ingredient('     ab qt Milk'), False)
    def test_when_invalid_unit(self):
        self.assertEqual(mmf._is_ingredient('     21 Apples'), False)
    def test_when_spacing_off(self):
        # Fields not aligned to the expected columns are rejected.
        self.assertEqual(mmf._is_ingredient('     1 qt Milk'), False)
    def test_when_1(self):
        self.assertEqual(mmf._is_ingredient('      1 qt Milk'), True)
    def test_when_2(self):
        self.assertEqual(mmf._is_ingredient('  1 1/2 c  Whipped cream'), True)
    def test_when_3(self):
        self.assertEqual(mmf._is_ingredient('      1    Vanilla bean'), True)
    def test_when_4(self):
        # Text-only ingredient (no quantity or unit) is still valid.
        self.assertEqual(mmf._is_ingredient('           Raisins (optional)'), True)
    def test_when_5(self):
        self.assertEqual(mmf._is_ingredient('    1.5 qt Milk'), True)
    def test_when_6(self):
        # A two-column ingredient line is also recognized.
        line = '      1 c  Oil                            1 t  Baking soda'
        self.assertEqual(mmf._is_ingredient(line), True)
class TestGetIngredients(unittest.TestCase):
    """Tests for mmf._get_ingredients.

    Input is a list of (line, is_heading) pairs; output is ingredient objects
    whose repr is '{quantity} {unit} {text}' (headings render as
    '----- NAME -----'). Covers one- and two-column layouts, headings, and
    hyphen-prefixed continuation lines.

    NOTE(review): two-column fixtures appear to rely on the second column
    starting at a fixed offset (~column 41) per the MMF convention — the
    exact spacing is load-bearing; verify against the original fixture file.
    """
    def test_when_none(self):
        lines = []
        expected = []
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_empty_line(self):
        lines = [
            ('        ', False),
        ]
        expected = []
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_empty_lines(self):
        lines = [
            ('        ', False),
            ('        ', False),
            ('        ', False),
        ]
        expected = []
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_one_column(self):
        lines = [
            ('      1 qt Milk', False),
            ('      1 pt Heavy cream', False),
            ('    1/2 ts Salt', False),
            ('      1    Vanilla bean', False),
            ('    3/4 c  Long-grained rice', False),
            ('      1 c  Granulated sugar', False),
            ('      1    Egg yolk', False),
            ('  1 1/2 c  Whipped cream', False),
            ('           Raisins (optional)', False),
        ]
        expected = [
            '{1} {qt} {Milk}',
            '{1} {pt} {Heavy cream}',
            '{1/2} {ts} {Salt}',
            '{1} {} {Vanilla bean}',
            '{3/4} {c} {Long-grained rice}',
            '{1} {c} {Granulated sugar}',
            '{1} {} {Egg yolk}',
            '{1 1/2} {c} {Whipped cream}',
            '{} {} {Raisins (optional)}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_one_column_with_extra_lines(self):
        lines = [
            ('        ', False),
            ('      1 qt Milk', False),
            ('      1 pt Heavy cream', False),
            ('    1/2 ts Salt', False),
            ('      1    Vanilla bean', False),
            ('    3/4 c  Long-grained rice', False),
            ('        ', False),
            ('      1 c  Granulated sugar', False),
            ('      1    Egg yolk', False),
            ('  1 1/2 c  Whipped cream', False),
            ('           Raisins (optional)', False),
            ('        ', False),
        ]
        expected = [
            '{1} {qt} {Milk}',
            '{1} {pt} {Heavy cream}',
            '{1/2} {ts} {Salt}',
            '{1} {} {Vanilla bean}',
            '{3/4} {c} {Long-grained rice}',
            '{1} {c} {Granulated sugar}',
            '{1} {} {Egg yolk}',
            '{1 1/2} {c} {Whipped cream}',
            '{} {} {Raisins (optional)}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_one_column_with_headings(self):
        lines = [
            ('FOR THE PIE', True),
            ('  1 1/2 c  All-Purpose Flour', False),
            ('    1/2 ts Salt', False),
            ('    1/2 c  Shortening', False),
            ('      5 tb ICE Water', False),
            ('      8 c  Apples [peeled & sliced]', False),
            ('    1/4 c  Granulated Sugar', False),
            ('      2 tb All-Purpose Flour', False),
            ('    1/2 ts Nutmeg, Ground', False),
            ('      2 tb Lemon Juice', False),
            ('      1 ts Cinnamon, Ground', False),
            ('', False),
            ('FOR THE TOPPING', True),
            ('    1/2 c  Granulated Sugar', False),
            ('    1/2 c  All-Purpose Flour', False),
            ('    1/3 c  Butter', False),
            ('      1 lg Paper Bag', False),
            ('           Vanilla Ice Cream', False),
        ]
        expected = [
            '----- FOR THE PIE -----',
            '{1 1/2} {c} {All-Purpose Flour}',
            '{1/2} {ts} {Salt}',
            '{1/2} {c} {Shortening}',
            '{5} {tb} {ICE Water}',
            '{8} {c} {Apples [peeled & sliced]}',
            '{1/4} {c} {Granulated Sugar}',
            '{2} {tb} {All-Purpose Flour}',
            '{1/2} {ts} {Nutmeg, Ground}',
            '{2} {tb} {Lemon Juice}',
            '{1} {ts} {Cinnamon, Ground}',
            '----- FOR THE TOPPING -----',
            '{1/2} {c} {Granulated Sugar}',
            '{1/2} {c} {All-Purpose Flour}',
            '{1/3} {c} {Butter}',
            '{1} {lg} {Paper Bag}',
            '{} {} {Vanilla Ice Cream}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_two_columns(self):
        lines = [
            ('  1 1/2 lb Hamburger                         1 ds Salt', False),
            ('      1 c  Onion; chopped                  1/2 c  Water', False),
            ('      1 c  Green pepper; chopped           1/8 t  Hot pepper sauce', False),
            ('      1 T  Oil ', False),
        ]
        expected = [
            '{1 1/2} {lb} {Hamburger}',
            '{1} {c} {Onion; chopped}',
            '{1} {c} {Green pepper; chopped}',
            '{1} {T} {Oil}',
            '{1} {ds} {Salt}',
            '{1/2} {c} {Water}',
            '{1/8} {t} {Hot pepper sauce}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_two_columns_with_headings(self):
        lines = [
            ('HEADING 1', True),
            ('  1 1/2 lb Hamburger                         1 ds Salt', False),
            ('      1 c  Onion; chopped                  1/2 c  Water', False),
            ('HEADING 2', True),
            ('      1 c  Green pepper; chopped           1/8 t  Hot pepper sauce', False),
            ('      1 T  Oil ', False),
            ('HEADING 3', True),
            ('      7 oz Jack/Mozz. cheese slices        1/2 c  Parmesan cheese; grated', False),
        ]
        expected = [
            '----- HEADING 1 -----',
            '{1 1/2} {lb} {Hamburger}',
            '{1} {c} {Onion; chopped}',
            '{1} {ds} {Salt}',
            '{1/2} {c} {Water}',
            '----- HEADING 2 -----',
            '{1} {c} {Green pepper; chopped}',
            '{1} {T} {Oil}',
            '{1/8} {t} {Hot pepper sauce}',
            '----- HEADING 3 -----',
            '{7} {oz} {Jack/Mozz. cheese slices}',
            '{1/2} {c} {Parmesan cheese; grated}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_one_column_with_line_continuations(self):
        lines = [
            ('      1 ts Salt', False),
            ('           Fresh ground', False),
            ('           -black pepper to', False),
            ('           -taste', False),
            ('      1 cn (6-oz) tomato paste', False),
            ('      1 cn (30-oz) red kidney beans', False),
            ('           -drained', False),
        ]
        expected = [
            '{1} {ts} {Salt}',
            '{} {} {Fresh ground black pepper to taste}',
            '{1} {cn} {(6-oz) tomato paste}',
            '{1} {cn} {(30-oz) red kidney beans drained}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
    def test_when_two_columns_with_line_continuations(self):
        lines = [
            ('      1 lg Artichoke; -=OR=-                        - and thinly sliced', False),
            ('      2 md -Artichokes                         6    Leaves butter lettuce', False),
            ('      1 c  Water; acidulated with                   - sliced into 1/4" strips', False),
            ('           - the juice of                           -=OR=- a handful of', False),
            ('      1    Lemon                                    - Sorrel leaves, sliced', False),
            ('      2    Garlic cloves                       1 tb Chopped parsley', False),
            ('      1 tb Virgin olive oil                    2    Mint leaves; chopped', False),
            ('      1 lg Leek; white part only -=OR=-             Salt', False),
            ('      2 md Leeks, white part only          5 1/2 c  Water', False),
            ('           - washed and sliced                 1 lb Fresh peas; shucked, -=OR=-', False),
            ('      1 sm New potato; quartered               1 c  -Frozen peas', False),
        ]
        expected = [
            '{1} {lg} {Artichoke; -=OR=-}',
            '{2} {md} {-Artichokes}',
            '{1} {c} {Water; acidulated with the juice of}',
            '{1} {} {Lemon}',
            '{2} {} {Garlic cloves}',
            '{1} {tb} {Virgin olive oil}',
            '{1} {lg} {Leek; white part only -=OR=-}',
            '{2} {md} {Leeks, white part only washed and sliced}',
            '{1} {sm} {New potato; quartered and thinly sliced}',
            '{6} {} {Leaves butter lettuce sliced into 1/4" strips =OR=- a handful of Sorrel leaves, sliced}',
            '{1} {tb} {Chopped parsley}',
            '{2} {} {Mint leaves; chopped}',
            '{} {} {Salt}',
            '{5 1/2} {c} {Water}',
            '{1} {lb} {Fresh peas; shucked, -=OR=-}',
            '{1} {c} {-Frozen peas}',
        ]
        actual = [repr(i) for i in mmf._get_ingredients(lines)]
        self.assertEqual(actual, expected)
class TestParagraphizeDirections(unittest.TestCase):
def test_when_none(self):
lines = []
expected = []
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_empty(self):
lines = ['']
expected = []
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_single_line(self):
lines = [' Brown cut up pieces of meat.']
expected = ['Brown cut up pieces of meat.']
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_extra_lines(self):
lines = [' ', ' Brown cut up pieces of meat.', ' ']
expected = ['Brown cut up pieces of meat.']
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_more_extra_lines(self):
lines = [
' ',
' ',
' Brown cut up pieces of meat.',
' ',
' Brown cut up pieces of meat!',
' ',
' ',
' ',
]
expected = [
'Brown cut up pieces of meat.',
'Brown cut up pieces of meat!',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_paragraph(self):
lines = [
' Brown cut up pieces of meat.Season with chili powder,salt and black',
' pepper.Add chopped vegetables and V - 8 vegetable juice. Add ketchup',
' and Worcestershire sauce to taste.',
]
expected = [
'Brown cut up pieces of meat.Season with chili powder,salt and black pepper.Add chopped vegetables and V - 8 vegetable juice. Add ketchup and Worcestershire sauce to taste.',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_multiple_paragraphs(self):
lines = [
' The kind of chiles that you use determine the final flavor, you can',
' experiment with different kinds or mixing the different kinds of chiles.',
' But this is the basic recipe for prepare salsas with dry chiles.',
' ',
' Wash the chiles in water and discard the seeds and threads of chiles. Let',
' stand in water at least 2 or 3 hours or all the night, if you do not have',
' time let the chiles in warm water at least 30 min.',
' ',
' Then ground with the other ingredients.',
]
expected = [
'The kind of chiles that you use determine the final flavor, you can experiment with different kinds or mixing the different kinds of chiles. But this is the basic recipe for prepare salsas with dry chiles.',
'Wash the chiles in water and discard the seeds and threads of chiles. Let stand in water at least 2 or 3 hours or all the night, if you do not have time let the chiles in warm water at least 30 min.',
'Then ground with the other ingredients.',
]
actual = mmf._paragraphize_directions(lines)
self.assertEqual(actual, expected)
def test_when_multiple_paragraphs_separated_by_paragraph_marker(self):
    """A \x14 control character at end of line also terminates a paragraph."""
    raw = [
        ' The kind of chiles that you use determine the final flavor, you can',
        ' experiment with different kinds or mixing the different kinds of chiles.',
        ' But this is the basic recipe for prepare salsas with dry chiles.\x14',
        ' Wash the chiles in water and discard the seeds and threads of chiles. Let',
        ' stand in water at least 2 or 3 hours or all the night, if you do not have',
        ' time let the chiles in warm water at least 30 min.\x14',
        ' Then ground with the other ingredients.',
    ]
    want = [
        'The kind of chiles that you use determine the final flavor, you can experiment with different kinds or mixing the different kinds of chiles. But this is the basic recipe for prepare salsas with dry chiles.',
        'Wash the chiles in water and discard the seeds and threads of chiles. Let stand in water at least 2 or 3 hours or all the night, if you do not have time let the chiles in warm water at least 30 min.',
        'Then ground with the other ingredients.',
    ]
    self.assertEqual(mmf._paragraphize_directions(raw), want)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "da63c3e1284f63027d335999035e25b0",
"timestamp": "",
"source": "github",
"line_count": 1238,
"max_line_length": 220,
"avg_line_length": 40.225363489499195,
"alnum_prop": 0.5159340549007008,
"repo_name": "samjavner/recipeformats",
"id": "ce63bf44fea3c0509b4f6e96be91f0e9c127393a",
"size": "49799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mmf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127906"
}
],
"symlink_target": ""
} |
"""The tests for the Season integration."""
from datetime import datetime
from freezegun import freeze_time
import pytest
from homeassistant.components.season.const import (
DOMAIN,
TYPE_ASTRONOMICAL,
TYPE_METEOROLOGICAL,
)
from homeassistant.components.season.sensor import (
STATE_AUTUMN,
STATE_SPRING,
STATE_SUMMER,
STATE_WINTER,
)
from homeassistant.const import CONF_TYPE, STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from tests.common import MockConfigEntry
# Minimal platform configurations for the latitude scenarios under test;
# the tests below only read the "latitude" value out of these dicts.
HEMISPHERE_NORTHERN = {
    "homeassistant": {"latitude": 48.864716, "longitude": 2.349014},
    "sensor": {"platform": "season", "type": "astronomical"},
}

HEMISPHERE_SOUTHERN = {
    "homeassistant": {"latitude": -33.918861, "longitude": 18.423300},
    "sensor": {"platform": "season", "type": "astronomical"},
}

# Latitude 0: on the equator no hemisphere applies.
HEMISPHERE_EQUATOR = {
    "homeassistant": {"latitude": 0, "longitude": -51.065100},
    "sensor": {"platform": "season", "type": "astronomical"},
}

HEMISPHERE_EMPTY = {
    "homeassistant": {},
    "sensor": {"platform": "season", "type": "meteorological"},
}

# (season type, frozen date, expected state) triples covering all four
# seasons for each hemisphere.
NORTHERN_PARAMETERS = [
    (TYPE_ASTRONOMICAL, datetime(2017, 9, 3, 0, 0), STATE_SUMMER),
    (TYPE_METEOROLOGICAL, datetime(2017, 8, 13, 0, 0), STATE_SUMMER),
    (TYPE_ASTRONOMICAL, datetime(2017, 9, 23, 0, 0), STATE_AUTUMN),
    (TYPE_METEOROLOGICAL, datetime(2017, 9, 3, 0, 0), STATE_AUTUMN),
    (TYPE_ASTRONOMICAL, datetime(2017, 12, 25, 0, 0), STATE_WINTER),
    (TYPE_METEOROLOGICAL, datetime(2017, 12, 3, 0, 0), STATE_WINTER),
    (TYPE_ASTRONOMICAL, datetime(2017, 4, 1, 0, 0), STATE_SPRING),
    (TYPE_METEOROLOGICAL, datetime(2017, 3, 3, 0, 0), STATE_SPRING),
]

# Same dates as above, but the expected seasons are shifted by half a year.
SOUTHERN_PARAMETERS = [
    (TYPE_ASTRONOMICAL, datetime(2017, 12, 25, 0, 0), STATE_SUMMER),
    (TYPE_METEOROLOGICAL, datetime(2017, 12, 3, 0, 0), STATE_SUMMER),
    (TYPE_ASTRONOMICAL, datetime(2017, 4, 1, 0, 0), STATE_AUTUMN),
    (TYPE_METEOROLOGICAL, datetime(2017, 3, 3, 0, 0), STATE_AUTUMN),
    (TYPE_ASTRONOMICAL, datetime(2017, 9, 3, 0, 0), STATE_WINTER),
    (TYPE_METEOROLOGICAL, datetime(2017, 8, 13, 0, 0), STATE_WINTER),
    (TYPE_ASTRONOMICAL, datetime(2017, 9, 23, 0, 0), STATE_SPRING),
    (TYPE_METEOROLOGICAL, datetime(2017, 9, 3, 0, 0), STATE_SPRING),
]
def idfn(val):
    """Provide IDs for pytest parametrize: dates become YYYYMMDD strings."""
    return val.strftime("%Y%m%d") if isinstance(val, datetime) else None
@pytest.mark.parametrize("type,day,expected", NORTHERN_PARAMETERS, ids=idfn)
async def test_season_northern_hemisphere(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    type: str,
    day: datetime,
    expected: str,
) -> None:
    """Test the reported season for northern-hemisphere dates."""
    hass.config.latitude = HEMISPHERE_NORTHERN["homeassistant"]["latitude"]

    mock_config_entry.add_to_hass(hass)
    hass.config_entries.async_update_entry(
        mock_config_entry, unique_id=type, data={CONF_TYPE: type}
    )

    # Freeze the clock so the sensor computes the season for `day`.
    with freeze_time(day):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    state = hass.states.get("sensor.season")
    assert state
    assert state.state == expected

    entity_registry = er.async_get(hass)
    entry = entity_registry.async_get("sensor.season")
    assert entry
    assert entry.unique_id == mock_config_entry.entry_id
@pytest.mark.parametrize("type,day,expected", SOUTHERN_PARAMETERS, ids=idfn)
async def test_season_southern_hemisphere(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    type: str,
    day: datetime,
    expected: str,
) -> None:
    """Test the reported season for southern-hemisphere dates."""
    hass.config.latitude = HEMISPHERE_SOUTHERN["homeassistant"]["latitude"]

    mock_config_entry.add_to_hass(hass)
    hass.config_entries.async_update_entry(
        mock_config_entry, unique_id=type, data={CONF_TYPE: type}
    )

    # Freeze the clock so the sensor computes the season for `day`.
    with freeze_time(day):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    state = hass.states.get("sensor.season")
    assert state
    assert state.state == expected

    entity_registry = er.async_get(hass)
    entry = entity_registry.async_get("sensor.season")
    assert entry
    assert entry.unique_id == mock_config_entry.entry_id

    # This variant additionally verifies the service device entry.
    device_registry = dr.async_get(hass)
    assert entry.device_id
    device_entry = device_registry.async_get(entry.device_id)
    assert device_entry
    assert device_entry.identifiers == {(DOMAIN, mock_config_entry.entry_id)}
    assert device_entry.name == "Season"
    assert device_entry.entry_type is dr.DeviceEntryType.SERVICE
async def test_season_equator(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test that season should be unknown for equator."""
    hass.config.latitude = HEMISPHERE_EQUATOR["homeassistant"]["latitude"]
    mock_config_entry.add_to_hass(hass)

    with freeze_time(datetime(2017, 9, 3, 0, 0)):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    # At latitude 0 no hemisphere applies, so the sensor has no season.
    state = hass.states.get("sensor.season")
    assert state
    assert state.state == STATE_UNKNOWN

    entity_registry = er.async_get(hass)
    entry = entity_registry.async_get("sensor.season")
    assert entry
    assert entry.unique_id == mock_config_entry.entry_id
| {
"content_hash": "edf6513c3e1344368900538903d5b343",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 78,
"avg_line_length": 34.21518987341772,
"alnum_prop": 0.6825749167591565,
"repo_name": "w1ll1am23/home-assistant",
"id": "0c2470edb7bf5d4cffa7597cc737784ae8af1158",
"size": "5406",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/season/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
"""
A set of methods to get coordinate maps which represent slices in space.
"""
from nipy.core.reference.coordinate_system import CoordinateSystem
from nipy.core.reference.coordinate_map import Affine
from nipy.core.reference.array_coords import ArrayCoordMap
import numpy as np
__docformat__ = 'restructuredtext'
def from_origin_and_columns(origin, colvectors, shape, output_coords):
    """
    Return an ArrayCoordMap representing a slice defined by an origin,
    direction vectors spanning the slice, and a shape.

    :Parameters:
        origin : output-space coordinates of the input point [0]*ndimin
        colvectors : one row per input axis; each row is the output-space
            step taken along that axis
        shape : number of steps along each input axis
        output_coords : a CoordinateSystem for the output

    :Returns: `ArrayCoordMap`
    """
    steps = np.asarray(colvectors)
    nin, nout = steps.shape
    # Homogeneous affine: each column is one step vector, the last
    # column is the origin, and the bottom-right entry is 1.
    affine = np.zeros((nout + 1, nin + 1))
    affine[:nout, :nin] = steps.T
    affine[:nout, -1] = origin
    affine[nout, nin] = 1.
    input_coords = CoordinateSystem(['i%d' % d for d in range(len(shape))],
                                    'slice', output_coords.coord_dtype)
    mapping = Affine(affine, input_coords, output_coords)
    return ArrayCoordMap.from_shape(mapping, shape)
def xslice(x, zlim, ylim, output_coords, shape):
    """
    Return a slice through a 3d box with x fixed.

    :Parameters:
        x : fixed x coordinate of the slice
        zlim : (min, max) extent along z
        ylim : (min, max) extent along y
        output_coords : CoordinateSystem for the output space
        shape : (nz, ny) number of sample points along z and y
    """
    step_z = (zlim[1] - zlim[0]) / (shape[0] - 1.)
    step_y = (ylim[1] - ylim[0]) / (shape[1] - 1.)
    return from_origin_and_columns([zlim[0], ylim[0], x],
                                   [[step_z, 0, 0],
                                    [0, step_y, 0]],
                                   shape, output_coords)
def yslice(y, zlim, xlim, output_coords, shape):
    """
    Return a slice through a 3d box with y fixed.

    :Parameters:
        y : fixed y coordinate of the slice
        zlim : (min, max) extent along z
        xlim : (min, max) extent along x
        output_coords : CoordinateSystem for the output space
        shape : (nz, nx) number of sample points along z and x
    """
    step_z = (zlim[1] - zlim[0]) / (shape[0] - 1.)
    step_x = (xlim[1] - xlim[0]) / (shape[1] - 1.)
    return from_origin_and_columns([zlim[0], y, xlim[0]],
                                   [[step_z, 0, 0],
                                    [0, 0, step_x]],
                                   shape, output_coords)
def zslice(z, ylim, xlim, output_coords, shape):
    """
    Return a slice through a 3d box with z fixed.

    :Parameters:
        z : fixed z coordinate of the slice
        ylim : (min, max) extent along y
        xlim : (min, max) extent along x
        output_coords : CoordinateSystem for the output space
        shape : (ny, nx) number of sample points along y and x
    """
    # BUG FIX: the origin previously read [z, xlim[0], ylim[0]], placing the
    # x start in the y slot and vice versa.  The column vectors below step y
    # along output axis 1 and x along output axis 2, so by symmetry with
    # xslice/yslice the origin must be [z, ylim[0], xlim[0]].
    origin = [z, ylim[0], xlim[0]]
    colvectors = [[0, (ylim[1] - ylim[0]) / (shape[0] - 1.), 0],
                  [0, 0, (xlim[1] - xlim[0]) / (shape[1] - 1.)]]
    return from_origin_and_columns(origin, colvectors, shape, output_coords)
def bounding_box(coordmap, shape):
    """
    Determine a valid bounding box from a CoordinateMap instance:
    a [min, max] pair per output axis, evaluated over a grid of `shape`.

    :Parameters:
        coordmap : `CoordinateMap`
        shape : grid shape to evaluate over
    """
    evaluated = ArrayCoordMap.from_shape(coordmap, shape)
    return [[axis.min(), axis.max()] for axis in evaluated.transposed_values]
| {
"content_hash": "cabcba58b70392bd2b9b7c5751784be0",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 76,
"avg_line_length": 27.704918032786885,
"alnum_prop": 0.5662721893491124,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "fb10876046afcbce2e80b06c7bd92a401558ea19",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipy/core/reference/slices.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging configuration; read by pip/setuptools at install time.
setup(
    name="easy dataset scrapper",
    version="0.1.1",
    description="A lightweight base class to create a dataset scrapper",
    author="Iuga",
    # Ship only the library itself, not tests/tooling/docs.
    packages=find_packages(exclude=["tests", "tools", "docs", ".github"]),
    install_requires=[]
)
| {
"content_hash": "e070a613c2c864f81cde495604db3afc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 74,
"avg_line_length": 30,
"alnum_prop": 0.6766666666666666,
"repo_name": "iuga/easy_dataset_scrapper",
"id": "58cfa54a552abfbd14bf38054dc8a7e713f2e28e",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6223"
}
],
"symlink_target": ""
} |
from . import generic
# http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
arch = 'ARM'

# Map ARM ELF relocation names to the shared generic handlers.
R_ARM_COPY = generic.GenericCopyReloc
# NOTE(review): GLOB_DAT is mapped to the jump-slot handler -- presumably
# the generic handlers resolve both identically; confirm against other
# architectures' relocation tables.
R_ARM_GLOB_DAT = generic.GenericJumpslotReloc
R_ARM_JUMP_SLOT = generic.GenericJumpslotReloc
R_ARM_RELATIVE = generic.GenericRelativeReloc
R_ARM_ABS32 = generic.GenericAbsoluteAddendReloc
# Thread-local storage relocations: module id, dynamic offset, static offset.
R_ARM_TLS_DTPMOD32 = generic.GenericTLSModIdReloc
R_ARM_TLS_DTPOFF32 = generic.GenericTLSDoffsetReloc
R_ARM_TLS_TPOFF32 = generic.GenericTLSOffsetReloc
| {
"content_hash": "989f44e8e83e950e0f8919bb2c21b7e5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 35.357142857142854,
"alnum_prop": 0.8141414141414142,
"repo_name": "chubbymaggie/cle",
"id": "29ff6f414bac9020dd30aa46d7ff24fb22c3b53a",
"size": "495",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "cle/relocations/arm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "164978"
}
],
"symlink_target": ""
} |
import os
from pymessenger2 import Element, Button
from pymessenger2.bot import Bot
# Page token, app secret and test recipient come from the environment so
# real credentials never live in the repository.
TOKEN = os.environ.get('TOKEN')
APP_SECRET = os.environ.get('APP_SECRET')
bot = Bot(TOKEN, app_secret=APP_SECRET)
recipient_id = os.environ.get('RECIPIENT_ID')
def test_wrong_format_message():
    """A dict payload instead of a string yields a reply without message_id."""
    result = bot.send_text_message(recipient_id, {'text': "its a test"})
    assert type(result) is dict
    assert result.get('message_id') is None
def test_text_message():
    """A plain text message succeeds and reports message and recipient ids."""
    result = bot.send_text_message(recipient_id, "test")
    assert type(result) is dict
    assert result.get('message_id') is not None
    assert result.get('recipient_id') is not None
def test_elements():
    """A generic template with one linked element can be sent."""
    image_url = 'https://lh4.googleusercontent.com/-dZ2LhrpNpxs/AAAAAAAAAAI/AAAAAAAA1os/qrf-VeTVJrg/s0-c-k-no-ns/photo.jpg'
    elements = []
    element = Element(
        title="Arsenal",
        image_url=image_url,
        subtitle="Click to go to Arsenal website.",
        item_url="http://arsenal.com")
    elements.append(element)
    result = bot.send_generic_message(recipient_id, elements)
    assert type(result) is dict
    assert result.get('message_id') is not None
    assert result.get('recipient_id') is not None
def test_image_url():
    """A static image attachment can be sent by URL."""
    image_url = 'https://lh4.googleusercontent.com/-dZ2LhrpNpxs/AAAAAAAAAAI/AAAAAAAA1os/qrf-VeTVJrg/s0-c-k-no-ns/photo.jpg'
    result = bot.send_image_url(recipient_id, image_url)
    assert type(result) is dict
    assert result.get('message_id') is not None
    assert result.get('recipient_id') is not None
def test_image_gif_url():
    """An animated GIF attachment can be sent by URL."""
    image_url = 'https://media.giphy.com/media/rl0FOxdz7CcxO/giphy.gif'
    result = bot.send_image_url(recipient_id, image_url)
    assert type(result) is dict
    assert result.get('message_id') is not None
    assert result.get('recipient_id') is not None
def test_button_message():
    """A button template mixing web_url and postback buttons can be sent."""
    buttons = []
    button = Button(title='Arsenal', type='web_url', url='http://arsenal.com')
    buttons.append(button)
    button = Button(title='Other', type='postback', payload='other')
    buttons.append(button)
    text = 'Select'
    result = bot.send_button_message(recipient_id, text, buttons)
    assert type(result) is dict
    assert result.get('message_id') is not None
    assert result.get('recipient_id') is not None
def test_fields_blank():
    """Fetching a user profile with default fields returns a result."""
    user_profile = bot.get_user_info(recipient_id)
    assert user_profile is not None
def test_fields():
    """Requesting specific profile fields returns exactly those fields."""
    fields = ['first_name', 'last_name']
    user_profile = bot.get_user_info(recipient_id, fields=fields)
    assert user_profile is not None
    # Idiom fix: len() works directly on a dict; `.keys()` was redundant.
    assert len(user_profile) == len(fields)
| {
"content_hash": "902f7aa1d8b7ff6d506096cf4de3b51c",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 123,
"avg_line_length": 32.6875,
"alnum_prop": 0.6887189292543021,
"repo_name": "karlinnolabs/pymessenger",
"id": "0ee4689bf742bb5efed7c42222a95b631631b4f0",
"size": "2615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/bot_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25700"
}
],
"symlink_target": ""
} |
"Test pyshell, coverage 12%."
# Plus coverage of test_warning. Was 20% with test_openshell.
from idlelib import pyshell
import unittest
from test.support import requires
from tkinter import Tk
class PyShellFileListTest(unittest.TestCase):
    """Smoke test for PyShellFileList construction; needs a display."""

    @classmethod
    def setUpClass(cls):
        requires('gui')  # skips the whole class when no GUI is available
        cls.root = Tk()
        cls.root.withdraw()

    @classmethod
    def tearDownClass(cls):
        #cls.root.update_idletasks()
##        for id in cls.root.tk.call('after', 'info'):
##            cls.root.after_cancel(id)  # Need for EditorWindow.
        cls.root.destroy()
        del cls.root

    def test_init(self):
        # The file list should use the shell-specific editor window class
        # and start with no shell open.
        psfl = pyshell.PyShellFileList(self.root)
        self.assertEqual(psfl.EditorWindow, pyshell.PyShellEditorWindow)
        self.assertIsNone(psfl.pyshell)

    # The following sometimes causes 'invalid command name "109734456recolorize"'.
    # Uncommenting after_cancel above prevents this, but results in
    # TclError: bad window path name ".!listedtoplevel.!frame.text"
    # which is normally prevented by after_cancel.
##    def test_openshell(self):
##        pyshell.use_subprocess = False
##        ps = pyshell.PyShellFileList(self.root).open_shell()
##        self.assertIsInstance(ps, pyshell.PyShell)
# Run with extra verbosity when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| {
"content_hash": "462795b32a7b7306e79e6d9b796f55fd",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 31.11904761904762,
"alnum_prop": 0.6794185156847743,
"repo_name": "zooba/PTVS",
"id": "581444ca5ef21fe9041424bf7629808db7d0b9b7",
"size": "1307",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/idle_test/test_pyshell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
"""
Created on Sat Sep 2 09:41:08 2017
@author: Kirby Urner
"""
from math import gcd
from string import ascii_lowercase
from random import shuffle
import unittest
def totatives(n):
    """Return the set of integers in [0, n) that are coprime to n."""
    coprime = set()
    for candidate in range(n):
        if gcd(candidate, n) == 1:
            coprime.add(candidate)
    return coprime
def totient(n):
    """Euler's totient: how many integers in [0, n) are coprime to n."""
    # Count directly rather than materializing the full set of totatives.
    return sum(1 for k in range(n) if gcd(k, n) == 1)
class M:  # for "modulo"
    """Integer arithmetic modulo a shared, resettable modulus.

    NOTE: passing ``modulus`` to the constructor also resets the
    class-level default ``M.modulus`` used by subsequently created
    instances -- existing, intentional behaviour that ``__pow__`` and
    ``__repr__`` rely on.
    """

    modulus = 10  # class level

    def __init__(self, val, modulus=None):
        if modulus:
            self.modulus = M.modulus = modulus  # resettable
        else:
            self.modulus = M.modulus  # default
        self.val = val % self.modulus

    def __add__(self, other):
        # Only values sharing a modulus may be combined.
        if self.modulus != other.modulus:
            raise ValueError
        return M(self.val + other.val, self.modulus)

    def __mul__(self, other):
        if self.modulus != other.modulus:
            raise ValueError
        return M(self.val * other.val, self.modulus)

    def _bingcd(self, a, b):
        """Extended version of Euclid's Algorithm (binary GCD)
        Returns (m,n,gcd) such that m*a + n*b = gcd(a,b)"""
        g, u, v = [b, a], [1, 0], [0, 1]
        while g[1] != 0:
            y = g[0] // g[1]
            g[0], g[1] = g[1], g[0] % g[1]
            u[0], u[1] = u[1], u[0] - y * u[1]
            v[0], v[1] = v[1], v[0] - y * v[1]
        m = v[0] % b
        gcd = (m * a) % b
        # BUG FIX: was (gcd - m*a)/b, which produces a float under Python 3
        # true division; the Bezout coefficient n is an exact integer, so
        # use floor division.
        n = (gcd - m * a) // b
        return (m, n, gcd)

    def __invert__(self):
        """
        If gcd(a,b)=1, then (inverse(a, b) * a) mod b = 1,
        otherwise, if gcd(a,b)!=1, return 0

        Useful in RSA encryption, for finding d such that
        e*d mod totient(n) == 1
        """
        inva, n, gcd = self._bingcd(self.val, self.modulus)
        return M((gcd == 1) * inva, self.modulus)

    def __pow__(self, exp):  # pow() and ** both trigger this method
        # Repeated multiplication; a negative exponent means "power of the
        # multiplicative inverse".
        output = self
        if exp < 0:
            exp = abs(exp)
            output = ~self
        elif exp == 0:
            # Uses the class-level default modulus, which matches
            # self.modulus as long as no M with a different modulus has
            # been created since (pre-existing behaviour, kept as-is).
            output = M(1)
        elif exp == 1:
            output = self
        if exp > 1:
            for _ in range(1, exp):
                output = output * self
        return output

    def __repr__(self):
        return "(" + str(self.val) + " mod " + str(M.modulus) + ")"
class P_base:
    """
    A Permutation.

    ``self._code`` (a dict) maps each element of the domain to its image;
    a freshly constructed instance is the identity mapping.
    """

    def __init__(self, iterable=ascii_lowercase + ' '):  # default domain
        """
        start out with Identity
        """
        try:
            keys = iter(iterable)
            values = iter(iterable)
        except:
            raise TypeError
        self._code = dict(zip(keys, values))

    def __getitem__(self, key):
        """Image of ``key``, or None when key is outside the domain."""
        return self._code.get(key, None)

    def __repr__(self):
        return "P class: " + str(tuple(self._code.items())[:3]) + "..."

    def __mul__(self, other):
        """
        Compose permutations: look up my value for each key, then feed
        that value through ``other``.
        """
        composed = {key: other._code[self._code[key]] for key in self._code}
        product = P()
        product._code = composed
        return product

    def __eq__(self, other):
        """
        Two permutations are the same when their mappings agree.
        """
        return self._code == other._code
class P(P_base):  # first use of inheritance
    """Permutation with inversion, shuffling, substitution-cipher helpers
    and a cyclic-notation view layered on top of P_base."""

    def __invert__(self):
        """
        create new P with reversed dict
        """
        newP = P()
        newP._code = dict(zip(self._code.values(), self._code.keys()))
        return newP

    def shuffle(self):
        """
        return a random permutation of this permutation
        """
        the_keys = list(self._code.keys())  # grab keys
        shuffle(the_keys)  # shuffles 'em in place
        newP = P()
        # inject dict directly: original keys -> randomly reordered keys
        newP._code = dict(zip(self._code.keys(), the_keys))
        return newP

    def encrypt(self, plain):
        """
        turn plaintext into cyphertext using self._code
        """
        output = ""  # empty string
        # Characters outside the domain pass through unchanged.
        for c in plain:
            output = output + self._code.get(c, c)
        return output

    def decrypt(self, cypher):
        """
        Turn cyphertext into plaintext using ~self
        """
        reverse_P = ~self  # invert me!
        output = ""
        for c in cypher:
            output = output + reverse_P._code.get(c, c)
        return output

    def cyclic(self):
        """
        cyclic notation, a compact view of a group: follow each element
        around until the cycle closes, popping visited elements so every
        element appears in exactly one cycle.
        """
        output = []
        the_dict = self._code.copy()
        while the_dict:
            start = tuple(the_dict.keys())[0]
            the_cycle = [start]
            the_next = the_dict.pop(start)
            while the_next != start:
                the_cycle.append(the_next)
                the_next = the_dict.pop(the_next)
            output.append(tuple(the_cycle))
        return tuple(output)

    def from_cyclic(self, incoming):
        """
        create a P-type object from incoming cyclic view

        Think of zipping ('a', 'c', 'q', 'k') with
        ('c', 'q', 'k', 'a') -- the pairs ('a', 'c'),
        ('c', 'q'), ('q', 'k') and ('k', 'a') are what
        dict() will then consume.  We go through each
        subgroup, updating the final dict with the results
        of each loop.  When done, dump the dict into _code.
        """
        output = {}
        for subgroup in incoming:
            output.update(dict(zip(subgroup, subgroup[1:] + tuple(subgroup[0]))))
        newP = P()
        newP._code = output
        return newP
class Test_M(unittest.TestCase):
    """Unit tests for the modular-arithmetic class M."""

    def test_Perm(self):
        # Placeholder: permutation tests not yet written.
        pass

    def test_M(self):
        """RSA-style round trip: raise to e, then to d = e^-1 mod totient."""
        N = 3 * 47
        m = M(93, N)  # original message, N product of 2 primes
        e = 7  # raise to power
        c = m ** e  # encrypted
        d = ~M(e, totient(N))  # multiplicative inverse of e mod totient(N)
        self.assertEqual(m.val, pow(c.val, d.val, N))  # getting our message back
if __name__ == "__main__":
unittest.main() | {
"content_hash": "e7b1eaa5b5199008306fd2f71230eaee",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 81,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.48658575534972853,
"repo_name": "4dsolutions/Python5",
"id": "f3fb3cf1bd2156fdcd747a96432a9fb0804a2107",
"size": "6309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2969"
},
{
"name": "Jupyter Notebook",
"bytes": "2264451"
},
{
"name": "Python",
"bytes": "157873"
}
],
"symlink_target": ""
} |
"""
Claim objects for use with resource tracking.
"""
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
class NopClaim(object):
    """For use with compute drivers that do not support resource tracking"""

    def __init__(self, migration=None):
        self.migration = migration

    @property
    def memory_mb(self):
        return 0

    @property
    def disk_gb(self):
        return 0

    @property
    def vcpus(self):
        return 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Roll the claim back when the managed block raised.
        if exc_type is not None:
            self.abort()

    def abort(self):
        # Nothing was tracked, so there is nothing to undo.
        pass

    def __str__(self):
        return "[Claim: %d MB memory, %d GB disk, %d VCPUS]" % (
            self.memory_mb, self.disk_gb, self.vcpus)
class Claim(NopClaim):
    """A declaration that a compute host operation will require free resources.
    Claims serve as marker objects that resources are being held until the
    update_available_resource audit process runs to do a full reconciliation
    of resource usage.

    This information will be used to help keep the local compute hosts's
    ComputeNode model in sync to aid the scheduler in making efficient / more
    correct decisions with respect to host selection.
    """

    def __init__(self, instance, tracker):
        super(Claim, self).__init__()
        # Primitive (JSON-serializable) copy of the instance; the resource
        # sizes below are read out of this dict.
        self.instance = jsonutils.to_primitive(instance)
        self.tracker = tracker

    @property
    def disk_gb(self):
        # Root plus ephemeral disk, in GB.
        return self.instance['root_gb'] + self.instance['ephemeral_gb']

    @property
    def memory_mb(self):
        return self.instance['memory_mb']

    @property
    def vcpus(self):
        return self.instance['vcpus']

    @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
    def abort(self):
        """Compute operation requiring claimed resources has failed or
        been aborted.
        """
        LOG.debug(_("Aborting claim: %s") % self, instance=self.instance)
        self.tracker.abort_instance_claim(self.instance)

    def test(self, resources, limits=None):
        """Test if this claim can be satisfied given available resources and
        optional oversubscription limits

        This should be called before the compute node actually consumes the
        resources required to execute the claim.

        :param resources: available local compute node resources
        :param limits: optional per-resource oversubscription limits; a
            missing or None limit means the resource is unlimited
        :returns: Return true if resources are available to claim.
        """
        if not limits:
            limits = {}

        # If an individual limit is None, the resource will be considered
        # unlimited:
        memory_mb_limit = limits.get('memory_mb')
        disk_gb_limit = limits.get('disk_gb')
        vcpu_limit = limits.get('vcpu')

        msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d "
                "GB, VCPUs %(vcpus)d")
        params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb,
                  'vcpus': self.vcpus}
        LOG.audit(msg % params, instance=self.instance)

        # Test for resources:
        can_claim = (self._test_memory(resources, memory_mb_limit) and
                     self._test_disk(resources, disk_gb_limit) and
                     self._test_cpu(resources, vcpu_limit))

        if can_claim:
            LOG.audit(_("Claim successful"), instance=self.instance)
        else:
            LOG.audit(_("Claim failed"), instance=self.instance)

        return can_claim

    def _test_memory(self, resources, limit):
        type_ = _("Memory")
        unit = "MB"
        total = resources['memory_mb']
        used = resources['memory_mb_used']
        requested = self.memory_mb

        return self._test(type_, unit, total, used, requested, limit)

    def _test_disk(self, resources, limit):
        type_ = _("Disk")
        unit = "GB"
        total = resources['local_gb']
        used = resources['local_gb_used']
        requested = self.disk_gb

        return self._test(type_, unit, total, used, requested, limit)

    def _test_cpu(self, resources, limit):
        type_ = _("CPU")
        unit = "VCPUs"
        total = resources['vcpus']
        used = resources['vcpus_used']
        requested = self.vcpus

        return self._test(type_, unit, total, used, requested, limit)

    def _test(self, type_, unit, total, used, requested, limit):
        """Test if the given type of resource needed for a claim can be safely
        allocated.
        """
        # NOTE: the log messages below rely on `% locals()` picking up
        # type_/unit/total/used/free/requested/limit from this frame; keep
        # the local variable names in sync with the format strings.
        msg = _("Total %(type_)s: %(total)d %(unit)s, used: %(used)d %(unit)s")
        LOG.audit(msg % locals(), instance=self.instance)

        if limit is None:
            # treat resource as unlimited:
            LOG.audit(_("%(type_)s limit not specified, defaulting to "
                        "unlimited") % locals(), instance=self.instance)
            return True

        free = limit - used

        # Oversubscribed resource policy info:
        msg = _("%(type_)s limit: %(limit)d %(unit)s, free: %(free)d "
                "%(unit)s") % locals()
        LOG.audit(msg, instance=self.instance)

        can_claim = requested <= free

        if not can_claim:
            msg = _("Unable to claim resources. Free %(type_)s %(free)d "
                    "%(unit)s < requested %(requested)d %(unit)s") % locals()
            LOG.info(msg, instance=self.instance)

        return can_claim
class ResizeClaim(Claim):
    """Claim used for holding resources for an incoming resize/migration
    operation.
    """

    def __init__(self, instance, instance_type, tracker):
        super(ResizeClaim, self).__init__(instance, tracker)
        # Resource sizes come from the *target* instance type, not from the
        # instance itself (see the property overrides below).
        self.instance_type = instance_type
        self.migration = None

    @property
    def disk_gb(self):
        return (self.instance_type['root_gb'] +
                self.instance_type['ephemeral_gb'])

    @property
    def memory_mb(self):
        return self.instance_type['memory_mb']

    @property
    def vcpus(self):
        return self.instance_type['vcpus']

    @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
    def abort(self):
        """Compute operation requiring claimed resources has failed or
        been aborted.
        """
        LOG.debug(_("Aborting claim: %s") % self, instance=self.instance)
        self.tracker.abort_resize_claim(self.instance['uuid'],
                                        self.instance_type)
| {
"content_hash": "099de21037f15e274f72dc8c89725eeb",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 31.84390243902439,
"alnum_prop": 0.6024816176470589,
"repo_name": "houshengbo/nova_vmware_compute_driver",
"id": "c4828b823f25f30ff1ce68f58abc98195d6d4c58",
"size": "7163",
"binary": false,
"copies": "3",
"ref": "refs/heads/attach-detach-VMware-iSCSI-driver",
"path": "nova/compute/claims.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7173520"
},
{
"name": "Shell",
"bytes": "15478"
}
],
"symlink_target": ""
} |
__author__ = 'huqinghua'
# coding=gbk
import os
import Dui4Win
from CommonUtil import CommonUtils
# Debug entry point: record the executable's directory, then start the
# DirectUI application loop.
if __name__ == "__main__":
    CommonUtils.SaveExePath()
    Dui4Win.Run()
| {
"content_hash": "3e3dc0f9274e1373fa0835cfb9a20e82",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 34,
"avg_line_length": 17,
"alnum_prop": 0.6310160427807486,
"repo_name": "est/py-ui4win",
"id": "cdd2596ae9950fc6e31883c16ba33bee49c3f30c",
"size": "187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/DebugEntry.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "648428"
},
{
"name": "C++",
"bytes": "1646895"
},
{
"name": "CMake",
"bytes": "3434"
},
{
"name": "Python",
"bytes": "56423"
},
{
"name": "Shell",
"bytes": "316"
}
],
"symlink_target": ""
} |
"""
CSB Test Runner app. Run with -h to see the app's documentation.
"""
from csb.test import Console
def main():
    """Run the CSB test console over all cases under csb.test.cases."""
    return Console('csb.test.cases.*')

if __name__ == '__main__':
    main()
| {
"content_hash": "8c51cfcdbafb09b9e769488ddf9aa919",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 15.076923076923077,
"alnum_prop": 0.6071428571428571,
"repo_name": "csb-toolbox/CSB",
"id": "75cf905f6b25d6dd21bedeb6e7bf6e9b1e4dd8b6",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csb/test/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14987"
},
{
"name": "Python",
"bytes": "1475360"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from typing import Any, Dict, List, Optional
from zerver.lib import cache
from zerver.lib.test_helpers import (
AuthedTestCase, queries_captured, stub, tornado_redirected_to_list
)
from zerver.decorator import (
JsonableError
)
from zerver.lib.test_runner import (
slow
)
from zerver.models import (
get_display_recipient, Message, Realm, Recipient, Stream, Subscription,
UserProfile,
)
from zerver.lib.actions import (
create_stream_if_needed, do_add_default_stream, do_add_subscription, do_change_is_admin,
do_create_realm, do_remove_default_stream, do_set_realm_create_stream_by_admins_only,
gather_subscriptions, get_default_streams_for_realm, get_realm, get_stream,
get_user_profile_by_email, set_default_streams,
)
from django.http import HttpResponse
import random
import ujson
import six
from six import text_type
from six.moves import range, urllib
class StreamAdminTest(AuthedTestCase):
def test_make_stream_public(self):
    # type: () -> None
    """An admin can make a private stream public, but only once they are
    subscribed to that stream."""
    email = 'hamlet@zulip.com'
    self.login(email)
    user_profile = get_user_profile_by_email(email)
    realm = user_profile.realm
    stream, _ = create_stream_if_needed(realm, 'private_stream', invite_only=True)

    do_change_is_admin(user_profile, True)
    params = {
        'stream_name': 'private_stream'
    }
    result = self.client.post("/json/make_stream_public", params)
    # Not yet subscribed, so even an admin is rejected.
    self.assert_json_error(result, 'You are not invited to this stream.')

    do_add_subscription(user_profile, stream)
    do_change_is_admin(user_profile, True)
    params = {
        'stream_name': 'private_stream'
    }
    result = self.client.post("/json/make_stream_public", params)
    self.assert_json_success(result)
    stream = Stream.objects.get(name='private_stream', realm=realm)
    self.assertFalse(stream.invite_only)
def test_make_stream_private(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'public_stream')
do_change_is_admin(user_profile, True)
params = {
'stream_name': 'public_stream'
}
result = self.client.post("/json/make_stream_private", params)
self.assert_json_success(result)
stream = Stream.objects.get(name='public_stream', realm=realm)
self.assertTrue(stream.invite_only)
def test_deactivate_stream_backend(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'new_stream')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
result = self.client.delete('/json/streams/new_stream')
self.assert_json_success(result)
subscription_exists = Subscription.objects.filter(
user_profile=user_profile,
recipient__type_id=stream.id,
recipient__type=Recipient.STREAM,
active=True,
).exists()
self.assertFalse(subscription_exists)
def test_deactivate_stream_backend_requires_realm_admin(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'new_stream')
do_add_subscription(user_profile, stream, no_log=True)
result = self.client.delete('/json/streams/new_stream')
self.assert_json_error(result, 'Must be a realm administrator')
def test_rename_stream(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client.post('/json/rename_stream?old_name=stream_name1&new_name=stream_name2')
self.assert_json_success(result)
event = events[1]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='name',
value='stream_name2',
name='stream_name1'
))
users = events[1]['users']
self.assertEqual(users, [user_profile.id])
stream_name1_exists = Stream.objects.filter(
name='stream_name1',
realm=realm,
).exists()
self.assertFalse(stream_name1_exists)
stream_name2_exists = Stream.objects.filter(
name='stream_name2',
realm=realm,
).exists()
self.assertTrue(stream_name2_exists)
def test_rename_stream_requires_realm_admin(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
result = self.client.post('/json/rename_stream?old_name=stream_name1&new_name=stream_name2')
self.assert_json_error(result, 'Must be a realm administrator')
def test_change_stream_description(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/streams/stream_name1',
{'description': ujson.dumps('Test description')})
self.assert_json_success(result)
event = events[0]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='description',
value='Test description',
name='stream_name1'
))
users = events[0]['users']
self.assertEqual(users, [user_profile.id])
stream = Stream.objects.get(
name='stream_name1',
realm=realm,
)
self.assertEqual('Test description', stream.description)
def test_change_stream_description_requires_realm_admin(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
stream, _ = create_stream_if_needed(realm, 'stream_name1')
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, False)
result = self.client_patch('/json/streams/stream_name1',
{'description': ujson.dumps('Test description')})
self.assert_json_error(result, 'Must be a realm administrator')
def set_up_stream_for_deletion(self, stream_name, invite_only=False,
subscribed=True):
# type: (str, bool, bool) -> Stream
"""
Create a stream for deletion by an administrator.
"""
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
stream, _ = create_stream_if_needed(user_profile.realm, stream_name,
invite_only=invite_only)
# For testing deleting streams you aren't on.
if subscribed:
do_add_subscription(user_profile, stream, no_log=True)
do_change_is_admin(user_profile, True)
return stream
def delete_stream(self, stream, subscribed=True):
# type: (Stream, bool) -> None
"""
Delete the stream and assess the result.
"""
active_name = stream.name
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client.delete('/json/streams/' + active_name)
self.assert_json_success(result)
deletion_events = [e['event'] for e in events if e['event']['type'] == 'subscription']
if subscribed:
self.assertEqual(deletion_events[0], dict(
op='remove',
type='subscription',
subscriptions=[{'name': active_name, 'stream_id': stream.id}]
))
else:
# You could delete the stream, but you weren't on it so you don't
# receive an unsubscription event.
self.assertEqual(deletion_events, [])
with self.assertRaises(Stream.DoesNotExist):
Stream.objects.get(realm=get_realm("zulip.com"), name=active_name)
# A deleted stream's name is changed, is deactivated, is invite-only,
# and has no subscribers.
deactivated_stream_name = "!DEACTIVATED:" + active_name
deactivated_stream = Stream.objects.get(name=deactivated_stream_name)
self.assertTrue(deactivated_stream.deactivated)
self.assertTrue(deactivated_stream.invite_only)
self.assertEqual(deactivated_stream.name, deactivated_stream_name)
subscribers = self.users_subscribed_to_stream(
deactivated_stream_name, "zulip.com")
self.assertEqual(subscribers, [])
# It doesn't show up in the list of public streams anymore.
result = self.client.get("/json/streams?include_subscribed=false")
public_streams = [s["name"] for s in ujson.loads(result.content)["streams"]]
self.assertNotIn(active_name, public_streams)
self.assertNotIn(deactivated_stream_name, public_streams)
# Even if you could guess the new name, you can't subscribe to it.
result = self.client.post(
"/json/users/me/subscriptions",
{"subscriptions": ujson.dumps([{"name": deactivated_stream_name}])})
self.assert_json_error(
result, "Unable to access stream (%s)." % (deactivated_stream_name,))
def test_delete_public_stream(self):
# type: () -> None
"""
When an administrator deletes a public stream, that stream is not
visible to users at all anymore.
"""
stream = self.set_up_stream_for_deletion("newstream")
self.delete_stream(stream)
def test_delete_private_stream(self):
# type: () -> None
"""
Administrators can delete private streams they are on.
"""
stream = self.set_up_stream_for_deletion("newstream", invite_only=True)
self.delete_stream(stream)
def test_delete_streams_youre_not_on(self):
# type: () -> None
"""
Administrators can delete public streams they aren't on, but cannot
delete private streams they aren't on.
"""
pub_stream = self.set_up_stream_for_deletion(
"pubstream", subscribed=False)
self.delete_stream(pub_stream, subscribed=False)
priv_stream = self.set_up_stream_for_deletion(
"privstream", subscribed=False, invite_only=True)
result = self.client.delete('/json/streams/' + priv_stream.name)
self.assert_json_error(
result, "Cannot administer invite-only streams this way")
def attempt_unsubscribe_of_principal(self, is_admin=False, is_subbed=True,
invite_only=False, other_user_subbed=True):
# type: (bool, bool, bool, bool) -> HttpResponse
# Set up the main user, who is in most cases an admin.
email = "hamlet@zulip.com"
self.login(email)
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
if is_admin:
do_change_is_admin(user_profile, True)
# Set up the stream.
stream_name = u"hümbüǵ"
stream, _ = create_stream_if_needed(realm, stream_name,
invite_only=invite_only)
# Set up the principal to be unsubscribed.
other_email = "cordelia@zulip.com"
other_user_profile = get_user_profile_by_email(other_email)
# Subscribe the admin and/or principal as specified in the flags.
if is_subbed:
do_add_subscription(user_profile, stream, no_log=True)
if other_user_subbed:
do_add_subscription(other_user_profile, stream, no_log=True)
result = self.client.post(
"/json/subscriptions/remove",
{"subscriptions": ujson.dumps([stream.name]),
"principals": ujson.dumps([other_email])})
# If the removal succeeded, then assert that Cordelia is no longer subscribed.
if result.status_code not in [400]:
subbed_users = self.users_subscribed_to_stream(stream_name, other_user_profile.realm.domain)
self.assertNotIn(other_user_profile, subbed_users)
return result
def test_cant_remove_others_from_stream(self):
# type: () -> None
"""
If you're not an admin, you can't remove other people from streams.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=False, is_subbed=True, invite_only=False,
other_user_subbed=True)
self.assert_json_error(
result, "This action requires administrative rights")
def test_admin_remove_others_from_public_stream(self):
# type: () -> None
"""
If you're an admin, you can remove people from public streams, even
those you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=True, invite_only=False,
other_user_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_subbed_private_stream(self):
# type: () -> None
"""
If you're an admin, you can remove other people from private streams you
are on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=True, invite_only=True,
other_user_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_unsubbed_private_stream(self):
# type: () -> None
"""
Even if you're an admin, you can't remove people from private
streams you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=False, invite_only=True,
other_user_subbed=True)
self.assert_json_error(
result, "Cannot administer invite-only streams this way")
def test_create_stream_by_admins_only_setting(self):
# type: () -> None
"""
When realm.create_stream_by_admins_only setting is active,
non admin users shouldn't be able to create new streams.
"""
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, False)
do_set_realm_create_stream_by_admins_only(user_profile.realm, True)
stream_name = ['adminsonlysetting']
result = self.common_subscribe_to_streams(
email,
stream_name
)
self.assert_json_error(result, 'User cannot create streams.')
# Change setting back to default
do_set_realm_create_stream_by_admins_only(user_profile.realm, False)
def test_remove_already_not_subbed(self):
# type: () -> None
"""
Trying to unsubscribe someone who already isn't subscribed to a stream
fails gracefully.
"""
result = self.attempt_unsubscribe_of_principal(
is_admin=True, is_subbed=False, invite_only=False,
other_user_subbed=False)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 0)
self.assertEqual(len(json["not_subscribed"]), 1)
def test_remove_invalid_user(self):
# type: () -> None
"""
Trying to unsubscribe an invalid user from a stream fails gracefully.
"""
admin_email = "hamlet@zulip.com"
self.login(admin_email)
user_profile = get_user_profile_by_email(admin_email)
do_change_is_admin(user_profile, True)
realm = user_profile.realm
stream_name = u"hümbüǵ"
stream, _ = create_stream_if_needed(realm, stream_name)
result = self.client.post("/json/subscriptions/remove",
{"subscriptions": ujson.dumps([stream.name]),
"principals": ujson.dumps(["baduser@zulip.com"])})
self.assert_json_error(
result,
"User not authorized to execute queries on behalf of 'baduser@zulip.com'",
status_code=403)
class DefaultStreamTest(AuthedTestCase):
    """Tests covering management of a realm's default streams."""
    def get_default_stream_names(self, realm):
        # type: (Realm) -> Set[str]
        """Return the realm's default stream names as a set."""
        return set(stream.name for stream in get_default_streams_for_realm(realm))
    def test_set_default_streams(self):
        # type: () -> None
        """set_default_streams() installs the given names plus 'announce'."""
        realm, _ = do_create_realm("testrealm.com", "Test Realm")
        names = ['apple', 'banana', 'Carrot Cake']
        set_default_streams(realm, names)
        # The realm notifications stream is appended automatically.
        self.assertEqual(self.get_default_stream_names(realm),
                         set(names + ['announce']))
    def test_set_default_streams_no_notifications_stream(self):
        # type: () -> None
        """Without a notifications stream, only the given names are installed."""
        realm, _ = do_create_realm("testrealm.com", "Test Realm")
        realm.notifications_stream = None
        realm.save(update_fields=["notifications_stream"])
        names = ['apple', 'banana', 'Carrot Cake']
        set_default_streams(realm, names)
        self.assertEqual(self.get_default_stream_names(realm), set(names))
    def test_add_and_remove_default_stream(self):
        # type: () -> None
        """Adding/removing a default stream works and both operations are idempotent."""
        realm = get_realm("zulip.com")
        initial_names = self.get_default_stream_names(realm)
        do_add_default_stream(realm, 'Added Stream')
        names_after_add = self.get_default_stream_names(realm)
        self.assertEqual(names_after_add - initial_names, set(['Added Stream']))
        # Adding the same stream a second time is a no-op.
        do_add_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), names_after_add)
        # Removing it restores the initial set...
        do_remove_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), initial_names)
        # ...and removing again is also a no-op.
        do_remove_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), initial_names)
    def test_api_calls(self):
        # type: () -> None
        """Default streams can be added and removed through the JSON API by an admin."""
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        do_change_is_admin(user_profile, True)
        stream_name = 'stream ADDED via api'
        result = self.client_put('/json/default_streams', dict(stream_name=stream_name))
        self.assert_json_success(result)
        self.assertIn(stream_name, self.get_default_stream_names(user_profile.realm))
        # Now remove it again through the API.
        result = self.client_delete('/json/default_streams', dict(stream_name=stream_name))
        self.assert_json_success(result)
        self.assertNotIn(stream_name, self.get_default_stream_names(user_profile.realm))
class SubscriptionPropertiesTest(AuthedTestCase):
    """Tests for POST /json/subscriptions/property (per-stream user settings)."""
    def test_set_stream_color(self):
        # type: () -> None
        """
        A POST request to /json/subscriptions/property with stream_name and
        color data sets the stream color, and for that stream only.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        old_subs, _ = gather_subscriptions(get_user_profile_by_email(test_email))
        sub = old_subs[0]
        stream_name = sub['name']
        new_color = "#ffffff" # TODO: ensure that this is different from old_color
        result = self.client.post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "stream": stream_name,
                                                "value": "#ffffff"}])})
        self.assert_json_success(result)
        new_subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        # Locate the updated subscription in the fresh listing.
        found_sub = None
        for sub in new_subs:
            if sub['name'] == stream_name:
                found_sub = sub
                break
        self.assertIsNotNone(found_sub)
        self.assertEqual(found_sub['color'], new_color)
        # Remove the changed entry from both listings; the remainders must be
        # identical, proving no other subscription was touched.
        new_subs.remove(found_sub)
        for sub in old_subs:
            if sub['name'] == stream_name:
                found_sub = sub
                break
        old_subs.remove(found_sub)
        self.assertEqual(old_subs, new_subs)
    def test_set_color_missing_stream_name(self):
        # type: () -> None
        """
        Updating the color property requires a `stream` key.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        result = self.client.post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "value": "#ffffff"}])})
        self.assert_json_error(
            result, "stream key is missing from subscription_data[0]")
    def test_set_color_missing_color(self):
        # type: () -> None
        """
        Updating the color property requires a color.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        result = self.client.post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(
            result, "value key is missing from subscription_data[0]")
    def test_set_invalid_property(self):
        # type: () -> None
        """
        Trying to set an invalid property returns a JSON error.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        result = self.client.post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "bad",
                                                "value": "bad",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               "Unknown subscription property: bad")
class SubscriptionRestApiTest(AuthedTestCase):
    """
    Tests for the REST-style PATCH /api/v1/users/me/subscriptions endpoint.
    """
    def test_basic_add_delete(self):
        # type: () -> None
        """A stream can be added and then removed via a PATCH request."""
        email = 'hamlet@zulip.com'
        self.login(email)
        # Subscribe to a brand-new stream.
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            {'add': ujson.dumps([{'name': 'my_test_stream_1'}])},
            **self.api_auth(email)
        )
        self.assert_json_success(result)
        self.assertIn('my_test_stream_1', self.get_streams(email))
        # Unsubscribe from the very same stream.
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            {'delete': ujson.dumps(['my_test_stream_1'])},
            **self.api_auth(email)
        )
        self.assert_json_success(result)
        self.assertNotIn('my_test_stream_1', self.get_streams(email))
    def test_bad_add_parameters(self):
        # type: () -> None
        """Malformed 'add' payloads produce the expected validation errors."""
        email = 'hamlet@zulip.com'
        self.login(email)
        def expect_error(val, expected_message):
            # type: (Any, str) -> None
            # Submit a bogus 'add' payload and check the error string.
            result = self.client_patch(
                "/api/v1/users/me/subscriptions",
                {'add': ujson.dumps(val)},
                **self.api_auth(email)
            )
            self.assert_json_error(result, expected_message)
        expect_error(['foo'], 'add[0] is not a dict')
        expect_error([{'bogus': 'foo'}], 'name key is missing from add[0]')
        expect_error([{'name': {}}], 'add[0]["name"] is not a string')
    def test_bad_principals(self):
        # type: () -> None
        """A non-string entry in 'principals' is rejected."""
        email = 'hamlet@zulip.com'
        self.login(email)
        payload = {
            'add': ujson.dumps([{'name': 'my_new_stream'}]),
            'principals': ujson.dumps([{}]),
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            payload,
            **self.api_auth(email)
        )
        self.assert_json_error(result, 'principals[0] is not a string')
    def test_bad_delete_parameters(self):
        # type: () -> None
        """A 'delete' entry must be a plain stream-name string."""
        email = 'hamlet@zulip.com'
        self.login(email)
        payload = {
            'delete': ujson.dumps([{'name': 'my_test_stream_1'}])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            payload,
            **self.api_auth(email)
        )
        self.assert_json_error(result, "delete[0] is not a string")
class SubscriptionAPITest(AuthedTestCase):
    def setUp(self):
        # type: () -> None
        """
        All tests will be logged in as hamlet. Also save various useful values
        as attributes that tests can access.
        """
        self.test_email = "hamlet@zulip.com"
        self.login(self.test_email)
        self.user_profile = get_user_profile_by_email(self.test_email)
        self.realm = self.user_profile.realm
        # Streams hamlet is subscribed to at the start of each test.
        self.streams = self.get_streams(self.test_email)
def make_random_stream_names(self, existing_stream_names):
# type: (List[text_type]) -> List[text_type]
"""
Helper function to make up random stream names. It takes
existing_stream_names and randomly appends a digit to the end of each,
but avoids names that appear in the list names_to_avoid.
"""
random_streams = []
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.realm)]
for stream in existing_stream_names:
random_stream = stream + str(random.randint(0, 9))
if random_stream not in all_stream_names:
random_streams.append(random_stream)
return random_streams
def test_successful_subscriptions_list(self):
# type: () -> None
"""
Calling /api/v1/users/me/subscriptions should successfully return your subscriptions.
"""
email = self.test_email
result = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("subscriptions", json)
for stream in json['subscriptions']:
self.assertIsInstance(stream['name'], six.string_types)
self.assertIsInstance(stream['color'], six.string_types)
self.assertIsInstance(stream['invite_only'], bool)
# check that the stream name corresponds to an actual stream
try:
Stream.objects.get(name__iexact=stream['name'], realm=self.realm)
except Stream.DoesNotExist:
self.fail("stream does not exist")
list_streams = [stream['name'] for stream in json["subscriptions"]]
# also check that this matches the list of your subscriptions
self.assertItemsEqual(list_streams, self.streams)
def helper_check_subs_before_and_after_add(self, subscriptions, other_params,
subscribed, already_subscribed,
email, new_subs, invite_only=False):
# type: (List[text_type], Dict[str, Any], List[text_type], List[text_type], text_type, List[text_type], bool) -> None
"""
Check result of adding subscriptions.
You can add subscriptions for yourself or possibly many
principals, which is why e-mails map to subscriptions in the
result.
The result json is of the form
{"msg": "",
"result": "success",
"already_subscribed": {"iago@zulip.com": ["Venice", "Verona"]},
"subscribed": {"iago@zulip.com": ["Venice8"]}}
"""
result = self.common_subscribe_to_streams(self.test_email, subscriptions,
other_params, invite_only=invite_only)
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertItemsEqual(subscribed, json["subscribed"][email])
self.assertItemsEqual(already_subscribed, json["already_subscribed"][email])
new_streams = self.get_streams(email)
self.assertItemsEqual(new_streams, new_subs)
def test_successful_subscriptions_add(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions should successfully add
streams, and should determine which are new subscriptions vs
which were already subscribed. We add 2 new streams to the
list of subscriptions and confirm the right number of events
are generated.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
add_streams = [u"Verona2", u"Denmark5"]
self.assertNotEqual(len(add_streams), 0) # necessary for full test coverage
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
self.helper_check_subs_before_and_after_add(self.streams + add_streams, {},
add_streams, self.streams, self.test_email, self.streams + add_streams)
self.assert_length(events, 4, True)
def test_successful_subscriptions_notifies_pm(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions should notify when a new stream is created.
"""
invitee = "iago@zulip.com"
invitee_full_name = 'Iago'
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])[:1]
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data={
'announce': 'true',
'principals': '["%s"]' % (self.user_profile.email,)
},
)
self.assert_json_success(result)
msg = self.get_last_message()
self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
self.assertEqual(msg.sender_id,
get_user_profile_by_email('notification-bot@zulip.com').id)
expected_msg = "Hi there! %s just created a new stream '%s'. " \
"!_stream_subscribe_button(%s)" % (invitee_full_name,
invite_streams[0],
invite_streams[0])
self.assertEqual(msg.content, expected_msg)
def test_successful_subscriptions_notifies_stream(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions should notify when a new stream is created.
"""
invitee = "iago@zulip.com"
invitee_full_name = 'Iago'
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])[:1]
notifications_stream = Stream.objects.get(name=current_stream, realm=self.realm)
self.realm.notifications_stream = notifications_stream
self.realm.save()
# Delete the UserProfile from the cache so the realm change will be
# picked up
cache.cache_delete(cache.user_profile_by_email_cache_key(invitee))
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data=dict(
announce='true',
principals='["%s"]' % (self.user_profile.email,)
),
)
self.assert_json_success(result)
msg = self.get_last_message()
self.assertEqual(msg.recipient.type, Recipient.STREAM)
self.assertEqual(msg.sender_id,
get_user_profile_by_email('notification-bot@zulip.com').id)
expected_msg = "%s just created a new stream `%s`. " \
"!_stream_subscribe_button(%s)" % (invitee_full_name,
invite_streams[0],
invite_streams[0])
self.assertEqual(msg.content, expected_msg)
    def test_successful_subscriptions_notifies_with_escaping(self):
        # type: () -> None
        """
        Calling POST /json/users/me/subscriptions should notify when a new stream is created.
        """
        invitee = "iago@zulip.com"
        invitee_full_name = 'Iago'
        current_stream = self.get_streams(invitee)[0]
        notifications_stream = Stream.objects.get(name=current_stream, realm=self.realm)
        self.realm.notifications_stream = notifications_stream
        self.realm.save()
        # Stream name deliberately contains characters that need escaping
        # inside the !_stream_subscribe_button(...) markup: ')' and '\'.
        invite_streams = ['strange ) \\ test']
        result = self.common_subscribe_to_streams(
            invitee,
            invite_streams,
            extra_post_data={
                'announce': 'true',
                'principals': '["%s"]' % (self.user_profile.email,)
            },
        )
        self.assert_json_success(result)
        msg = self.get_last_message()
        self.assertEqual(msg.sender_id,
                         get_user_profile_by_email('notification-bot@zulip.com').id)
        # In the rendered button markup, ')' is escaped as '\)' and '\' as '\\'.
        expected_msg = "%s just created a new stream `%s`. " \
                       "!_stream_subscribe_button(strange \\) \\\\ test)" % (
                           invitee_full_name,
                           invite_streams[0])
        self.assertEqual(msg.content, expected_msg)
def test_non_ascii_stream_subscription(self):
# type: () -> None
"""
Subscribing to a stream name with non-ASCII characters succeeds.
"""
self.helper_check_subs_before_and_after_add(self.streams + [u"hümbüǵ"], {},
[u"hümbüǵ"], self.streams, self.test_email, self.streams + [u"hümbüǵ"])
def test_subscriptions_add_too_long(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions on a stream whose name is >60
characters should return a JSON error.
"""
# character limit is 60 characters
long_stream_name = "a" * 61
result = self.common_subscribe_to_streams(self.test_email, [long_stream_name])
self.assert_json_error(result,
"Stream name (%s) too long." % (long_stream_name,))
def test_user_settings_for_adding_streams(self):
# type: () -> None
with stub(UserProfile, 'can_create_streams', lambda self: False):
result = self.common_subscribe_to_streams(self.test_email, ['stream1'])
self.assert_json_error(result, 'User cannot create streams.')
with stub(UserProfile, 'can_create_streams', lambda self: True):
result = self.common_subscribe_to_streams(self.test_email, ['stream2'])
self.assert_json_success(result)
# User should still be able to subscribe to an existing stream
with stub(UserProfile, 'can_create_streams', lambda self: False):
result = self.common_subscribe_to_streams(self.test_email, ['stream2'])
self.assert_json_success(result)
def test_subscriptions_add_invalid_stream(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid name is the empty string
invalid_stream_name = ""
result = self.common_subscribe_to_streams(self.test_email, [invalid_stream_name])
self.assert_json_error(result,
"Invalid stream name (%s)." % (invalid_stream_name,))
    def assert_adding_subscriptions_for_principal(self, invitee, streams, invite_only=False):
        # type: (text_type, List[text_type], bool) -> None
        """
        Calling POST /json/users/me/subscriptions on behalf of another principal (for
        whom you have permission to add subscriptions) should successfully add
        those subscriptions and send a message to the subscribee notifying
        them.
        """
        other_profile = get_user_profile_by_email(invitee)
        current_streams = self.get_streams(invitee)
        self.assertIsInstance(other_profile, UserProfile)
        self.assertNotEqual(len(current_streams), 0) # necessary for full test coverage
        self.assertNotEqual(len(streams), 0) # necessary for full test coverage
        streams_to_sub = streams[:1] # just add one, to make the message easier to check
        streams_to_sub.extend(current_streams)
        self.helper_check_subs_before_and_after_add(streams_to_sub,
            {"principals": ujson.dumps([invitee])}, streams[:1], current_streams,
            invitee, streams_to_sub, invite_only=invite_only)
        # verify that the user was sent a message informing them about the subscription
        msg = self.get_last_message()
        self.assertEqual(msg.recipient.type, msg.recipient.PERSONAL)
        self.assertEqual(msg.sender_id,
                get_user_profile_by_email("notification-bot@zulip.com").id)
        # The stream name is URL-quoted to build the #narrow link.
        expected_msg = ("Hi there!  We thought you'd like to know that %s just "
                        "subscribed you to the %sstream [%s](#narrow/stream/%s)."
                        % (self.user_profile.full_name,
                           '**invite-only** ' if invite_only else '',
                           streams[0], urllib.parse.quote(streams[0].encode('utf-8'))))
        if not Stream.objects.get(name=streams[0]).invite_only:
            expected_msg += ("\nYou can see historical content on a "
                             "non-invite-only stream by narrowing to it.")
        self.assertEqual(msg.content, expected_msg)
        # The notification must be addressed to the invitee alone.
        recipients = get_display_recipient(msg.recipient)
        self.assertEqual(len(recipients), 1)
        self.assertEqual(recipients[0]['email'], invitee)
    def test_multi_user_subscription(self):
        # type: () -> None
        """
        Subscribing multiple principals at once generates the right events and
        subscriber lists, and stays within the expected query counts.
        """
        email1 = 'cordelia@zulip.com'
        email2 = 'iago@zulip.com'
        realm = get_realm("zulip.com")
        streams_to_sub = ['multi_user_stream']
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            with queries_captured() as queries:
                self.common_subscribe_to_streams(
                        self.test_email,
                        streams_to_sub,
                        dict(principals=ujson.dumps([email1, email2])),
                )
        self.assert_length(queries, 43)
        self.assert_length(events, 6, exact=True)
        # All subscription-type events must be 'add's covering both users.
        for ev in [x for x in events if x['event']['type'] not in ('message', 'stream')]:
            self.assertEqual(ev['event']['op'], 'add')
            self.assertEqual(
                    set(ev['event']['subscriptions'][0]['subscribers']),
                    set([email1, email2])
            )
        stream = get_stream('multi_user_stream', realm)
        self.assertEqual(stream.num_subscribers(), 2)
        # Now add ourselves
        events = []
        with tornado_redirected_to_list(events):
            with queries_captured() as queries:
                self.common_subscribe_to_streams(
                        self.test_email,
                        streams_to_sub,
                        dict(principals=ujson.dumps([self.test_email])),
                )
        self.assert_length(queries, 8)
        self.assert_length(events, 2, True)
        add_event, add_peer_event = events
        self.assertEqual(add_event['event']['type'], 'subscription')
        self.assertEqual(add_event['event']['op'], 'add')
        self.assertEqual(add_event['users'], [get_user_profile_by_email(self.test_email).id])
        self.assertEqual(
                set(add_event['event']['subscriptions'][0]['subscribers']),
                set([email1, email2, self.test_email])
        )
        # The two existing subscribers are notified of the new peer.
        self.assertEqual(len(add_peer_event['users']), 2)
        self.assertEqual(add_peer_event['event']['type'], 'subscription')
        self.assertEqual(add_peer_event['event']['op'], 'peer_add')
        self.assertEqual(add_peer_event['event']['user_email'], self.test_email)
        stream = get_stream('multi_user_stream', realm)
        self.assertEqual(stream.num_subscribers(), 3)
        # Finally, add othello, exercising the do_add_subscription() code path.
        events = []
        email3 = 'othello@zulip.com'
        user_profile = get_user_profile_by_email(email3)
        stream = get_stream('multi_user_stream', realm)
        with tornado_redirected_to_list(events):
            do_add_subscription(user_profile, stream)
        self.assert_length(events, 2, True)
        add_event, add_peer_event = events
        self.assertEqual(add_event['event']['type'], 'subscription')
        self.assertEqual(add_event['event']['op'], 'add')
        self.assertEqual(add_event['users'], [get_user_profile_by_email(email3).id])
        self.assertEqual(
                set(add_event['event']['subscriptions'][0]['subscribers']),
                set([email1, email2, email3, self.test_email])
        )
        # All three pre-existing subscribers now get the peer_add notice.
        self.assertEqual(len(add_peer_event['users']), 3)
        self.assertEqual(add_peer_event['event']['type'], 'subscription')
        self.assertEqual(add_peer_event['event']['op'], 'peer_add')
        self.assertEqual(add_peer_event['event']['user_email'], email3)
def test_bulk_subscribe_MIT(self):
# type: () -> None
realm = get_realm("mit.edu")
streams = ["stream_%s" % i for i in range(40)]
for stream in streams:
create_stream_if_needed(realm, stream)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
'starnine@mit.edu',
streams,
dict(principals=ujson.dumps(['starnine@mit.edu'])),
)
# Make sure MIT does not get any tornado subscription events
self.assert_length(events, 0, True)
self.assert_length(queries, 7)
def test_bulk_subscribe_many(self):
# type: () -> None
# Create a whole bunch of streams
realm = get_realm("zulip.com")
streams = ["stream_%s" % i for i in range(20)]
for stream in streams:
create_stream_if_needed(realm, stream)
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_email,
streams,
dict(principals=ujson.dumps([self.test_email])),
)
# Make sure we don't make O(streams) queries
self.assert_length(queries, 9)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal(self):
# type: () -> None
"""
You can subscribe other people to streams.
"""
invitee = "iago@zulip.com"
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal_invite_only(self):
# type: () -> None
"""
You can subscribe other people to invite only streams.
"""
invitee = "iago@zulip.com"
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee, invite_streams,
invite_only=True)
@slow(0.15, "common_subscribe_to_streams is slow")
def test_non_ascii_subscription_for_principal(self):
# type: () -> None
"""
You can subscribe other people to streams even if they containing
non-ASCII characters.
"""
self.assert_adding_subscriptions_for_principal("iago@zulip.com", [u"hümbüǵ"])
def test_subscription_add_invalid_principal(self):
# type: () -> None
"""
Calling subscribe on behalf of a principal that does not exist
should return a JSON error.
"""
invalid_principal = "rosencrantz-and-guildenstern@zulip.com"
# verify that invalid_principal actually doesn't exist
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email(invalid_principal)
result = self.common_subscribe_to_streams(self.test_email, self.streams,
{"principals": ujson.dumps([invalid_principal])})
self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
% (invalid_principal,), status_code=403)
def test_subscription_add_principal_other_realm(self):
# type: () -> None
"""
Calling subscribe on behalf of a principal in another realm
should return a JSON error.
"""
principal = "starnine@mit.edu"
profile = get_user_profile_by_email(principal)
# verify that principal exists (thus, the reason for the error is the cross-realming)
self.assertIsInstance(profile, UserProfile)
result = self.common_subscribe_to_streams(self.test_email, self.streams,
{"principals": ujson.dumps([principal])})
self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
% (principal,), status_code=403)
    def helper_check_subs_before_and_after_remove(self, subscriptions, json_dict,
                                                  email, new_subs):
        # type: (List[text_type], Dict[str, Any], text_type, List[text_type]) -> None
        """
        Check result of removing subscriptions.
        Unlike adding subscriptions, you can only remove subscriptions
        for yourself, so the result format is different.
        {"msg": "",
         "removed": ["Denmark", "Scotland", "Verona"],
         "not_subscribed": ["Rome"], "result": "success"}
        """
        # POST the removal, then compare each expected key in json_dict
        # (e.g. "removed", "not_subscribed") against the JSON response.
        result = self.client.post("/json/subscriptions/remove",
                                  {"subscriptions": ujson.dumps(subscriptions)})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        for key, val in six.iteritems(json_dict):
            self.assertItemsEqual(val, json[key]) # we don't care about the order of the items
        # Finally, the user's remaining subscriptions must equal new_subs.
        new_streams = self.get_streams(email)
        self.assertItemsEqual(new_streams, new_subs)
    def test_successful_subscriptions_remove(self):
        # type: () -> None
        """
        Calling /json/subscriptions/remove should successfully remove streams,
        and should determine which were removed vs which weren't subscribed to.
        We cannot randomly generate stream names because the remove code
        verifies whether streams exist.
        """
        if len(self.streams) < 2:
            self.fail() # necessary for full test coverage
        # Keep the first subscribed stream; remove all the others.
        streams_to_remove = self.streams[1:]
        # Collect real streams the user is *not* subscribed to.
        not_subbed = []
        for stream in Stream.objects.all():
            if stream.name not in self.streams:
                not_subbed.append(stream.name)
        random.shuffle(not_subbed)
        self.assertNotEqual(len(not_subbed), 0) # necessary for full test coverage
        try_to_remove = not_subbed[:3] # attempt to remove up to 3 streams not already subbed to
        streams_to_remove.extend(try_to_remove)
        # Expect the subscribed streams under "removed" and the rest under
        # "not_subscribed", leaving only the first stream subscribed.
        self.helper_check_subs_before_and_after_remove(streams_to_remove,
            {"removed": self.streams[1:], "not_subscribed": try_to_remove},
            self.test_email, [self.streams[0]])
def test_subscriptions_remove_fake_stream(self):
# type: () -> None
"""
Calling /json/subscriptions/remove on a stream that doesn't exist
should return a JSON error.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
streams_to_remove = random_streams[:1] # pick only one fake stream, to make checking the error message easy
result = self.client.post("/json/subscriptions/remove",
{"subscriptions": ujson.dumps(streams_to_remove)})
self.assert_json_error(result, "Stream(s) (%s) do not exist" % (random_streams[0],))
def helper_subscriptions_exists(self, stream, exists, subscribed):
# type: (text_type, bool, bool) -> None
"""
A helper function that calls /json/subscriptions/exists on a stream and
verifies that the returned JSON dictionary has the exists and
subscribed values passed in as parameters. (If subscribed should not be
present, pass in None.)
"""
result = self.client.post("/json/subscriptions/exists",
{"stream": stream})
json = ujson.loads(result.content)
self.assertIn("exists", json)
self.assertEqual(json["exists"], exists)
if exists:
self.assert_json_success(result)
else:
self.assertEquals(result.status_code, 404)
if subscribed:
self.assertIn("subscribed", json)
self.assertEqual(json["subscribed"], subscribed)
def test_successful_subscriptions_exists_subbed(self):
# type: () -> None
"""
Calling /json/subscriptions/exist on a stream to which you are subbed
should return that it exists and that you are subbed.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(self.streams[0], True, True)
def test_successful_subscriptions_exists_not_subbed(self):
# type: () -> None
"""
Calling /json/subscriptions/exist on a stream to which you are not
subbed should return that it exists and that you are not subbed.
"""
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.realm)]
streams_not_subbed = list(set(all_stream_names) - set(self.streams))
self.assertNotEqual(len(streams_not_subbed), 0) # necessary for full test coverage
self.helper_subscriptions_exists(streams_not_subbed[0], True, False)
def test_subscriptions_does_not_exist(self):
# type: () -> None
"""
Calling /json/subscriptions/exist on a stream that doesn't exist should
return that it doesn't exist.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(random_streams[0], False, False)
def test_subscriptions_exist_invalid_name(self):
# type: () -> None
"""
Calling /json/subscriptions/exist on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid stream name is the empty string
invalid_stream_name = ""
result = self.client.post("/json/subscriptions/exists",
{"stream": invalid_stream_name})
self.assert_json_error(result, "Invalid characters in stream name")
def get_subscription(self, user_profile, stream_name):
# type: (UserProfile, text_type) -> Subscription
stream = Stream.objects.get(realm=self.realm, name=stream_name)
return Subscription.objects.get(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
)
def test_subscriptions_add_notification_default_true(self):
# type: () -> None
"""
When creating a subscription, the desktop and audible notification
settings for that stream are derived from the global notification
settings.
"""
invitee = "iago@zulip.com"
user_profile = get_user_profile_by_email(invitee)
user_profile.enable_stream_desktop_notifications = True
user_profile.enable_stream_sounds = True
user_profile.save()
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
subscription = self.get_subscription(user_profile, invite_streams[0])
self.assertTrue(subscription.desktop_notifications)
self.assertTrue(subscription.audible_notifications)
def test_subscriptions_add_notification_default_false(self):
# type: () -> None
"""
When creating a subscription, the desktop and audible notification
settings for that stream are derived from the global notification
settings.
"""
invitee = "iago@zulip.com"
user_profile = get_user_profile_by_email(invitee)
user_profile.enable_stream_desktop_notifications = False
user_profile.enable_stream_sounds = False
user_profile.save()
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])
self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
subscription = self.get_subscription(user_profile, invite_streams[0])
self.assertFalse(subscription.desktop_notifications)
self.assertFalse(subscription.audible_notifications)
class GetPublicStreamsTest(AuthedTestCase):
    """Tests for the /json/streams and /api/v1/streams listing endpoints."""
    def test_public_streams(self):
        # type: () -> None
        """
        Ensure that streams successfully returns a list of streams
        """
        email = 'hamlet@zulip.com'
        self.login(email)
        result = self.client.get("/json/streams?include_subscribed=false")
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("streams", json)
        self.assertIsInstance(json["streams"], list)
    def test_public_streams_api(self):
        # type: () -> None
        """
        Ensure that the query we use to get public streams successfully returns
        a list of streams
        """
        email = 'hamlet@zulip.com'
        self.login(email)
        # Check it correctly lists the user's subs with include_public=false
        result = self.client.get("/api/v1/streams?include_public=false", **self.api_auth(email))
        result2 = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("streams", json)
        self.assertIsInstance(json["streams"], list)
        self.assert_json_success(result2)
        json2 = ujson.loads(result2.content)
        # With include_public=false, /streams should match the user's own
        # subscription list exactly (order-independent).
        self.assertEqual(sorted([s["name"] for s in json["streams"]]),
                         sorted([s["name"] for s in json2["subscriptions"]]))
        # Check it correctly lists all public streams with include_subscribed=false
        result = self.client.get("/api/v1/streams?include_public=true&include_subscribed=false",
                                 **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        all_streams = [stream.name for stream in
                       Stream.objects.filter(realm=get_user_profile_by_email(email).realm)]
        self.assertEqual(sorted(s["name"] for s in json["streams"]),
                         sorted(all_streams))
        # Check non-superuser can't use include_all_active
        result = self.client.get("/api/v1/streams?include_all_active=true",
                                 **self.api_auth(email))
        self.assertEqual(result.status_code, 400)
class InviteOnlyStreamTest(AuthedTestCase):
    """Tests for access control around invite-only (private) streams."""
    def test_must_be_subbed_to_send(self):
        # type: () -> None
        """
        If you try to send a message to an invite-only stream to which
        you aren't subscribed, you'll get a 400.
        """
        self.login("hamlet@zulip.com")
        # Create Saxony as an invite-only stream.
        self.assert_json_success(
            self.common_subscribe_to_streams("hamlet@zulip.com", ["Saxony"],
                                             invite_only=True))
        email = "cordelia@zulip.com"
        with self.assertRaises(JsonableError):
            self.send_message(email, "Saxony", Recipient.STREAM)
    def test_list_respects_invite_only_bit(self):
        # type: () -> None
        """
        Make sure that /api/v1/users/me/subscriptions properly returns
        the invite-only bit for streams that are invite-only
        """
        email = 'hamlet@zulip.com'
        self.login(email)
        # Subscribe to one invite-only and one public stream, then check
        # each stream's invite_only flag in the subscriptions listing.
        result1 = self.common_subscribe_to_streams(email, ["Saxony"], invite_only=True)
        self.assert_json_success(result1)
        result2 = self.common_subscribe_to_streams(email, ["Normandy"], invite_only=False)
        self.assert_json_success(result2)
        result = self.client.get("/api/v1/users/me/subscriptions", **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("subscriptions", json)
        for sub in json["subscriptions"]:
            if sub['name'] == "Normandy":
                self.assertEqual(sub['invite_only'], False, "Normandy was mistakenly marked invite-only")
            if sub['name'] == "Saxony":
                self.assertEqual(sub['invite_only'], True, "Saxony was not properly marked invite-only")
    @slow(0.15, "lots of queries")
    def test_inviteonly(self):
        # type: () -> None
        # Creating an invite-only stream is allowed
        email = 'hamlet@zulip.com'
        stream_name = "Saxony"
        result = self.common_subscribe_to_streams(email, [stream_name], invite_only=True)
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertEqual(json["subscribed"], {email: [stream_name]})
        self.assertEqual(json["already_subscribed"], {})
        # Subscribing oneself to an invite-only stream is not allowed
        email = "othello@zulip.com"
        self.login(email)
        result = self.common_subscribe_to_streams(email, [stream_name])
        self.assert_json_error(result, 'Unable to access stream (Saxony).')
        # authorization_errors_fatal=False works
        email = "othello@zulip.com"
        self.login(email)
        result = self.common_subscribe_to_streams(email, [stream_name],
                                                  extra_post_data={'authorization_errors_fatal': ujson.dumps(False)})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertEqual(json["unauthorized"], [stream_name])
        self.assertEqual(json["subscribed"], {})
        self.assertEqual(json["already_subscribed"], {})
        # Inviting another user to an invite-only stream is allowed
        email = 'hamlet@zulip.com'
        self.login(email)
        result = self.common_subscribe_to_streams(
            email, [stream_name],
            extra_post_data={'principals': ujson.dumps(["othello@zulip.com"])})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertEqual(json["subscribed"], {"othello@zulip.com": [stream_name]})
        self.assertEqual(json["already_subscribed"], {})
        # Make sure both users are subscribed to this stream
        result = self.client.get("/api/v1/streams/%s/members" % (stream_name,),
                                 **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertTrue('othello@zulip.com' in json['subscribers'])
        self.assertTrue('hamlet@zulip.com' in json['subscribers'])
class GetSubscribersTest(AuthedTestCase):
    """Tests for fetching a stream's subscriber list (get_subscribers and
    gather_subscriptions)."""
    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)
        self.login(self.email)
    def check_well_formed_result(self, result, stream_name, domain):
        # type: (Dict[str, Any], text_type, text_type) -> None
        """
        A successful call to get_subscribers returns the list of subscribers in
        the form:
        {"msg": "",
         "result": "success",
         "subscribers": ["hamlet@zulip.com", "prospero@zulip.com"]}
        """
        self.assertIn("subscribers", result)
        self.assertIsInstance(result["subscribers"], list)
        true_subscribers = [user_profile.email for user_profile in self.users_subscribed_to_stream(
            stream_name, domain)]
        self.assertItemsEqual(result["subscribers"], true_subscribers)
    def make_subscriber_request(self, stream_name, email=None):
        # type: (text_type, Optional[str]) -> HttpResponse
        # Defaults to fetching as the logged-in test user.
        if email is None:
            email = self.email
        return self.client.get("/api/v1/streams/%s/members" % (stream_name,),
                               **self.api_auth(email))
    def make_successful_subscriber_request(self, stream_name):
        # type: (text_type) -> None
        result = self.make_subscriber_request(stream_name)
        self.assert_json_success(result)
        self.check_well_formed_result(ujson.loads(result.content),
                                      stream_name, self.user_profile.realm.domain)
    def test_subscriber(self):
        # type: () -> None
        """
        get_subscribers returns the list of subscribers.
        """
        stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
        self.make_successful_subscriber_request(stream_name)
    @slow(0.15, "common_subscribe_to_streams is slow")
    def test_gather_subscriptions(self):
        # type: () -> None
        """
        gather_subscriptions returns correct results with a constant
        number of queries (4).
        """
        realm = get_realm("zulip.com")
        streams = ["stream_%s" % i for i in range(10)]
        for stream in streams:
            create_stream_if_needed(realm, stream)
        users_to_subscribe = [self.email, "othello@zulip.com", "cordelia@zulip.com"]
        ret = self.common_subscribe_to_streams(
            self.email,
            streams,
            dict(principals=ujson.dumps(users_to_subscribe)))
        self.assert_json_success(ret)
        ret = self.common_subscribe_to_streams(
            self.email,
            ["stream_invite_only_1"],
            dict(principals=ujson.dumps(users_to_subscribe)),
            invite_only=True)
        self.assert_json_success(ret)
        with queries_captured() as queries:
            subscriptions = gather_subscriptions(self.user_profile)
        self.assertTrue(len(subscriptions[0]) >= 11)
        # Every stream created above should list all three subscribers.
        for sub in subscriptions[0]:
            if not sub["name"].startswith("stream_"):
                continue
            self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
        self.assert_length(queries, 4, exact=True)
    @slow(0.15, "common_subscribe_to_streams is slow")
    def test_gather_subscriptions_mit(self):
        # type: () -> None
        """
        gather_subscriptions returns correct results with a constant
        number of queries (4) on an MIT realm.
        """
        # Subscribe only ourself because invites are disabled on mit.edu
        users_to_subscribe = ["starnine@mit.edu", "espuser@mit.edu"]
        for email in users_to_subscribe:
            self.subscribe_to_stream(email, "mit_stream")
        ret = self.common_subscribe_to_streams(
            "starnine@mit.edu",
            ["mit_invite_only"],
            dict(principals=ujson.dumps(users_to_subscribe)),
            invite_only=True)
        self.assert_json_success(ret)
        with queries_captured() as queries:
            subscriptions = gather_subscriptions(get_user_profile_by_email("starnine@mit.edu"))
        self.assertTrue(len(subscriptions[0]) >= 2)
        # Only the invite-only stream should expose its subscriber list.
        for sub in subscriptions[0]:
            if not sub["name"].startswith("mit_"):
                continue
            if sub["name"] == "mit_invite_only":
                self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
            else:
                self.assertTrue(len(sub["subscribers"]) == 0)
        self.assert_length(queries, 4, exact=True)
    def test_nonsubscriber(self):
        # type: () -> None
        """
        Even a non-subscriber to a public stream can query a stream's membership
        with get_subscribers.
        """
        # Create a stream for which Hamlet is the only subscriber.
        stream_name = "Saxony"
        self.common_subscribe_to_streams(self.email, [stream_name])
        other_email = "othello@zulip.com"
        # Fetch the subscriber list as a non-member.
        self.login(other_email)
        self.make_successful_subscriber_request(stream_name)
    def test_subscriber_private_stream(self):
        # type: () -> None
        """
        A subscriber to a private stream can query that stream's membership.
        """
        stream_name = "Saxony"
        self.common_subscribe_to_streams(self.email, [stream_name],
                                         invite_only=True)
        self.make_successful_subscriber_request(stream_name)
    def test_nonsubscriber_private_stream(self):
        # type: () -> None
        """
        A non-subscriber to a private stream can't query that stream's membership.
        """
        # Create a private stream for which Hamlet is the only subscriber.
        stream_name = "NewStream"
        self.common_subscribe_to_streams(self.email, [stream_name],
                                         invite_only=True)
        other_email = "othello@zulip.com"
        # Try to fetch the subscriber list as a non-member.
        result = self.make_subscriber_request(stream_name, email=other_email)
        self.assert_json_error(result,
                               "Unable to retrieve subscribers for invite-only stream")
| {
"content_hash": "ccc1a3d868db90ba29886a6b15714f3a",
"timestamp": "",
"source": "github",
"line_count": 1627,
"max_line_length": 125,
"avg_line_length": 42.105716041794714,
"alnum_prop": 0.5953785069920883,
"repo_name": "Vallher/zulip",
"id": "7d74b901f41ee11f3e528de190f4cddf119b5571",
"size": "68548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_subs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "385288"
},
{
"name": "JavaScript",
"bytes": "1571752"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1862563"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
} |
"""
A sub-package for efficiently dealing with polynomials.
Within the documentation for this sub-package, a "finite power series,"
i.e., a polynomial (also referred to simply as a "series") is represented
by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
order term to highest. For example, array([1,2,3]) represents
``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
applicable to the specific module in question, e.g., `polynomial` (which
"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
all operations on polynomials, including evaluation at an argument, are
implemented as operations on the coefficients. Additional (module-specific)
information can be found in the docstring for the module of interest.
"""
from .polynomial import Polynomial
from .chebyshev import Chebyshev
from .legendre import Legendre
from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
def set_default_printstyle(style):
    """
    Set the default format for the string representation of polynomials.

    Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii'
    or 'unicode'.

    Parameters
    ----------
    style : str
        Format string for default printing style. Must be either 'ascii' or
        'unicode'.

    Raises
    ------
    ValueError
        If ``style`` is neither 'ascii' nor 'unicode'.

    Notes
    -----
    The default format depends on the platform: 'unicode' is used on
    Unix-based systems and 'ascii' on Windows. This determination is based on
    default font support for the unicode superscript and subscript ranges.

    Examples
    --------
    >>> p = np.polynomial.Polynomial([1, 2, 3])
    >>> c = np.polynomial.Chebyshev([1, 2, 3])
    >>> np.polynomial.set_default_printstyle('unicode')
    >>> print(p)
    1.0 + 2.0·x¹ + 3.0·x²
    >>> print(c)
    1.0 + 2.0·T₁(x) + 3.0·T₂(x)
    >>> np.polynomial.set_default_printstyle('ascii')
    >>> print(p)
    1.0 + 2.0 x**1 + 3.0 x**2
    >>> print(c)
    1.0 + 2.0 T_1(x) + 3.0 T_2(x)
    >>> # Formatting supersedes all class/package-level defaults
    >>> print(f"{p:unicode}")
    1.0 + 2.0·x¹ + 3.0·x²
    """
    if style not in ('unicode', 'ascii'):
        raise ValueError(
            f"Unsupported format string '{style}'. Valid options are 'ascii' "
            f"and 'unicode'"
        )
    # Imported here (not at module top) to match the original lazy import.
    from ._polybase import ABCPolyBase
    # A single class attribute on the common base class controls the default
    # for every polynomial type; the previous if/else reduced to this boolean.
    ABCPolyBase._use_unicode = (style == 'unicode')
# Expose numpy's standard test runner as np.polynomial.test(), then drop the
# helper name so it does not leak into the public namespace.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| {
"content_hash": "c1cbe9371be3f8cff4c48628403c5626",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 34.22666666666667,
"alnum_prop": 0.6630307752239969,
"repo_name": "abalkin/numpy",
"id": "43b2caba396f6b0d30302835d9e4158e66f5d1b1",
"size": "2581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/polynomial/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7712217"
},
{
"name": "C++",
"bytes": "27311"
},
{
"name": "Fortran",
"bytes": "5803"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Python",
"bytes": "6209890"
}
],
"symlink_target": ""
} |
import uuid
from django.test import TestCase
from corehq.apps.change_feed import data_sources, topics
from corehq.apps.change_feed.document_types import change_meta_from_doc
from corehq.apps.change_feed.producer import producer
from corehq.apps.change_feed.topics import get_topic_offset
from corehq.apps.es import GroupES
from corehq.apps.es.tests.utils import es_test
from corehq.apps.groups.models import Group
from corehq.apps.groups.tests.test_utils import delete_all_groups
from corehq.elastic import get_es_new
from corehq.pillows.groups_to_user import get_group_pillow
from corehq.pillows.mappings.group_mapping import GROUP_INDEX_INFO
from corehq.pillows.mappings.user_mapping import USER_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from pillowtop.es_utils import initialize_index_and_mapping
@es_test
class GroupPillowTest(TestCase):
    """End-to-end pillow test: a Group change flows from Couch through a
    Kafka change feed into the Elasticsearch group index."""
    def setUp(self):
        # Start each test from empty group/user indices and no saved groups.
        self.elasticsearch = get_es_new()
        for index in [GROUP_INDEX_INFO, USER_INDEX_INFO]:
            ensure_index_deleted(index.index)
            initialize_index_and_mapping(self.elasticsearch, index)
        delete_all_groups()
    def tearDown(self):
        ensure_index_deleted(GROUP_INDEX_INFO.index)
        ensure_index_deleted(USER_INDEX_INFO.index)
    def test_kafka_group_pillow(self):
        domain = uuid.uuid4().hex
        user_id = uuid.uuid4().hex
        # make a group
        group = Group(domain=domain, name='g1', users=[user_id])
        group.save()
        # send to kafka (record the offset first so the pillow only
        # processes the change we publish below)
        since = get_topic_offset(topics.GROUP)
        change_meta = change_meta_from_doc(
            document=group.to_json(),
            data_source_type=data_sources.SOURCE_COUCH,
            data_source_name=Group.get_db().dbname,
        )
        producer.send_change(topics.GROUP, change_meta)
        # send to elasticsearch
        pillow = get_group_pillow()
        pillow.process_changes(since=since, forever=False)
        # refresh so the indexed document is visible to the search below
        self.elasticsearch.indices.refresh(GROUP_INDEX_INFO.index)
        # verify there
        self._verify_group_in_es(group)
    def _verify_group_in_es(self, group):
        # The index should contain exactly one document: our group.
        results = GroupES().run()
        self.assertEqual(1, results.total)
        es_group = results.hits[0]
        self.assertEqual(group._id, es_group['_id'])
        self.assertEqual(group.name, es_group['name'])
        self.assertEqual(group.users, es_group['users'])
        self.assertEqual('Group', es_group['doc_type'])
| {
"content_hash": "dfbb4943b7abe00e7e3134fe04475ad3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 71,
"avg_line_length": 37.56923076923077,
"alnum_prop": 0.6883701883701884,
"repo_name": "dimagi/commcare-hq",
"id": "67132280c7c9b377866cfd4b86d465f7b40a66c1",
"size": "2442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapps/test_pillowtop/tests/test_group_pillow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from scopus.classes import Search
from scopus.utils import listify
class ScopusSearch(Search):
    @property
    def results(self):
        """A list of namedtuples in the form (eid doi pii pubmed_id title
        subtype creator afid affilname affiliation_city affiliation_country
        author_count author_names author_ids author_afids coverDate
        coverDisplayDate publicationName issn source_id eIssn aggregationType
        volume issueIdentifier article_number pageRange description
        authkeywords citedby_count openaccess fund_acr fund_no fund_sponsor).
        Field definitions correspond to
        https://dev.elsevier.com/guides/ScopusSearchViews.htm, except for
        afid, affilname, affiliation_city, affiliation_country, author_count,
        author_names, author_ids and author_afids: these pieces of
        information are joined on ";". In case an author has multiple
        affiliations, they are joined on "-"
        (e.g. Author1Aff;Author2Aff1-Author2Aff2).
        Returns None when the search produced no entries.
        Notes
        -----
        The list of authors and the list of affiliations per author are
        deduplicated.
        """
        out = []
        fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\
                 'affiliation_city affiliation_country author_count '\
                 'author_names author_ids author_afids coverDate '\
                 'coverDisplayDate publicationName issn source_id eIssn '\
                 'aggregationType volume issueIdentifier article_number '\
                 'pageRange description authkeywords citedby_count '\
                 'openaccess fund_acr fund_no fund_sponsor'
        doc = namedtuple('Document', fields)
        for item in self._json:
            # info collects the pre-joined affiliation/author strings; note
            # city/country are stored under the short keys "aff_city" and
            # "aff_country" and read back under those keys further below.
            info = {}
            # Parse affiliations
            try:
                info["affilname"] = _join(item['affiliation'], 'affilname')
                info["afid"] = _join(item['affiliation'], 'afid')
                info["aff_city"] = _join(item['affiliation'], 'affiliation-city')
                info["aff_country"] = _join(item['affiliation'],
                                            'affiliation-country')
            except KeyError:
                # Entry has no affiliation data; the fields stay None.
                pass
            # Parse authors
            try:
                # Deduplicate list of authors
                authors = _deduplicate(item['author'])
                # Extract information
                surnames = _replace_none([d['surname'] for d in authors])
                firstnames = _replace_none([d['given-name'] for d in authors])
                info["auth_names"] = ";".join([", ".join([t[0], t[1]]) for t in
                                               zip(surnames, firstnames)])
                info["auth_ids"] = ";".join([d['authid'] for d in authors])
                affs = []
                for auth in authors:
                    # Each author's (deduplicated) affiliation IDs are
                    # joined on "-"; authors are then joined on ";".
                    aff = listify(_deduplicate(auth.get('afid', [])))
                    affs.append('-'.join([d['$'] for d in aff]))
                info["auth_afid"] = (';'.join(affs) or None)
            except KeyError:
                # Entry has no author data; the fields stay None.
                pass
            # coverDate is occasionally wrapped in a list of {'$': value}.
            date = item.get('prism:coverDate')
            if isinstance(date, list):
                date = date[0].get('$')
            new = doc(article_number=item.get('article-number'),
                title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'),
                subtype=item.get('subtype'), issn=item.get('prism:issn'),
                creator=item.get('dc:creator'), affilname=info.get("affilname"),
                author_names=info.get("auth_names"), doi=item.get('prism:doi'),
                coverDate=date, volume=item.get('prism:volume'),
                coverDisplayDate=item.get('prism:coverDisplayDate'),
                publicationName=item.get('prism:publicationName'),
                source_id=item.get('source-id'), author_ids=info.get("auth_ids"),
                aggregationType=item.get('prism:aggregationType'),
                issueIdentifier=item.get('prism:issueIdentifier'),
                pageRange=item.get('prism:pageRange'),
                author_afids=info.get("auth_afid"), fund_no=item.get('fund-no'),
                affiliation_country=info.get("aff_country"),
                citedby_count=item.get('citedby-count'),
                openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'),
                author_count=item.get('author-count', {}).get('$'),
                affiliation_city=info.get("aff_city"), afid=info.get("afid"),
                description=item.get('dc:description'), pii=item.get('pii'),
                authkeywords=item.get('authkeywords'), eid=item['eid'],
                fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id'))
            out.append(new)
        # Empty result sets are reported as None, not [].
        return out or None
def __init__(self, query, refresh=False, subscriber=True,
view=None, download=True, **kwds):
"""Class to perform a query against the Scopus Search API.
Parameters
----------
query : str
A string of the query.
refresh : bool (optional, default=False)
Whether to refresh the cached file if it exists or not.
subscriber : bool (optional, default=True)
Whether the user accesses Scopus with a subscription or not.
For subscribers, Scopus's cursor navigation will be used.
Sets the number of entries in each query iteration to the maximum
number allowed by the corresponding view.
view : str (optional, default=None)
Which view to use for the query, see
https://dev.elsevier.com/guides/ScopusSearchViews.htm.
Allowed values: STANDARD, COMPLETE. If None, defaults to
COMPLETE if subscriber=True and to STANDARD if subscriber=False.
cursor : bool (optional, default=True)
Whether to use Scopus's cursor navigation to obtain results.
Using the cursor allows to download an unlimited results set.
Non-subscribers should set this to False.
download : bool (optional, default=True)
Whether to download results (if they have not been cached).
kwds : key-value parings, optional
Keywords passed on as query parameters. Must contain fields
and values listed mentioned in the API specification
(https://dev.elsevier.com/documentation/SCOPUSSearchAPI.wadl),
such as "field" or "date".
Raises
------
ScopusQueryError
For non-subscribers, if the number of search results exceeds 5000.
ValueError
If the view parameter is not one of the allowed ones.
Notes
-----
Json results are cached in ~/.scopus/scopus_search/{view}/{fname},
where fname is the md5-hashed version of query.
"""
# Checks
allowed_views = ('STANDARD', 'COMPLETE')
if view and view not in allowed_views:
raise ValueError('view parameter must be one of ' +
', '.join(allowed_views))
# Parameters
if not view:
if subscriber:
view = "COMPLETE"
else:
view = "STANDARD"
count = 25
if view == "STANDARD" and subscriber:
count = 200
# Query
self.query = query
Search.__init__(self, query=query, api='ScopusSearch', refresh=refresh,
count=count, cursor=subscriber, view=view,
download_results=download, **kwds)
def __str__(self):
eids = self.get_eids()
s = """Search {} yielded {} document(s):\n {}"""
return s.format(self.query, len(eids), '\n '.join(eids))
def get_eids(self):
"""EIDs of retrieved documents."""
return [d['eid'] for d in self._json]
def _deduplicate(lst):
"""Auxiliary function to deduplicate lst."""
out = []
for i in lst:
if i not in out:
out.append(i)
return out
def _join(lst, key, sep=";"):
"""Auxiliary function to join same elements of a list of dictionaries if
the elements are not None.
"""
return sep.join([d[key] for d in lst if d[key]])
def _replace_none(lst, repl=""):
"""Auxiliary function to replace None's with another value."""
return ['' if v is None else v for v in lst]
| {
"content_hash": "3c9c73e16e7bde55a8c63494d79ff020",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 82,
"avg_line_length": 44.13612565445026,
"alnum_prop": 0.5730723606168446,
"repo_name": "scopus-api/scopus",
"id": "5458cc447bb0ffc0aeb6c9bc97744b792c5541d7",
"size": "8430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scopus/scopus_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133243"
}
],
"symlink_target": ""
} |
from CIM14.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject
class BaseVoltage(IdentifiedObject):
    """Collection of BaseVoltages which is used to verify that the BusbarSection.BaseVoltage and other voltage attributes in the CIM are given a value existing in the collection.
    """

    def __init__(self, nominalVoltage=0.0, ConductingEquipment=None, VoltageLevel=None, *args, **kw_args):
        """Initialises a new 'BaseVoltage' instance.

        @param nominalVoltage: The PowerSystemResource's base voltage.
        @param ConductingEquipment: Use association to ConductingEquipment only when there is no VoltageLevel container used.
        @param VoltageLevel: The VoltageLevels having this BaseVoltage.
        """
        #: The PowerSystemResource's base voltage.
        self.nominalVoltage = nominalVoltage

        # One-to-many references.  Assigning through the public properties
        # below also fixes up the inverse reference on each related object.
        self._ConductingEquipment = []
        self.ConductingEquipment = [] if ConductingEquipment is None else ConductingEquipment

        self._VoltageLevel = []
        self.VoltageLevel = [] if VoltageLevel is None else VoltageLevel

        super(BaseVoltage, self).__init__(*args, **kw_args)

    # Introspection metadata walked by the generated CIM serialization
    # machinery to discover attributes and references.
    _attrs = ["nominalVoltage"]
    _attr_types = {"nominalVoltage": float}
    _defaults = {"nominalVoltage": 0.0}
    _enums = {}
    _refs = ["ConductingEquipment", "VoltageLevel"]
    _many_refs = ["ConductingEquipment", "VoltageLevel"]

    def getConductingEquipment(self):
        """Use association to ConductingEquipment only when there is no VoltageLevel container used.
        """
        return self._ConductingEquipment

    def setConductingEquipment(self, value):
        """Replace the ConductingEquipment list, keeping both sides of the
        association consistent.
        """
        # Detach current members through their public property (presumably a
        # property on ConductingEquipment that clears the inverse reference,
        # mirroring the pattern in this class — confirm on that class) ...
        for x in self._ConductingEquipment:
            x.BaseVoltage = None
        # ... then attach the new members by writing the private attribute
        # directly, avoiding a recursive round-trip through their setter.
        for y in value:
            y._BaseVoltage = self
        self._ConductingEquipment = value

    ConductingEquipment = property(getConductingEquipment, setConductingEquipment)

    def addConductingEquipment(self, *ConductingEquipment):
        """Attach the given objects by pointing their BaseVoltage here."""
        for obj in ConductingEquipment:
            obj.BaseVoltage = self

    def removeConductingEquipment(self, *ConductingEquipment):
        """Detach the given objects by clearing their BaseVoltage."""
        for obj in ConductingEquipment:
            obj.BaseVoltage = None

    def getVoltageLevel(self):
        """The VoltageLevels having this BaseVoltage.
        """
        return self._VoltageLevel

    def setVoltageLevel(self, value):
        """Replace the VoltageLevel list, keeping both sides of the
        association consistent (same two-phase pattern as
        setConductingEquipment above).
        """
        for x in self._VoltageLevel:
            x.BaseVoltage = None
        for y in value:
            y._BaseVoltage = self
        self._VoltageLevel = value

    VoltageLevel = property(getVoltageLevel, setVoltageLevel)

    def addVoltageLevel(self, *VoltageLevel):
        """Attach the given objects by pointing their BaseVoltage here."""
        for obj in VoltageLevel:
            obj.BaseVoltage = self

    def removeVoltageLevel(self, *VoltageLevel):
        """Detach the given objects by clearing their BaseVoltage."""
        for obj in VoltageLevel:
            obj.BaseVoltage = None
| {
"content_hash": "d582fb4ca89cd13b95b3d350ae6c811f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 178,
"avg_line_length": 37.266666666666666,
"alnum_prop": 0.6797853309481217,
"repo_name": "rwl/PyCIM",
"id": "e9eaaf89d567ba41196bc4f2aedec65e02aa6384",
"size": "3895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/CDPSM/Balanced/IEC61970/Core/BaseVoltage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
"""Utility methods to handle the creation of tf.keras.models.Model instances.
We use a number of custom classes with tf.keras.models.Model, and when cloning
models we need to make sure Keras is aware of all our classes in order to
serialize and deserialize them properly. This file contains utility methods to
this end.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from cold_posterior_bnn.core import prior
def bnn_scope():
  """Create a scope that is aware of BNN library objects.

  Returns:
    scope: tf.keras.utils.CustomObjectScope with object/class mapping.
  """
  # Each custom prior/regularizer class is registered under its class name,
  # which is the identifier Keras uses when (de)serializing configs.
  custom_classes = (
      prior.NormalRegularizer,
      prior.StretchedNormalRegularizer,
      prior.HeNormalRegularizer,
      prior.GlorotNormalRegularizer,
      prior.LaplaceRegularizer,
      prior.CauchyRegularizer,
      prior.SpikeAndSlabRegularizer,
      prior.EmpiricalBayesNormal,
      prior.HeNormalEBRegularizer,
      prior.ShiftedNormalRegularizer,
  )
  scope_dict = {cls.__name__: cls for cls in custom_classes}
  # Also include any globally registered Keras custom objects.
  scope_dict.update(tf.keras.utils.get_custom_objects())
  return tf.keras.utils.CustomObjectScope(scope_dict)
def clone_model(model):
  """Clone a model.

  We add information necessary to serialize/deserialize the `bnn` classes.

  Args:
    model: tf.keras.models.Model to be cloned.

  Returns:
    model_cloned: tf.keras.models.Model having the same structure.

  Raises:
    ValueError: if the model is not a recognized tf.keras model type.
  """
  with bnn_scope():
    # NOTE(review): the issubclass() arguments look reversed compared to the
    # usual isinstance-style check — as written the branch is taken only when
    # type(model) is tf.keras.Model itself (or one of its bases); subclassed
    # models fall through to the from_config branch below.  Confirm this is
    # intentional before changing it.
    if isinstance(model, tf.keras.Sequential) or issubclass(tf.keras.Model,
                                                            type(model)):
      model_cloned = tf.keras.models.clone_model(model)
    elif isinstance(model, tf.keras.Model):
      # Subclassed models: rebuild from their config instead.
      model_cloned = model.__class__.from_config(model.get_config())
    else:
      raise ValueError('Unknown model type, cannot clone.')
  return model_cloned
def clone_model_and_weights(model, input_shape):
  """Clone a model and transfer its weights to the clone.

  The clone is built with the given input_shape before the weights of
  `model` are copied over.

  Args:
    model: tf.keras.models.Model to be cloned.
    input_shape: same parameter as in tf.keras.models.Model.build.
      For example, for MNIST this would typically be (1,784).

  Returns:
    model_cloned: tf.keras.models.Model having the same structure and weights.
  """
  replica = clone_model(model)
  replica.build(input_shape)
  replica.set_weights(model.get_weights())
  return replica
| {
"content_hash": "4e7ed59fca353d026e2b838d421f2577",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 32.583333333333336,
"alnum_prop": 0.7223237120935331,
"repo_name": "google-research/google-research",
"id": "f1d5469b7efb9080f82e3cfdd55ac573ef6fe260",
"size": "3345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cold_posterior_bnn/core/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"""
Script to use DB backups to migrate data for covidcast issues and lag addition
Author: Eu Jing Chua
Created: 2020-06-26
"""
# Standard library
import argparse
from collections import defaultdict
import datetime
import glob
import gzip
from itertools import islice, chain, starmap
import logging
from multiprocessing import Pool
import os
from typing import Optional, List, Iterable, Iterator, Dict, Tuple
# Third party
import pandas as pd
from multiprocessing_logging import install_mp_handler, uninstall_mp_handler
# Prefix of the bulk INSERT statements in the SQL dumps; used both to
# recognize covidcast rows during extraction and to emit new statements.
COVIDCAST_INSERT_START = "INSERT INTO `covidcast` VALUES "

# Column names
# INDEX_COLS uniquely identify an observation and form the diff key;
# VALUE_COLS carry the observed values and bookkeeping timestamps.
INDEX_COLS = ["source", "signal", "time_type", "geo_type", "time_value", "geo_value"]
VALUE_COLS = ["value_updated_timestamp", "value", "stderr", "sample_size", "direction_updated_timestamp", "direction"]
ALL_COLS = INDEX_COLS + VALUE_COLS
ALL_COLS_WITH_PK = ["id"] + ALL_COLS

# Dtypes that try save memory by using categoricals
DTYPES = {
    # skip "id", the primary key as it may have changed
    "source": "category",
    "signal": "category",
    "time_type": "category",
    "geo_type": "category",
    # time_value as str, because we need this parsed as a datetime anyway
    "time_value": "str",
    "geo_value": "category",
    "value_updated_timestamp": "int",
    # value/stderr/sample_size kept as str to avoid float parsing/precision
    "value": "str",
    "stderr": "str",
    "sample_size": "str",
    "direction_updated_timestamp": "int",
    "direction": "category"
}
def parse_args():
    '''
    Define and parse the commandline arguments for this script.

    Returns:
        argparse.Namespace with the parsed options (sql_files, skip_sources,
        tmp_dir, out_dir, chunk_size, parallel, ncpu_csvs, ncpu_sources,
        use_cache, debug)
    '''
    parser = argparse.ArgumentParser(
        description="Process DB backups to migrate data for covidcast issues and lag addition")

    parser.add_argument(
        "--input-files", nargs="+", dest="sql_files",
        default=glob.glob("./just_covidcast_*_database.sql.gz"),
        help="Input backup .sql files to process. May be compressed (.gz)")
    parser.add_argument(
        "--skip", nargs="+", dest="skip_sources", default=[],
        help="List of sources to skip")
    parser.add_argument(
        "--tmp-dir", dest="tmp_dir", default="./tmp", type=str,
        help="Temporary directory to use for intermediate files")
    parser.add_argument(
        "--out-dir", dest="out_dir", default="./out", type=str,
        help="Output directory to use for resulting .sql files")
    parser.add_argument(
        "--max-insert-chunk", dest="chunk_size", default=1000, type=int,
        help="Maximum number of rows to have per SQL INSERT statement")
    parser.add_argument(
        "--par", dest="parallel", action="store_true",
        help="Enable multiprocessing")
    # Two separate worker-pool sizes: CSV work is cheap on memory, while
    # per-source diffing loads whole dataframes and needs fewer workers.
    parser.add_argument(
        "--ncpu-csvs", dest="ncpu_csvs", default=1, type=int,
        help="Max number of processes to use for CSV processing (low memory usage)")
    parser.add_argument(
        "--ncpu-sources", dest="ncpu_sources", default=1, type=int,
        help="Max number of processes to use for processing sources (high memory usage)")
    parser.add_argument(
        "--incremental", dest="use_cache", action="store_true",
        help="Reuse results in --tmp-dir, and skip over existing results in --out-dir")
    parser.add_argument(
        "--debug", dest="debug", action="store_true",
        help="More verbose debug output")

    args = parser.parse_args()
    return args
def show_args(args):
    '''
    Log the effective configuration at startup, for reproducibility.

    Args:
        args: Parsed commandline arguments (see parse_args)
    '''
    logging.info("Input files (in order):\n\t%s", "\n\t".join(sorted(args.sql_files)))
    logging.info("Skipping sources: [%s]", ", ".join(args.skip_sources))
    logging.info("Temporary dir: %s", args.tmp_dir)
    logging.info("Output dir: %s", args.out_dir)
    logging.info("Max insert chunk: %d", args.chunk_size)
    logging.info("Parallel: %s", args.parallel)
    # Worker counts only matter when multiprocessing is enabled
    if args.parallel:
        logging.info("Num. CPU (CSVs): %d", args.ncpu_csvs)
        logging.info("Num. CPU (sources): %d", args.ncpu_sources)
    logging.info("Incremental: %s", args.use_cache)
    logging.info("Debug output: %s", args.debug)
    # Blank line to visually separate the configuration from later output
    print()
def main(args):
    '''
    Overall flow:
    1) Extract relevant tuples from .sql into CSVs so we can use CSV diffing tools
    2) Split each CSV by 'source'
    3) For each source, do a CSV diff for each sliding pair of dates
    4) As diffs are found, write results to a .sql file for current source

    Args:
        args: Parsed commandline arguments (see parse_args)

    Returns:
        The results of process_source for each non-skipped source
    '''
    # 0) Configuration stuff
    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.INFO,
        format="%(levelname)s:\t%(message)s")
    show_args(args)
    os.makedirs(args.tmp_dir, exist_ok=True)
    os.makedirs(args.out_dir, exist_ok=True)

    # 1) Extract relevant tuples from .sql into CSVs so we can use CSV diffing tools
    logging.info("Extracting to csvs...")
    csv_files = []
    extract_args = []
    # Ensure files are in sorted order of date in filename
    for sql_file in sorted(args.sql_files):
        csv_file = os.path.join(
            args.tmp_dir,
            f"just_covidcast_{date_int_from_filename(sql_file)}.csv")
        if args.use_cache and os.path.exists(csv_file):
            logging.debug("CSV %s already exists, skipping processing of %s", csv_file, sql_file)
        else:
            extract_args.append((sql_file, csv_file))
        # Regardless of cache, keep track of csv files anyway
        csv_files.append(csv_file)
    starmap_mp_logging(
        extract_to_csv, extract_args,
        par=args.parallel, ncpu=args.ncpu_csvs)

    # 2) Split each backup's csv by source
    logging.info("Splitting csvs...")
    split_col = 1
    split_csv_args = []
    files_by_src = defaultdict(list)
    for csv_file in csv_files:
        base_name, f_ext = os.path.splitext(csv_file)
        split_patt = f"{base_name}_*{f_ext}"
        split_csv_files = glob.glob(split_patt)
        if args.use_cache and len(split_csv_files) > 0:
            logging.debug("CSV %s already split, skipping splitting", csv_file)
            # If split csvs already exist, update files_by_src directly
            for sub_csv_file in split_csv_files:
                # Source name is the last underscore-delimited token,
                # minus the 4-character ".csv" extension
                src = sub_csv_file.split("_")[-1][:-4]
                files_by_src[src].append(sub_csv_file)
        else:
            split_csv_args.append((csv_file, split_col))
    by_srcs = starmap_mp_logging(
        split_csv_by_col, split_csv_args,
        par=args.parallel, ncpu=args.ncpu_csvs)
    # Combine all return dictionaries into a dictionary of lists instead
    # Note that each list may not be sorted
    for by_src in by_srcs:
        for src, sub_csv_file in by_src.items():
            files_by_src[src].append(sub_csv_file)

    # 3) Find issues from sliding pairs of [None, csv_1, csv_2, ... csv_N] for each source
    proc_args = []
    for source, src_files in files_by_src.items():
        if source in args.skip_sources:
            logging.info("Skipping group: %s", source)
            continue
        proc_args.append((args, source, src_files))
    output_sql_files = starmap_mp_logging(
        process_source, proc_args,
        par=args.parallel, ncpu=args.ncpu_sources)
    return output_sql_files
def starmap_mp_logging(func, args: Iterable, par: bool = False, ncpu: Optional[int] = None):
    '''
    Does a starmap of func over args, either in parallel or serially, with logging support

    Bug fix: the serial path used to return the lazy ``itertools.starmap``
    iterator, so call sites that discard the return value (e.g. the
    extraction and process_source steps in main) never actually executed
    ``func``.  The serial result is now materialized into a list, matching
    ``multiprocessing.Pool.starmap`` semantics.

    Args:
        func: Callable to execute with each of args
        args: List-like of args to execute func with
        par: Whether to run in parallel or not
        ncpu: When par=True, how many processes to use

    Returns:
        list equivalent to list(starmap(func, args))
    '''
    if not par:
        # Materialize so side effects happen even when the caller ignores
        # the return value (itertools.starmap is lazy)
        return list(starmap(func, args))

    # Route worker-process log records through the main process's handlers
    install_mp_handler()
    try:
        with Pool(ncpu) as pool:
            return pool.starmap(func, args)
    finally:
        uninstall_mp_handler()
def process_source(args, source: str, src_files: List[str]) -> List[str]:
    '''
    Generate SQL files of new issues for one source by diffing consecutive
    backup CSVs.  The first file is diffed against nothing (all rows are
    new issues).

    Args:
        args: Parsed commandline arguments (uses out_dir, use_cache, chunk_size)
        source: Name of the data source being processed
        src_files: CSV files for this source, one per backup date

    Returns:
        Paths of the SQL files written (or reused from the cache)
    '''
    logging.info("[%s] Finding issues and generating SQL files...", source)

    # Prepend None so the first real file is "diffed" against nothing
    files = [None] + sorted(src_files)
    output_files = []
    for before_file, after_file in zip(files, files[1:]):
        date_int_after = date_int_from_filename(after_file)

        if before_file is None:
            logging.debug("[%s] First: %s", source, date_int_after)
            outfile = os.path.join(args.out_dir, f"{source}_00000000_{date_int_after}.sql")
        else:
            date_int_before = date_int_from_filename(before_file)
            logging.debug("[%s] Diffing: from %s to %s", source, date_int_before, date_int_after)
            outfile = os.path.join(args.out_dir, f"{source}_{date_int_before}_{date_int_after}.sql")

        # Diff and find new issues
        if args.use_cache and os.path.exists(outfile):
            logging.debug(
                "[%s] SQL file %s already generated, skipping diff",
                source, outfile)
            output_files.append(outfile)
            continue

        issues = generate_issues(before_file, after_file)

        # 4) Write out found issues into the SQL file
        logging.debug("[%s] Writing to %s", source, outfile)
        try:
            with open(outfile, "w") as f_sql:
                # Bound the number of rows per INSERT statement
                for issues_chunk in chunked(issues, args.chunk_size):
                    insert_stmt = COVIDCAST_INSERT_START + \
                        ",\n".join(issues_chunk) + \
                        ";\n"
                    f_sql.write(insert_stmt)
            output_files.append(outfile)
        except Exception as ex:
            # Remove the partial file so a later --incremental run does not
            # mistake it for a complete result
            logging.error(
                "[%s] Stopped unexpectedly while writing %s, deleting it",
                source, outfile, exc_info=True)
            os.remove(outfile)
            raise ex

    return output_files
def extract_to_csv(filename: str, output: str):
    '''
    Takes a backup .sql file and produces a CSV representing just the covidcast rows.
    Also accepts gzipped .sql.gz files as input.

    Args:
        filename: Input .sql or .sql.gz file
        output: Output .csv file
    '''
    logging.debug("Processing %s into %s", filename, output)

    # Transparently support gzip-compressed dumps
    if filename.endswith(".gz"):
        f_in = gzip.open(filename, "rt")
    else:
        f_in = open(filename, "r")

    # Each bulk INSERT line holds many row tuples:
    #   INSERT INTO `covidcast` VALUES (..),(..),...,(..);\n
    # Strip the statement prefix plus its opening '(' and the trailing ");\n",
    # then put every remaining tuple on its own CSV line.
    prefix_len = len(COVIDCAST_INSERT_START) + 1
    with f_in, open(output, "w") as f_out:
        for line in f_in:
            if not line.startswith(COVIDCAST_INSERT_START):
                continue
            rows = line[prefix_len:-3].replace("),(", "\n")
            f_out.write(rows + "\n")
def split_csv_by_col(
        filename: str, col_idx: int, add_header: bool = True) -> Dict[str, str]:
    '''
    Splits up a CSV file by unique values of a specified column into subset CSVs.
    Produces subset CSVs in same directory as input, with '_{value}' appended to filename.
    Assumes the input CSV has no header row, as produced by extract_to_csv.

    Args:
        filename: Input CSV file
        col_idx: Column index to split-by-values on
        add_header: Add column header row to output CSVs

    Returns:
        Mapping from column value -> subset CSV filename
    '''
    logging.debug("Splitting %s by %s", filename, ALL_COLS_WITH_PK[col_idx])

    open_file_writers = {}
    created_files = {}
    base_name, f_ext = os.path.splitext(filename)

    try:
        with open(filename, "r") as f_csv:
            # Assume no header
            prev_value = None
            for line in f_csv:
                # Not using in-built csv module as it was alot slower
                # Dont need the rest of the split beyond the column we are interested in
                value = line.split(",", col_idx + 1)[col_idx]

                # Get appropriate file to write to, and create it if it does not exist yet
                # Since most sources are in continuous rows, try do less dict lookups
                # Only change file handle when we see a different value
                if value != prev_value:
                    if value not in open_file_writers:
                        # Strip value of surrounding quotes for nicer filenames
                        clean_value = value.strip("'")
                        created_file = f"{base_name}_{clean_value}{f_ext}"
                        created_files[clean_value] = created_file

                        # Create and store file handle
                        sub_f = open(created_file, "w")
                        open_file_writers[value] = sub_f

                        # Add headers as the first row if indicated
                        if add_header:
                            sub_f.write(",".join(ALL_COLS_WITH_PK) + "\n")
                    else:
                        sub_f = open_file_writers[value]

                # Write to appropriate file
                sub_f.write(line)
                prev_value = value
    finally:
        # Robustness fix: close the per-value handles even when reading or
        # writing fails part-way, so no file descriptors leak
        for sub_f in open_file_writers.values():
            sub_f.close()

    return created_files
def datetime_to_int(date: datetime.datetime) -> int:
    '''
    Convert the input datetime into a date integer of the form YYYYmmdd
    '''
    return date.year * 10000 + date.month * 100 + date.day
def int_to_datetime(date_int: int) -> datetime.datetime:
    '''
    Convert a date integer of the form YYYYmmdd back into a datetime.datetime
    '''
    digits = str(date_int)
    return datetime.datetime.strptime(digits, "%Y%m%d")
def date_int_from_filename(filename: str) -> int:
    '''
    Extract the date integer embedded in a backup-derived filename.
    Assumes file is of format '{dir}/just_covidcast_{date}_...{ext}'.
    '''
    return int(os.path.basename(filename).split("_")[2])
def pd_csvdiff(
        before_file: str, after_file: str,
        index_cols: List[str],
        dtypes: Dict[str, str],
        find_removals: bool = False
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
    '''
    Finds the diff (additions and changes only by default) between two CSV files.
    Can find removals, but the additional index operations required add significant time.
    Uses pandas with specified dtypes to save some memory.

    Args:
        before_file: The "before" CSV file to diff from
        after_file: The "after" CSV file to diff to
        index_cols: Column names to use as the index that identifies an entry
        dtypes: Dtype definitions for column names to try save memory
        find_removals: Whether to find entries that were removed too

    Returns:
        A dataframe containing a subset of the after_file CSV that represents additions and changes,
        paired with either the removed entries (find_removals=True) or None
    '''
    # na_filter=False keeps empty fields as "" rather than NaN, so equality
    # comparisons below behave predictably
    df_before = pd.read_csv(
        before_file, usecols=dtypes.keys(), parse_dates=["time_value"],
        dtype=dtypes, na_filter=False)
    df_after = pd.read_csv(
        after_file, usecols=dtypes.keys(), parse_dates=["time_value"],
        dtype=dtypes, na_filter=False)

    # Efficiently union all categories together for comparison
    # (comparing two categorical columns requires identical category sets)
    # NOTE(review): Categorical.add_categories(..., inplace=True) is
    # deprecated/removed in newer pandas — confirm the pinned pandas version.
    for col, dtype in dtypes.items():
        if dtype == "category":
            before_cats = df_before[col].cat.categories
            after_cats = df_after[col].cat.categories
            df_before[col].cat.add_categories(after_cats.difference(before_cats), inplace=True)
            df_after[col].cat.add_categories(before_cats.difference(after_cats), inplace=True)
            assert df_before[col].dtype == df_after[col].dtype

    df_before.set_index(index_cols, inplace=True)
    df_after.set_index(index_cols, inplace=True)

    # Ensure lex sorted indices for efficient indexing
    df_before.sort_index(inplace=True)
    df_after.sort_index(inplace=True)

    # Find additions and changes together
    # Re-index df_before to be like df_after, index-wise, then do a diff
    # For common indices, different field values be false in same_mask
    # Since df_before is filled with NaN for new indices, new indices turn false in same_mask
    same_mask = (df_before.reindex(df_after.index) == df_after)
    # Ignore direction_updated_timestamp in the diff
    is_diff = ~(same_mask.loc[:, same_mask.columns != "direction_updated_timestamp"].all(axis=1))

    # Removed indices can be found via index difference, but is expensive
    if find_removals:
        removed_idx = df_before.index.difference(df_after.index)
        return (
            df_after.loc[is_diff, :],
            df_before.loc[removed_idx, :])

    return (
        df_after.loc[is_diff, :],
        None)
def generate_issues(
        before_file: Optional[str], after_file: str) -> Iterator[str]:
    '''
    A generator that diffs the input files, then yields formatted strings representing a row-tuple
    to be inserted in SQL. If before_file is None, we are simplying filling it with entries from
    after_file. The issue date for these "first" entries come from the after_file filename.
    Otherwise, we are updating accum with the diff between before_file to after_file.

    Args:
        before_file: The "before" CSV file in diffing. None if after_file is the 1st
        after_file: The "after" CSV file in diffing.

    Returns:
        An iterator that yields the string row-tuples to be inserted as an issue.
    '''
    # Get issue date from after_file
    issue_date_int = date_int_from_filename(after_file)
    issue_date = int_to_datetime(issue_date_int)

    # SQL tuple template: index columns, value columns, then issue and lag
    row_fmt = "(" \
        "{id},{source},{signal},{time_type},{geo_type},{time_value},{geo_value}," \
        "{row.value_updated_timestamp},{row.value},{row.stderr},{row.sample_size},{row.direction_updated_timestamp},{row.direction}," \
        "{issue},{row.lag})"

    try:
        if before_file is None:
            # At first file, just yield all contents as new issues
            df_diff = pd.read_csv(
                after_file, usecols=DTYPES.keys(), parse_dates=["time_value"],
                dtype=DTYPES, index_col=INDEX_COLS, na_filter=False)
        else:
            # Perform the CSV diff using INDEX_COLS to identify rows
            df_diff, _ = pd_csvdiff(before_file, after_file, INDEX_COLS, DTYPES)
    except Exception as ex:
        logging.error(
            "Diff Failed!!! Between files '%s', '%s'",
            before_file, after_file, exc_info=True)
        raise ex

    # lag = days between the observation's time_value and this issue date;
    # weekly signals get lag expressed in whole weeks instead
    # TODO: Does not really handle weekly values properly. Weekly time_value are in YYYYww format
    df_diff["lag"] = (issue_date - df_diff.index.get_level_values("time_value")).days
    is_weekly = df_diff.index.get_level_values("time_type") == "week"
    df_diff.loc[is_weekly, "lag"] = df_diff.loc[is_weekly, "lag"] // 7

    for row in df_diff.itertuples():
        index = dict(zip(INDEX_COLS, row.Index))
        index["time_value"] = datetime_to_int(index["time_value"])
        # id written as 0 — presumably replaced by the database's
        # auto-increment primary key on insert; confirm against the schema
        yield row_fmt.format(id=0, **index, row=row, issue=issue_date_int)
def chunked(iterable: Iterable, size) -> Iterator[Iterator]:
    '''
    Lazily split an iterable into chunks of at most `size` elements, without
    walking the whole iterable first.

    Each yielded chunk is itself an iterator over the shared underlying
    iterator, so consume each chunk fully before advancing to the next.
    https://stackoverflow.com/questions/24527006/split-a-generator-into-chunks-without-pre-walking-it
    '''
    source = iter(iterable)
    while True:
        try:
            head = next(source)
        except StopIteration:
            return
        yield chain([head], islice(source, size - 1))
if __name__ == "__main__":
main(parse_args())
| {
"content_hash": "0f3a7e964857f8340ed89c417c1ec8a3",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 135,
"avg_line_length": 37.79060665362035,
"alnum_prop": 0.6228056548081404,
"repo_name": "cmu-delphi/delphi-epidata",
"id": "1aa2cbe1b405aecca48ca802d7a976fa0b51a9d2",
"size": "19311",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/server/covidcast_issues_migration/proc_db_backups_pd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2852"
},
{
"name": "HTML",
"bytes": "727"
},
{
"name": "JavaScript",
"bytes": "18856"
},
{
"name": "Makefile",
"bytes": "5648"
},
{
"name": "PHP",
"bytes": "131735"
},
{
"name": "Python",
"bytes": "881368"
},
{
"name": "R",
"bytes": "17445"
},
{
"name": "Shell",
"bytes": "2024"
}
],
"symlink_target": ""
} |
"""Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib.six import zip as izip
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, IndexMixin, get_index_dtype,
downcast_intp_index, get_sum_dtype)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row and column oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self.shape = arg1 # spmatrix checks for errors here
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M,N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
self.shape = self._swap((major_dim,minor_dim))
if dtype is not None:
self.data = np.asarray(self.data, dtype=dtype)
self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self.shape = other.shape
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Verifies index dtypes, array dimensionality, indptr length and
        start value, sizes of the index/data arrays, and (optionally)
        index bounds and indptr monotonicity.  Also prunes any excess
        storage beyond nnz.

        Parameters
        ----------
        full_check : bool, optional
            If `True`, rigorous check, O(N) operations. Otherwise
            basic check, O(1) operations (default True).
        """
        # use _swap to determine proper bounds
        major_name,minor_name = self._swap(('row','column'))
        major_dim,minor_dim = self._swap(self.shape)

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                    % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                    % self.indices.dtype.name)

        # Normalize both index arrays to a common, adequate index dtype.
        idx_dtype = get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        if self.data.ndim != 1 or self.indices.ndim != 1 or self.indptr.ndim != 1:
            raise ValueError('data, indices, and indptr should be 1-D')

        # check index pointer
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("%s index values must be < %d" %
                                        (minor_name,minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("%s index values must be >= 0" %
                                        minor_name)
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")

        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order.  Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        # TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
    def __eq__(self, other):
        """Element-wise equality comparison.

        Comparing against scalar 0 or another sparse matrix densifies the
        result pattern (most entries are True) and emits a
        SparseEfficiencyWarning.
        """
        # Scalar other.
        if isscalarlike(other):
            if np.isnan(other):
                # nan is never equal to anything: empty (all-False) result.
                return self.__class__(self.shape, dtype=np.bool_)

            if other == 0:
                warn("Comparing a sparse matrix with 0 using == is inefficient"
                        ", try using != instead.", SparseEfficiencyWarning)
                # Complement of the (sparse) != 0 pattern.
                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
                inv = self._scalar_binopt(other, operator.ne)
                return all_true - inv
            else:
                return self._scalar_binopt(other, operator.eq)
        # Dense other.
        elif isdense(other):
            return self.todense() == other
        # Sparse other.
        elif isspmatrix(other):
            warn("Comparing sparse matrices using == is inefficient, try using"
                    " != instead.", SparseEfficiencyWarning)
            #TODO sparse broadcasting
            if self.shape != other.shape:
                return False
            elif self.format != other.format:
                other = other.asformat(self.format)
            # Equality is computed as the complement of the != pattern.
            res = self._binopt(other,'_ne_')
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            return all_true - res
        else:
            return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is inefficient",
SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other,'_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation behind <, >, <= and >=.

        Parameters
        ----------
        other : scalar, dense array, or sparse matrix
        op : callable
            The matching Python operator (e.g. ``operator.lt``).
        op_name : str
            Suffix naming the sparsetools kernel (e.g. ``'_lt_'``).
        bad_scalar_msg : str
            Warning text used when a scalar comparison densifies.
        """
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # Zero (implicit) entries compare True, so the result is
                # dense: materialize other and use the sparse kernel.
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        # Dense other.
        elif isdense(other):
            return op(self.todense(), other)
        # Sparse other.
        elif isspmatrix(other):
            #TODO sparse broadcasting
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)

            # >= and <= are computed as the complement of < / >.
            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            all_true = self.__class__(np.ones(self.shape))
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self,other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmatic operator overrides #
#################################
def __add__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
if other == 0:
return self.copy()
else: # Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
raise ValueError("inconsistent shapes")
return self._binopt(other,'_plus_')
elif isdense(other):
# Convert this matrix to a dense matrix and add them
return self.todense() + other
else:
return NotImplemented
def __radd__(self,other):
return self.__add__(other)
def __sub__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
if other == 0:
return self.copy()
else: # Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
raise ValueError("inconsistent shapes")
return self._binopt(other,'_minus_')
elif isdense(other):
# Convert this matrix to a dense matrix and subtract them
return self.todense() - other
else:
return NotImplemented
def __rsub__(self,other): # other - self
# note: this can't be replaced by other + (-self) for unsigned types
if isscalarlike(other):
if other == 0:
return -self.copy()
else: # Now we would add this scalar to every element.
raise NotImplementedError('adding a nonzero scalar to a '
'sparse matrix is not supported')
elif isdense(other):
# Convert this matrix to a dense matrix and subtract them
return other - self.todense()
else:
return NotImplemented
    def multiply(self, other):
        """Point-wise multiplication by another matrix, vector, or
        scalar.

        Broadcasting of sparse row/column vectors is implemented by
        multiplying with a diagonal matrix built from the vector.
        """
        # Scalar multiplication.
        if isscalarlike(other):
            return self._mul_scalar(other)
        # Sparse matrix or vector.
        if isspmatrix(other):
            if self.shape == other.shape:
                # Same shape: element-wise product via the C kernel.
                other = self.__class__(other)
                return self._binopt(other, '_elmul_')
            # Single element.
            elif other.shape == (1,1):
                return self._mul_scalar(other.toarray()[0, 0])
            elif self.shape == (1,1):
                return other._mul_scalar(self.toarray()[0, 0])
            # A row times a column.
            elif self.shape[1] == other.shape[0] and self.shape[1] == 1:
                return self._mul_sparse_matrix(other.tocsc())
            elif self.shape[0] == other.shape[1] and self.shape[0] == 1:
                return other._mul_sparse_matrix(self.tocsc())
            # Row vector times matrix. other is a row.
            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
                other = dia_matrix((other.toarray().ravel(), [0]),
                                    shape=(other.shape[1], other.shape[1]))
                return self._mul_sparse_matrix(other)
            # self is a row.
            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                    shape=(self.shape[1], self.shape[1]))
                return other._mul_sparse_matrix(copy)
            # Column vector times matrix. other is a column.
            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
                other = dia_matrix((other.toarray().ravel(), [0]),
                                    shape=(other.shape[0], other.shape[0]))
                return other._mul_sparse_matrix(self)
            # self is a column.
            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                    shape=(self.shape[0], self.shape[0]))
                return copy._mul_sparse_matrix(other)
            else:
                raise ValueError("inconsistent shapes")
        # Dense matrix.
        if isdense(other):
            if self.shape == other.shape:
                # Multiply only at stored positions; result stays sparse
                # (returned as COO).
                ret = self.tocoo()
                ret.data = np.multiply(ret.data, other[ret.row, ret.col]
                                       ).view(np.ndarray).ravel()
                return ret
            # Single element.
            elif other.size == 1:
                return self._mul_scalar(other.flat[0])
        # Anything else.
        return np.multiply(self.todense(), other)
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M,N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools,self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools,self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
return result
    def _mul_sparse_matrix(self, other):
        """Sparse matrix-matrix product via two-pass sparsetools kernels.

        Pass 1 computes the output indptr (structure), pass 2 fills
        indices and data.
        """
        M, K1 = self.shape
        K2, N = other.shape

        major_axis = self._swap((M,N))[0]
        other = self.__class__(other)  # convert to this format

        # Choose an index dtype wide enough for the worst case (M*N).
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=M*N)
        indptr = np.empty(major_axis + 1, dtype=idx_dtype)

        fn = getattr(_sparsetools, self.format + '_matmat_pass1')
        fn(M, N,
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           indptr)

        nnz = indptr[-1]
        # Re-choose the index dtype now that the true nnz is known.
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=nnz)
        indptr = np.asarray(indptr, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))

        fn = getattr(_sparsetools, self.format + '_matmat_pass2')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        return self.__class__((data,indices,indptr),shape=(M,N))
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
# TODO support k-th diagonal
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(self.shape), dtype=upcast(self.dtype))
fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y)
return y
#####################
# Other binary ops #
#####################
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation behind maximum() and minimum().

        Parameters
        ----------
        other : scalar, dense array, or sparse matrix
        npop : numpy ufunc (np.maximum or np.minimum)
        op_name : str
            Suffix naming the sparsetools kernel.
        dense_check : callable
            Predicate on a scalar operand; True means the result would
            be dense (warn and densify).
        """
        if isscalarlike(other):
            if dense_check(other):
                warn("Taking maximum (minimum) with > 0 (< 0) number results to "
                     "a dense matrix.",
                     SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                # Result stays sparse: apply the ufunc to stored data only.
                self.sum_duplicates()
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif isspmatrix(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0)
def minimum(self, other):
return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0)
#####################
# Reduce operations #
#####################
    def sum(self, axis=None, dtype=None, out=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)

            # Sum the stored values per major slot with ufunc.reduceat;
            # rows with no stored entries keep their zero.
            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = np.asmatrix(ret)
            if axis % 2 == 1:
                ret = ret.T

            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')

            # Summing over an empty tuple of axes applies dtype/out
            # handling without further reduction.
            return ret.sum(axis=(), dtype=dtype, out=out)
        # spmatrix will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
    sum.__doc__ = spmatrix.sum.__doc__
def _minor_reduce(self, ufunc):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
"""
major_index = np.flatnonzero(np.diff(self.indptr))
if self.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(self.data)
else:
value = ufunc.reduceat(self.data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
    def __setitem__(self, index, x):
        """Assign *x* at *index*; may change the sparsity structure."""
        # Process arrays from IndexMixin
        i, j = self._unpack_index(index)
        i, j = self._index_to_arrays(i, j)

        if isspmatrix(x):
            # A 1-row / 1-column sparse x is broadcast over the target.
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError("shape mismatch in assignment")

            # clear entries that will be overwritten
            ci, cj = self._swap((i.ravel(), j.ravel()))
            self._zero_many(ci, cj)

            x = x.tocoo()
            r, c = x.row, x.col
            x = np.asarray(x.data, dtype=self.dtype)
            if broadcast_row:
                r = np.repeat(np.arange(i.shape[0]), len(r))
                c = np.tile(c, i.shape[0])
                x = np.tile(x, i.shape[0])
            if broadcast_col:
                r = np.repeat(r, i.shape[1])
                c = np.tile(np.arange(i.shape[1]), len(c))
                x = np.repeat(x, i.shape[1])
            # only assign entries in the new sparsity structure
            i = i[r, c]
            j = j[r, c]
        else:
            # Make x and i into the same shape
            x = np.asarray(x, dtype=self.dtype)
            x, _ = np.broadcast_arrays(x, i)

            if x.shape != i.shape:
                raise ValueError("shape mismatch in assignment")

        if np.size(x) == 0:
            return
        # Convert (row, col) to (major, minor) order before writing.
        i, j = self._swap((i.ravel(), j.ravel()))
        self._set_many(i, j, x.ravel())
    def _setdiag(self, values, k):
        """Set the k-th diagonal to *values*.

        A 0-d *values* array is broadcast along the whole diagonal;
        otherwise the diagonal length is capped by len(values).
        """
        if 0 in self.shape:
            return

        M, N = self.shape
        broadcast = (values.ndim == 0)

        if k < 0:
            if broadcast:
                max_index = min(M + k, N)
            else:
                max_index = min(M + k, N, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Sub-diagonal: shift the row indices down by |k|.
            i -= k

        else:
            if broadcast:
                max_index = min(M, N - k)
            else:
                max_index = min(M, N - k, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Super-diagonal: shift the column indices right by k.
            j += k

        if not broadcast:
            values = values[:len(i)]

        self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
check_bounds(i, M)
check_bounds(j, N)
i = np.asarray(i, dtype=self.indices.dtype)
j = np.asarray(j, dtype=self.indices.dtype)
return i, j, M, N
    def _set_many(self, i, j, x):
        """Sets value at each (i, j) to x

        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)

        n_samples = len(x)
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        # Locate each (i, j) within the existing structure; -1 marks a
        # position that is not currently stored.
        ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
                                              n_samples, i, j, offsets)
        if ret == 1:
            # rinse and repeat
            self.sum_duplicates()
            _sparsetools.csr_sample_offsets(M, N, self.indptr,
                                            self.indices, n_samples, i, j,
                                            offsets)

        if -1 not in offsets:
            # only affects existing non-zero cells
            self.data[offsets] = x
            return

        else:
            warn("Changing the sparsity structure of a %s_matrix is expensive. "
                 "lil_matrix is more efficient." % self.format,
                 SparseEfficiencyWarning)
            # replace where possible
            mask = offsets > -1
            self.data[offsets[mask]] = x[mask]
            # only insertions remain
            mask = ~mask
            i = i[mask]
            i[i < 0] += M
            j = j[mask]
            j[j < 0] += N
            self._insert_many(i, j, x[mask])
    def _zero_many(self, i, j):
        """Sets value at each (i, j) to zero, preserving sparsity structure.

        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)

        n_samples = len(i)
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        # Locate each (i, j) within the existing structure; -1 marks a
        # position that is not currently stored.
        ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
                                              n_samples, i, j, offsets)
        if ret == 1:
            # rinse and repeat
            self.sum_duplicates()
            _sparsetools.csr_sample_offsets(M, N, self.indptr,
                                            self.indices, n_samples, i, j,
                                            offsets)

        # only assign zeros to the existing sparsity structure
        self.data[offsets[offsets > -1]] = 0
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x

        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1-d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        # Stable sort groups insertions by major index while keeping the
        # original order of duplicates.
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')

        do_sort = self.has_sorted_indices

        # Update index data type, wide enough for the grown nnz.
        idx_dtype = get_index_dtype((self.indices, self.indptr),
                                    maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)

        # Collate old and new in chunks by major index.
        # ui: unique major indices that receive insertions;
        # ui_indptr: start of each group within i/j/x.
        indices_parts = []
        data_parts = []
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])

            # handle duplicate j: keep last setting
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)

            prev = ii

        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])

        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        # Rebuild indptr from per-slot counts plus the inserted amounts.
        nnzs = np.asarray(np.ediff1d(self.indptr, to_begin=0), dtype=idx_dtype)
        nnzs[1:][ui] += new_nnzs
        self.indptr = np.cumsum(nnzs, out=nnzs)

        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()

        self.check_format(full_check=False)
def _get_single_element(self,row,col):
M, N = self.shape
if (row < 0):
row += M
if (col < 0):
col += N
if not (0 <= row < M) or not (0 <= col < N):
raise IndexError("index out of bounds")
major_index, minor_index = self._swap((row,col))
# TODO make use of sorted indices (if present)
start = self.indptr[major_index]
end = self.indptr[major_index+1]
# can use np.add(..., where) from numpy 1.7
return np.compress(minor_index == self.indices[start:end],
self.data[start:end]).sum(dtype=self.dtype)
    def _get_submatrix(self, slice0, slice1):
        """Return a submatrix of this matrix (new matrix is created)."""
        slice0, slice1 = self._swap((slice0,slice1))
        shape0, shape1 = self._swap(self.shape)

        def _process_slice(sl, num):
            # Normalize a slice / scalar / (start, stop) pair into a
            # half-open integer range (i0, i1), wrapping negatives.
            if isinstance(sl, slice):
                i0, i1 = sl.start, sl.stop
                if i0 is None:
                    i0 = 0
                elif i0 < 0:
                    i0 = num + i0

                if i1 is None:
                    i1 = num
                elif i1 < 0:
                    i1 = num + i1

                return i0, i1

            elif np.isscalar(sl):
                if sl < 0:
                    sl += num

                return sl, sl + 1

            else:
                return sl[0], sl[1]

        def _in_bounds(i0, i1, num):
            # Require a non-empty in-range span.
            if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1):
                raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
                                 (i0, num, i1, num, i0, i1))

        i0, i1 = _process_slice(slice0, shape0)
        j0, j1 = _process_slice(slice1, shape1)
        _in_bounds(i0, i1, shape0)
        _in_bounds(j0, j1, shape1)

        # The C routine returns (indptr, indices, data) for the block.
        aux = _sparsetools.get_csr_submatrix(shape0, shape1,
                                             self.indptr, self.indices,
                                             self.data,
                                             i0, i1, j0, j1)

        data, indices, indptr = aux[2], aux[1], aux[0]
        shape = self._swap((i1 - i0, j1 - j0))
        return self.__class__((data, indices, indptr), shape=shape)
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
return self.tocoo(copy=False).toarray(order=order, out=out)
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
    def __get_has_canonical_format(self):
        """Determine whether the matrix has sorted indices and no duplicates

        Returns
            - True: if the above applies
            - False: otherwise

        has_canonical_format implies has_sorted_indices, so if the latter flag
        is False, so will the former be; if the former is found True, the
        latter flag is also set.
        """
        # first check to see if result was cached
        if not getattr(self, '_has_sorted_indices', True):
            # not sorted => not canonical
            self._has_canonical_format = False
        elif not hasattr(self, '_has_canonical_format'):
            # Compute once via the C routine; assigning through the
            # property setter also updates has_sorted_indices.
            self.has_canonical_format = _sparsetools.csr_has_canonical_format(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_canonical_format
    def __set_has_canonical_format(self, val):
        # Canonical form implies sorted indices, so setting True here
        # also sets has_sorted_indices.
        self._has_canonical_format = bool(val)
        if val:
            self.has_sorted_indices = True
    has_canonical_format = property(fget=__get_has_canonical_format,
                                    fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
The is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
    def __get_sorted(self):
        """Determine whether the matrix has sorted indices

        Returns
            - True: if the indices of the matrix are in sorted order
            - False: otherwise
        """
        # first check to see if result was cached
        if not hasattr(self,'_has_sorted_indices'):
            # Compute lazily via the C routine and cache on the instance.
            self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_sorted_indices

    def __set_sorted(self, val):
        # Overwrite the cached flag (used after sorting or mutation).
        self._has_sorted_indices = bool(val)

    has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.data = self.data[:self.nnz]
self.indices = self.indices[:self.nnz]
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices."""
        other = self.__class__(other)

        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)

        # Worst-case output size: union of both sparsity patterns.
        maxnnz = self.nnz + other.nnz
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=maxnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(maxnnz, dtype=idx_dtype)

        # Comparison kernels produce boolean data.
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(maxnnz, dtype=np.bool_)
        else:
            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))

        fn(self.shape[0], self.shape[1],
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        actual_nnz = indptr[-1]
        indices = indices[:actual_nnz]
        data = data[:actual_nnz]
        if actual_nnz < maxnnz // 2:
            # too much waste, trim arrays
            indices = indices.copy()
            data = data.copy()

        A = self.__class__((data, indices, indptr), shape=self.shape)

        return A
    def _divide_sparse(self, other):
        """
        Divide this matrix by a second sparse matrix.

        For inexact dtypes the result is a dense matrix (division by the
        implicit zeros produces nan everywhere outside the pattern); for
        integer dtypes the sparse result is returned as-is.
        """
        if other.shape != self.shape:
            raise ValueError('inconsistent shapes')

        r = self._binopt(other, '_eldiv_')

        if np.issubdtype(r.dtype, np.inexact):
            # Eldiv leaves entries outside the combined sparsity
            # pattern empty, so they must be filled manually. They are
            # always nan, so that the matrix is completely full.
            out = np.empty(self.shape, dtype=self.dtype)
            out.fill(np.nan)
            r = r.tocoo()
            out[r.row, r.col] = r.data
            out = np.matrix(out)
        else:
            # integers types go with nan <-> 0
            out = r
        return out
| {
"content_hash": "6c5969554ac58b83ba01a3ac3f147559",
"timestamp": "",
"source": "github",
"line_count": 1116,
"max_line_length": 102,
"avg_line_length": 38.54390681003584,
"alnum_prop": 0.5152621178658607,
"repo_name": "haudren/scipy",
"id": "cf840960999751bc490a6b0b94d7f8d42da2d96d",
"size": "43015",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scipy/sparse/compressed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4120480"
},
{
"name": "C++",
"bytes": "3781683"
},
{
"name": "FORTRAN",
"bytes": "5574491"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "Makefile",
"bytes": "76425"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11306513"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
A place for internal code
Some things are more easily handled Python.
"""
from __future__ import division, absolute_import, print_function
import re
import sys
from numpy.compat import unicode
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
# Native byte-order character for this platform, used to normalize the
# '=' byte-order flag in dtype strings (see _commastring below).
if (sys.byteorder == 'little'):
    _nbo = b'<'
else:
    _nbo = b'>'
def _makenames_list(adict, align):
allfields = []
fnames = list(adict.keys())
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2, 3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    """Build a structured dtype from a field dictionary.

    Called by PyArray_DescrConverter when a dict without "names" and
    "formats" keys is used as a data-type descriptor.  An optional
    ``-1`` key supplies an explicit field ordering.
    """
    try:
        names = adict[-1]
    except KeyError:
        names = None

    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        # Explicit ordering given: pull each field entry in turn.
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            titles.append(res[2] if len(res) > 2 else None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
    """Build an array-protocol 'descr' list from a dtype.

    Recurses through fields and subarrays; a plain dtype reduces to its
    typestring (paired with metadata when present).  Padding bytes are
    emitted as unnamed '|V<n>' entries.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            # Subarray dtype: (base descr, shape).
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # Gap before this field: emit explicit padding.
            num = field[1] - offset
            result.append(('', '|V%d' % num))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            # Field carries a title: name becomes (title, name).
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        # Trailing padding up to the full itemsize (e.g. aligned dtypes).
        num = descriptor.itemsize - offset
        result.append(('', '|V%d' % num))

    return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    """Allocate a new, uninitialized ndarray (subclass) for unpickling.

    The dotted name numpy.core._internal._reconstruct is embedded in
    pickles made before NumPy 1.0, so this function must keep its name
    and location for backward compatibility.
    """
    return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
# Matches one item of a comma-separated format string:
# an optional byte order, an optional repeat count or shape, an optional
# second byte order, then the type specifier (possibly with [metadata]).
format_re = re.compile(br'(?P<order1>[<>|=]?)'
                       br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
                       br'(?P<order2>[<>|=]?)'
                       br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
# Item separator: comma with optional surrounding whitespace.
sep_re = re.compile(br'\s*,\s*')
# Trailing whitespace at the very end of the string.
space_re = re.compile(br'\s+$')

# astr is a string (perhaps comma separated)

# '=' requests native byte order; _nbo holds the native-order character
# (see its use in _commastring below).
_convorder = {b'=': _nbo}
def _commastring(astr):
    """Parse a comma-separated dtype format string (bytes).

    Returns a list whose items are either a typestring, or a
    ``(typestring, shape)`` tuple when a repeat count/shape prefix was
    given.  Raises ValueError for unrecognized items or inconsistent
    byte-order prefixes.
    """
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when the regex failed to match at this position.
            raise ValueError('format number %d of "%s" is not recognized' %
                             (len(result)+1, astr))
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
        # A byte-order character may appear before or after the repeat
        # count; when both are present they must agree (after mapping
        # '=' to the native-order character).
        if order2 == b'':
            order = order1
        elif order1 == b'':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1
        # '|', '=', and the native-order character are all "no explicit
        # order" and are dropped from the typestring.
        if order in [b'|', b'=', _nbo]:
            order = b''
        dtype = order + dtype
        if (repeats == b''):
            newitem = dtype
        else:
            # NOTE(review): the repeat/shape text is eval'd; input is
            # expected to be a trusted dtype specification, not user data.
            newitem = (dtype, eval(repeats))
        result.append(newitem)
    return result
class dummy_ctype(object):
    """Minimal stand-in for a ctypes integer type.

    Used when the ctypes module is unavailable; mimics just enough of
    the ctypes type API (array construction via ``*`` and instantiation
    via call) for numpy's internal use.
    """

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        # ctypes types build array types with ``c_type * n``; arrays are
        # not modelled here, so multiplication is a no-op.
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
if ctypes:
self._ctypes = ctypes
else:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
"""
Return the data pointer cast to a particular c-types object.
For example, calling ``self._as_parameter_`` is equivalent to
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
"""
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
"""
Return the shape tuple as an array of some other c-types
type. For example: ``self.shape_as(ctypes.c_short)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
"""
Return the strides tuple as an array of some other
c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
"""
A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
"""
return self._data
def get_shape(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
`ctypes.c_longlong` depending on the platform.
The c_intp type is defined accordingly in `numpy.ctypeslib`.
The ctypes array contains the shape of the underlying array.
"""
return self.shape_as(_getintp_ctype())
def get_strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
"""
return self.strides_as(_getintp_ctype())
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data)
shape = property(get_shape)
strides = property(get_strides)
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple with
    the fields named in *order* first, followed by the remaining fields
    in their original order.
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, (str, unicode)):
        order = [order]
    if not isinstance(order, (list, tuple)):
        raise ValueError("unsupported order value: %s" % (order,))
    seen = set()
    for name in order:
        try:
            nameslist.remove(name)
        except ValueError:
            # Removal fails both for unknown names and for names already
            # consumed by an earlier (duplicate) entry.
            if name in seen:
                raise ValueError("duplicate field name: %s" % (name,))
            raise ValueError("unknown field name: %s" % (name,))
        seen.add(name)
    return tuple(list(order) + nameslist)
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
# PEP 3118 format character -> NumPy typechar, for native sizes and
# alignment (selected when the stream's byte order is '@' or '^';
# see __dtype_from_pep3118).
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',  # 'Z' prefix marks the complex variant
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Same mapping for standard (fixed-size) modes ('<', '>', '=', '!'):
# 'h'/'l'/etc. map to explicit fixed widths, and the long-double entries
# ('g', 'Zg') have no standard-size equivalents here.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
class _Stream(object):
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
__nonzero__ = __bool__
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from a PEP 3118 format string *spec*."""
    # The recursive worker also returns the common alignment, which the
    # top-level caller does not need.
    result, _ = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
def __dtype_from_pep3118(stream, is_subdtype):
    """Recursive worker for _dtype_from_pep3118.

    Consumes fields from *stream* (a _Stream) until end of input or a
    closing '}', and returns ``(dtype, common_alignment)``.  Recurses
    for embedded 'T{...}' struct specifiers.
    """
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False
    # Parse spec
    while stream:
        value = None
        # End of structure, bail out to upper level
        if stream.consume('}'):
            break
        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))
        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                # '!' (network order) means big-endian
                byteorder = '>'
            stream.byteorder = byteorder
        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars
        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1
        # Data types
        is_padding = False
        if stream.consume('T{'):
            # Embedded structure, parsed recursively up to its '}'
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                # 'Z' prefix: complex variant of the following type
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For string/unicode/void the size is folded into the
                # typestring rather than acting as a repeat count.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align
            offset += start_padding
            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding
            # Update common alignment
            common_alignment = _lcm(align, common_alignment)
        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))
        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))
        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
                                   % name)
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)
        offset += value.itemsize
        offset += extra_offset
    field_spec['itemsize'] = offset
    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment
    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)
    # Finished
    return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = 'f{}'.format(j)
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
# Exception used in shares_memory()
class TooHardError(RuntimeError):
    """Raised by shares_memory() when the exact check is too hard to perform."""
    pass
class AxisError(ValueError, IndexError):
    """Raised when an axis argument is out of range for an array.

    Derives from both ValueError and IndexError for backward
    compatibility with the exceptions previously raised.
    """

    def __init__(self, axis, ndim=None, msg_prefix=None):
        if ndim is None and msg_prefix is None:
            # Single-argument form: *axis* is already the full message.
            msg = axis
        else:
            # Format the message here to save work in the C code.
            msg = ("axis {} is out of bounds for array of dimension {}"
                   .format(axis, ndim))
            if msg_prefix is not None:
                msg = "{}: {}".format(msg_prefix, msg)
        super(AxisError, self).__init__(msg)
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """Format the error message for when __array_ufunc__ gives up.

    *dummy* is unused; it absorbs the first positional argument so the
    remaining signature mirrors a __array_ufunc__ call.
    """
    pieces = ['{!r}'.format(arg) for arg in inputs]
    pieces += ['{}={!r}'.format(k, v) for k, v in kwargs.items()]
    args_string = ', '.join(pieces)
    # The operands whose types are reported include any out= arrays.
    operands = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(op).__name__) for op in operands)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
"[, signature"
", extobj]"
)
if ufunc.signature is None:
kwargs = ", where=True" + kwargs
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
def npy_ctypes_check(cls):
    """Return True if *cls* is a class created by the ctypes module.

    Needed to work around a bug in the buffer protocol for ctypes
    objects, bpo-10746.
    """
    try:
        # ctypes classes are new-style, so they have an __mro__.  This
        # probably fails for ctypes classes with multiple inheritance.
        base = cls.__mro__[-2]
        # Right now the ctypes base classes live in the _ctypes module.
        return 'ctypes' in base.__module__
    except Exception:
        return False
class recursive(object):
    '''
    Decorator class for recursive nested functions.

    A nested function that refers to itself by name holds a reference to
    itself through its closure:

        def outer(*args):
            def stringify_leaky(arg0, *arg1):
                if len(arg1) > 0:
                    return stringify_leaky(*arg1)  # <- HERE
                return str(arg0)
            stringify_leaky(*args)

    That reference cycle is difficult for the garbage collector to
    resolve.  This decorator breaks the cycle by passing the wrapper in
    as an explicit first argument `self` instead:

        def outer(*args):
            @recursive
            def stringify(self, arg0, *arg1):
                if len(arg1) > 0:
                    return self(*arg1)
                return str(arg0)
            stringify(*args)
    '''

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        # Prepend ourselves so the wrapped function can recurse via self().
        return self.func(self, *args, **kwargs)
| {
"content_hash": "d64681260b671ef031e0e247c7df13f7",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 82,
"avg_line_length": 29.751486325802617,
"alnum_prop": 0.5413452699732225,
"repo_name": "gfyoung/numpy",
"id": "30069f0ca3b57b1a2497194862d15869f1f2be84",
"size": "25021",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/core/_internal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8117128"
},
{
"name": "C++",
"bytes": "165060"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6666823"
}
],
"symlink_target": ""
} |
"""
Check for IOOS-approved attributes
"""
import re
from numbers import Number
import validators
from cf_units import Unit
from lxml.etree import XPath
from owslib.namespaces import Namespaces
from compliance_checker import base
from compliance_checker.acdd import ACDD1_3Check
from compliance_checker.base import (
BaseCheck,
BaseNCCheck,
BaseSOSDSCheck,
BaseSOSGCCheck,
Result,
TestCtx,
attr_check,
check_has,
)
from compliance_checker.cf import util as cf_util # not to be confused with cfutil.py
from compliance_checker.cf.cf import CF1_6Check, CF1_7Check
from compliance_checker.cfutil import (
get_coordinate_variables,
get_geophysical_variables,
get_instrument_variables,
get_z_variables,
)
class IOOSBaseCheck(BaseCheck):
    """Shared machinery for the IOOS profile checkers."""

    _cc_spec = "ioos"
    _cc_spec_version = "0.1"
    _cc_description = "IOOS Inventory Metadata"
    _cc_url = "https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-1.html#ioos-netcdf-metadata-profile-attributes"
    _cc_display_headers = {3: "Highly Recommended", 2: "Recommended", 1: "Suggested"}

    @classmethod
    def _has_attr(cls, ds, attr, concept_name, priority=BaseCheck.HIGH):
        """
        Checks for the existence of attr in ds, with the name/message using concept_name.
        """
        val = cls.std_check(ds, attr)
        if val:
            msgs = []
        else:
            msgs = [
                "Attr '{}' (IOOS concept: '{}') not found in dataset".format(
                    attr, concept_name
                )
            ]
        return Result(priority, val, concept_name, msgs)

    @classmethod
    def _has_var_attr(cls, dataset, vname, attr, concept_name, priority=BaseCheck.HIGH):
        """
        Checks for the existence of an attr on variable vname in dataset, with the name/message using concept_name.
        """
        msgs = []
        val = True
        if vname not in dataset.variables:
            val = False
            msgs.append(
                "Variable '{}' not present while checking for attr '{}' for IOOS concept: '{}'".format(
                    vname, attr, concept_name
                )
            )
        elif attr not in dataset.variables[vname].ncattrs():
            val = False
            msgs.append(
                "Attr '{}' not present on var '{}' while checking for IOOS concept: '{}'".format(
                    attr, vname, concept_name
                )
            )
        return Result(priority, val, concept_name, msgs)
class IOOSNCCheck(BaseNCCheck, IOOSBaseCheck):
    """netCDF-backed checks shared by the versioned IOOS profiles.

    The three public checks below all verify the same "both members of a
    min/max (or start/end) attribute pair are set" pattern; the logic is
    factored into a single private helper instead of being triplicated.
    """

    def _check_attr_group(self, ds, attrs, description):
        """Check that every global attribute in *attrs* is set on *ds*.

        :param ds: an open netCDF dataset
        :param attrs: list of global attribute names to require
        :param description: short label used in the returned Result
        :returns: Result scoring (present, total) at HIGH priority
        """
        msgs = []
        count = len(attrs)
        for attr in attrs:
            if not self.std_check(ds, attr):
                count -= 1
                msgs.append("Attr '{}' is missing".format(attr))
        return Result(BaseCheck.HIGH, (count, len(attrs)), description, msgs)

    def check_time_period(self, ds):
        """
        Check that time period attributes are both set.
        """
        return self._check_attr_group(
            ds,
            ["time_coverage_start", "time_coverage_end"],
            "time coverage start/end",
        )

    def check_station_location_lat(self, ds):
        """
        Checks station lat attributes are set
        """
        return self._check_attr_group(
            ds,
            ["geospatial_lat_min", "geospatial_lat_max"],
            "geospatial lat min/max",
        )

    def check_station_location_lon(self, ds):
        """
        Checks station lon attributes are set
        """
        return self._check_attr_group(
            ds,
            ["geospatial_lon_min", "geospatial_lon_max"],
            "geospatial lon min/max",
        )
class IOOS0_1Check(IOOSNCCheck):
    """Checker for the original (v0.1) IOOS inventory metadata profile."""

    _cc_spec_version = "0.1"
    _cc_description = "IOOS Inventory Metadata"
    register_checker = True

    def check_global_attributes(self, ds):
        """
        Check all global NC attributes for existence.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        # (attribute, IOOS concept, priority) triples.
        concepts = [
            ("acknowledgement", "Platform Sponsor", BaseCheck.HIGH),
            ("publisher_email", "Station Publisher Email", BaseCheck.HIGH),
            ("publisher_email", "Service Contact Email", BaseCheck.MEDIUM),
            ("institution", "Service Provider Name", BaseCheck.MEDIUM),
            ("publisher_name", "Service Contact Name", BaseCheck.MEDIUM),
            ("Conventions", "Data Format Template Version", BaseCheck.MEDIUM),
            ("publisher_name", "Station Publisher Name", BaseCheck.HIGH),
        ]
        return [
            self._has_attr(ds, attr, concept, priority)
            for attr, concept, priority in concepts
        ]

    def check_variable_attributes(self, ds):
        """
        Check IOOS concepts that come from NC variable attributes.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        # All of these concepts live on the 'platform' variable.
        concepts = [
            ("long_name", "Station Long Name"),
            ("short_name", "Station Short Name"),
            ("source", "Platform Type"),
            ("ioos_name", "Station ID"),
            ("wmo_id", "Station WMO ID"),
            ("comment", "Station Description"),
        ]
        return [
            self._has_var_attr(ds, "platform", attr, concept)
            for attr, concept in concepts
        ]

    def check_variable_names(self, ds):
        """
        Ensures all variables have a standard_name set.
        """
        msgs = []
        count = 0
        for name, var in ds.variables.items():
            if "standard_name" in var.ncattrs():
                count += 1
            else:
                msgs.append("Variable '{}' missing standard_name attr".format(name))
        return Result(
            BaseCheck.MEDIUM, (count, len(ds.variables)), "Variable Names", msgs
        )

    def check_altitude_units(self, ds):
        """
        If there's a variable named z, it must have units.

        @TODO: this is duplicated with check_variable_units

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        if "z" not in ds.variables:
            return Result(
                BaseCheck.LOW, (0, 0), "Altitude Units", ["Dataset has no 'z' variable"]
            )
        has_units = "units" in ds.variables["z"].ncattrs()
        msgs = [] if has_units else ["Variable 'z' has no units attr"]
        return Result(BaseCheck.LOW, has_units, "Altitude Units", msgs)

    def check_variable_units(self, ds):
        """
        Ensures all variables have units.
        """
        missing = [
            "Variable '{}' missing units attr".format(name)
            for name, var in ds.variables.items()
            if "units" not in var.ncattrs()
        ]
        count = len(ds.variables) - len(missing)
        return Result(
            BaseCheck.MEDIUM, (count, len(ds.variables)), "Variable Units", missing
        )
class IOOS1_1Check(IOOSNCCheck):
    """
    Compliance checker implementation of IOOS Metadata Profile, Version 1.1

    Related links:
    https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-1.html#ioos-netcdf-metadata-profile-attributes
    https://github.com/ioos/compliance-checker/issues/69
    https://github.com/ioos/compliance-checker/issues/358
    """

    _cc_spec_version = "1.1"
    _cc_description = "IOOS Metadata Profile, Version 1.1"
    _cc_url = "https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-1.html#ioos-netcdf-metadata-profile-attributes"
    register_checker = True

    def __init__(self):
        # Global attributes that must exist (highly recommended).
        self.required_atts = [
            "contributor_name",
            "contributor_role",
            "creator_country",
            "creator_email",
            "creator_sector",
            "featureType",
            "id",
            "institution",
            "naming_authority",
            "platform",
            "platform_vocabulary",
            "publisher_country",
            "publisher_email",
            "publisher_name",
            "standard_name_vocabulary",
            "title",
        ]
        # Global attributes that should exist (recommended).
        self.rec_atts = [
            "creator_address",
            "creator_city",
            "creator_name",
            "creator_phone",
            "creator_state",
            "creator_url",
            "creator_zipcode",
            "keywords",
            "license",
            "publisher_address",
            "publisher_city",
            "publisher_phone",
            "publisher_state",
            "publisher_url",
            "publisher_zipcode",
            "summary",
        ]

    @check_has(BaseCheck.HIGH)
    def check_high(self, ds):
        """
        Performs a check on each highly recommended attributes' existence in the dataset

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return self.required_atts

    @check_has(BaseCheck.MEDIUM)
    def check_recommended(self, ds):
        """
        Performs a check on each recommended attributes' existence in the dataset

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return self.rec_atts

    def check_platform_variables(self, ds):
        """
        The value of platform attribute should be set to another variable which
        contains the details of the platform. There can be multiple platforms
        involved depending on if all the instances of the featureType in the
        collection share the same platform or not. If multiple platforms are
        involved, a variable should be defined for each platform and referenced
        from the geophysical variable in a space separated string.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        platform_names = getattr(ds, "platform", "").split(" ")
        all_present = all(name in ds.variables for name in platform_names)
        msgs = []
        if not all_present:
            msgs = [
                (
                    'The value of "platform" global attribute should be set to another variable '
                    "which contains the details of the platform. If multiple platforms are "
                    "involved, a variable should be defined for each platform and referenced "
                    "from the geophysical variable in a space separated string."
                )
            ]
        return [Result(BaseCheck.HIGH, all_present, "platform variables", msgs)]

    def check_platform_variable_attributes(self, ds):
        """
        Platform variables must contain the following attributes:
            ioos_code
            long_name
            short_name
            type

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        results = []
        # "platform" may name several variables (space separated).
        for platform in getattr(ds, "platform", "").split(" "):
            if platform not in ds.variables:
                continue
            results.append(
                self._has_var_attr(ds, platform, "long_name", "Platform Long Name"))
            results.append(
                self._has_var_attr(ds, platform, "short_name", "Platform Short Name"))
            results.append(
                self._has_var_attr(ds, platform, "ioos_code", "Platform IOOS Code"))
            results.append(
                self._has_var_attr(ds, platform, "type", "Platform Type"))
        return results

    def check_geophysical_vars_fill_value(self, ds):
        """
        Check that geophysical variables contain fill values.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return [
            self._has_var_attr(ds, geo_var, "_FillValue", "_FillValue",
                               BaseCheck.MEDIUM)
            for geo_var in get_geophysical_variables(ds)
        ]

    def check_geophysical_vars_standard_name(self, ds):
        """
        Check that geophysical variables contain standard names.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return [
            self._has_var_attr(ds, geo_var, "standard_name",
                               "geophysical variables standard_name")
            for geo_var in get_geophysical_variables(ds)
        ]

    def check_units(self, ds):
        """
        Required for most all variables that represent dimensional quantities.
        The value should come from udunits authoritative vocabulary, which is
        documented in the CF standard name table with it's corresponding
        standard name.  Delegates to the CF 1.6 checker.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return CF1_6Check().check_units(ds)
class IOOS1_2_ConventionsValidator(base.RegexValidator):
    # Require the literal token "IOOS-1.2" in the Conventions attribute.
    # Fixes two defects: the dot is now escaped, so strings such as
    # "IOOS-1x2" no longer match; and the failure message names the exact
    # expected token (the previous message said 'IOOS 1.2' with a space,
    # which the regex would never have accepted).
    validator_regex = r"\bIOOS-1\.2\b"
    validator_fail_msg = '{} must contain the string "IOOS-1.2"'
class IOOS1_2_PlatformIDValidator(base.RegexValidator):
    # platform_id must consist solely of ASCII letters and digits.
    validator_regex = r"^[a-zA-Z0-9]+$"
    validator_fail_msg = "{} must be alphanumeric"
class NamingAuthorityValidator(base.UrlValidator):
    """
    Check that naming_authority is either a URL or a reversed-DNS string.
    """

    validator_fail_msg = (
        '{} should either be a URL or a reversed DNS name (e.g "edu.ucar.unidata")'
    )

    def validator_func(self, input_value):
        # Accept a plain URL (base class behavior); otherwise flip the
        # dot-separated components and test the result as a domain name.
        reversed_dns = ".".join(input_value.split(".")[::-1])
        return super().validator_func(input_value) or validators.domain(reversed_dns)
class IOOS1_2Check(IOOSNCCheck):
"""
Class to implement the IOOS Metadata 1.2 Specification
"""
_cc_spec_version = "1.2"
_cc_description = "IOOS Metadata Profile, Version 1.2"
_cc_url = "https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-2.html"
register_checker = True
def __init__(self):
    """Set up delegate checkers and the profile's attribute tables."""
    # instantiate objects used for delegation
    # NOTE(review): the attribute is named acdd1_6 but holds an
    # ACDD1_3Check instance — confirm which is intended.
    self.acdd1_6 = ACDD1_3Check()
    self.cf1_7 = CF1_7Check()
    # extend standard_names set to include QARTOD standard_names
    self._qartod_std_names = [
        "aggregate_quality_flag",
        "attenuated_signal_test_quality_flag",
        "climatology_test_quality_flag",
        "flat_line_test_quality_flag",
        "gap_test_quality_flag",
        "gross_range_test_quality_flag",
        "location_test_quality_flag",
        "multi_variate_test_quality_flag",
        "neighbor_test_quality_flag",
        "rate_of_change_test_quality_flag",
        "spike_test_quality_flag",
        "syntax_test_quality_flag",
    ]
    self.cf1_7._std_names._names.extend(self._qartod_std_names)
    # Attributes required on every checked variable.
    self._default_check_var_attrs = set(
        [
            ("_FillValue", BaseCheck.MEDIUM),
            ("missing_value", BaseCheck.MEDIUM),
            # ( "standard_name", BaseCheck.HIGH # already checked in CF1_7Check.check_standard_name()
            # ( "units", BaseCheck.HIGH # already checked in CF1_7Check.check_units()
        ]
    )
    # geophysical variables must have the following attrs:
    self.geophys_check_var_attrs = self._default_check_var_attrs.union(
        set(
            [
                ("standard_name_url", BaseCheck.MEDIUM),
                # ( "platform", BaseCheck.HIGH) # checked under check_single_platform()
                # ( "wmo_platform_code", BaseCheck.HIGH) # only "if applicable", see check_wmo_platform_code()
                # ( "ancillary_variables", BaseCheck.HIGH) # only "if applicable", see _check_var_gts_ingest()
                # ("accuracy", BaseCheck.MEDIUM), see check_accuracy
                ("precision", BaseCheck.MEDIUM),
                ("resolution", BaseCheck.MEDIUM),
            ]
        )
    )
    # geospatial vars must have the following attrs:
    self.geospat_check_var_attrs = self._default_check_var_attrs
    # valid contributor_role values
    self.valid_contributor_roles = set(
        [  # NERC and NOAA
            "author",
            "coAuthor",
            "collaborator",
            "contributor",
            "custodian",
            "distributor",
            "editor",
            "funder",
            "mediator",
            "originator",
            "owner",
            "pointOfContact",
            "principalInvestigator",
            "processor",
            "publisher",
            "resourceProvider",
            "rightsHolder",
            "sponsor",
            "stakeholder",
            "user",
        ]
    )
    # Vocabularies from which contributor_role values may come.
    self.valid_contributor_role_vocabs = set(
        [
            "http://vocab.nerc.ac.uk/collection/G04/current/",
            "https://vocab.nerc.ac.uk/collection/G04/current/",
            "http://www.ngdc.noaa.gov/wiki/index.php?title=ISO_19115_and_19115-2_CodeList_Dictionaries#CI_RoleCode",
            "https://www.ngdc.noaa.gov/wiki/index.php?title=ISO_19115_and_19115-2_CodeList_Dictionaries#CI_RoleCode",
        ]
    )
    # Required global attributes; entries are either a bare attribute
    # name or an (attribute, validator-or-allowed-values) pair.
    self.required_atts = [
        ("Conventions", IOOS1_2_ConventionsValidator()),
        "creator_country",
        ("creator_email", base.EmailValidator()),
        "creator_institution",
        (
            "creator_sector",
            {
                "gov_state",
                "nonprofit",
                "tribal",
                "other",
                "unknown",
                "gov_municipal",
                "industry",
                "gov_federal",
                "academic",
            },
        ),
        ("creator_url", base.UrlValidator()),
        "featureType",
        "id",
        ("infoUrl", base.UrlValidator()),
        "license",
        ("naming_authority", NamingAuthorityValidator()),
        #'platform', # checked in check_platform_global
        "platform_name",
        "publisher_country",
        ("publisher_email", base.EmailValidator()),
        "publisher_institution",
        ("publisher_url", base.UrlValidator()),
        # TODO: handle standard name table exclusion for v38?
        (
            "standard_name_vocabulary",
            re.compile(r"^CF Standard Name Table v[1-9]\d*$"),
        ),
        "summary",
        "title",
    ]
    # Recommended global attributes, in the same format.
    self.rec_atts = [
        ("contributor_email", base.EmailValidator(base.csv_splitter)),
        "contributor_name",
        ("contributor_url", base.UrlValidator(base.csv_splitter)),
        "creator_address",
        "creator_city",
        "creator_name",
        "creator_phone",
        "creator_postalcode",
        "creator_state",
        # checked in check_creator_and_publisher_type
        #'creator_type',
        "institution",
        "instrument",
        # checked in check_ioos_ingest
        #'ioos_ingest',
        "keywords",
        ("platform_id", IOOS1_2_PlatformIDValidator()),  # alphanumeric only
        "publisher_address",
        "publisher_city",
        "publisher_name",
        "publisher_phone",
        "publisher_postalcode",
        "publisher_state",
        # checked in check_creator_and_publisher_type
        #'publisher_type',
        "references",
        "instrument_vocabulary",
    ]
    def setup(self, ds):
        """
        Cache dataset-derived state before individual checks run.

        Stores the set of platform variables referenced by this dataset on
        ``self.platform_vars`` (see ``_find_platform_vars``).

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        self.platform_vars = self._find_platform_vars(ds)
def _find_platform_vars(self, ds):
"""
Finds any variables referenced by 'platform' attribute which exist in
the dataset.
Parameters
----------
ds: netCDF4.Dataset
An open netCDF4 Dataset.
Returns
-------
set of netCDF4.Variable
Set of variables which are platform variables.
"""
plat_vars = ds.get_variables_by_attributes(
platform=lambda p: isinstance(p, str)
)
return {
ds.variables[var.platform]
for var in plat_vars
if var.platform in ds.variables
}
    @check_has(BaseCheck.HIGH)
    def check_high(self, ds):
        """
        Performs a check on each highly recommended attributes' existence in the dataset

        The returned list (attribute names, optionally paired with validators)
        is built in ``__init__``; the ``@check_has`` decorator performs the
        actual presence/validity checking.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return self.required_atts
    @check_has(BaseCheck.MEDIUM)
    def check_recommended(self, ds):
        """
        Performs a check on each recommended attributes' existence in the dataset

        The returned list (attribute names, optionally paired with validators)
        is built in ``__init__``; the ``@check_has`` decorator performs the
        actual presence/validity checking.

        :param netCDF4.Dataset ds: An open netCDF dataset
        """
        return self.rec_atts
    def check_standard_name(self, ds):
        """
        Wrapper for checking standard names using the CF module.

        Extends the StandardNameTable to include QARTOD variable
        standard names.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :return: results produced by the delegated CF-1.7 check
        """
        # Pure delegation to the CF-1.7 checker instance.
        return self.cf1_7.check_standard_name(ds)
    def check_feature_type(self, ds):
        """
        Wrapper for checking featureType global attribute using the CF module.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :return: results produced by the delegated CF-1.7 check
        """
        return self.cf1_7.check_feature_type(ds)
    def check_units(self, ds):
        """
        Wrapper to check units with the CF module.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :return: results produced by the delegated CF-1.7 check
        """
        return self.cf1_7.check_units(ds)
def check_ioos_ingest(self, ds):
"""
If a dataset contains the global attribute ioos_ingest,
its value must be "false". All datasets are assumed to be
ingested except those with this flag. If the dataset should
be ingested, no flag (or "true") should be present.
Parameters
----------
ds: netCDF4.Dataset (open)
Returns
-------
Result
"""
r = True
m = (
"To disallow harvest of this dataset to IOOS national products, "
'global attribute "ioos_ingest" must be a string with value "false"'
)
igst = getattr(ds, "ioos_ingest", None)
if (isinstance(igst, str) and igst.lower() not in ("true", "false")) or (
not isinstance(igst, str) and igst is not None
):
r = False
return Result(BaseCheck.MEDIUM, r, "ioos_ingest", None if r else [m])
def check_contributor_role_and_vocabulary(self, ds):
"""
Check the dataset has global attributes contributor_role and
contributor_role_vocabulary. It is recommended to come from
one of NERC or NOAA-NCEI.
Parameters
----------
ds: netCDF4.Dataset (open)
Returns
-------
list of Result objects
"""
role = getattr(ds, "contributor_role", None)
vocb = getattr(ds, "contributor_role_vocabulary", None)
role_val = False
vocb_val = False
role_msg = "contributor_role '{}' should be from NERC or NOAA-NCEI"
vocb_msg = "contributor_role_vocabulary '{}' should be one of NERC or NOAA-NCEI"
role_results = []
if role:
# in case it's a CSV, split it and iterate through all
try:
_roles = base.csv_splitter(role)
for _role in _roles:
role_val = _role in self.valid_contributor_roles
role_results.append(
Result(
BaseCheck.MEDIUM,
role_val,
"contributor_role",
None if role_val else [role_msg.format(_role)],
)
)
except TypeError as e:
role_results.append(
Result(
BaseCheck.MEDIUM,
False,
"contributor_role",
["contributor_role '{}' must be of type 'string'".format(role)],
)
)
else:
role_results.append(
Result(
BaseCheck.MEDIUM,
False,
"contributor_role",
["contributor_role should be present"],
)
)
vocb_results = []
if vocb:
try:
_vocbs = base.csv_splitter(vocb)
for _vocb in _vocbs:
vocb_val = _vocb in self.valid_contributor_role_vocabs
vocb_results.append(
Result(
BaseCheck.MEDIUM,
vocb_val,
"contributor_role_vocabulary",
None if vocb_val else [vocb_msg.format(_vocb)],
)
)
except TypeError as e:
vocb_results.append(
Result(
BaseCheck.MEDIUM,
False,
"contributor_role_vocabulary",
[
"contributor_role_vocabulary '{}' must be of type 'string'".format(
vocb
)
],
)
)
else:
vocb_results.append(
Result(
BaseCheck.MEDIUM,
False,
"contributor_role_vocabulary",
["contributor_role_vocabulary should be present"],
)
)
return role_results + vocb_results
def check_geophysical_vars_have_attrs(self, ds):
"""
All geophysical variables must have certain attributes.
Parameters
----------
ds: netCDF4.Dataset
Returns
-------
list: list of Result objects
"""
# get geophysical variables
geophys_vars = get_geophysical_variables(ds) # list of str
results = self._check_vars_have_attrs( # list
ds, geophys_vars, self.geophys_check_var_attrs
)
return results
def check_accuracy(self, ds):
"""
Special check for accuracy when in the salinity context.
https://github.com/ioos/compliance-checker/issues/839
Parameters
----------
ds: netCDF4.Dataset
Returns
-------
list of Results objects
"""
results = []
msg = (
"Variable '{v}' attribute 'accuracy' should have the " "same units as '{v}'"
)
for v in get_geophysical_variables(ds):
_v = ds.variables[v]
std_name = getattr(_v, "standard_name", None)
gts_ingest = getattr(_v, "gts_ingest", None)
if (std_name == "sea_water_practical_salinity") and (gts_ingest == "true"):
msg = (
"Variable '{v}' should have an 'accuracy' attribute "
"that is numeric and of the same units as '{v}'"
)
r = isinstance(getattr(_v, "accuracy", None), Number)
else: # only test if exists
r = getattr(_v, "accuracy", None) is not None
results.append(
Result(
BaseCheck.MEDIUM,
r,
"geophysical_variable:accuracy",
[msg.format(v=v)],
)
)
return results
def check_geospatial_vars_have_attrs(self, ds):
"""
All geospatial variables must have certain attributes.
Parameters
----------
ds: netCDF4.Dataset
Returns
-------
list: list of Result objects
"""
return self._check_vars_have_attrs(
ds, get_coordinate_variables(ds), self.geospat_check_var_attrs
)
def _check_vars_have_attrs(self, ds, vars_to_check, atts_to_check):
"""
Check that the variables in vars_to_check have the attributes in
atts_to_check.
Parameters
----------
ds: netCDF4.Dataset (open)
Returns
-------
list of Result objects
"""
results = []
for var in vars_to_check:
for attr_tuple in atts_to_check:
results.append(
self._has_var_attr(
ds,
var,
attr_tuple[0], # attribute name
attr_tuple[0], # attribute name used as 'concept_name'
attr_tuple[1], # priority level
)
)
return results
def check_cf_role_variables(self, ds):
"""
The IOOS-1.2 specification details the following requirements regarding
the cf_role attribute and its relation to variable dimensionality:
cf_role may be applied to the "Platform Variable", as indicated by
geophysical_variable:platform, but it may also be an independent
variable. To comply with the single platform per dataset rule of
the IOOS Metadata Profile, the cf_role variable will typically
have a dimension of 1, unless it is a TimeSeries dataset following
the 'TimeSeries - multiple station' format.
To summarize the rules checked in this method:
- 'timeseries', cf_role var must have dim 1
- 'timeseriesprofile' must have
cf_role=timeseries_id variable have dim 1 and dim of cf_role=profile_id
can be > 1
- 'trajectory' or 'trajectoryprofile' variable with cf_role=trajectory_id
must have dim 1, cf_role=profile_id variable can be > 1
Relevant documentation found in the specification as well as GitHub issues:
https://github.com/ioos/compliance-checker/issues/748#issuecomment-606659685
https://github.com/ioos/compliance-checker/issues/828
"""
fType = getattr(ds, "featureType", None)
if (not fType) or (not isinstance(fType, str)): # can't do anything, pass
return Result(
BaseCheck.MEDIUM,
False,
"CF DSG: Invalid featureType",
[
(
f"Invalid featureType '{fType}'; please see the "
"IOOS 1.2 Profile and CF-1.7 Conformance documents for valid featureType"
)
],
)
featType = fType.lower()
if featType == "timeseries":
return self._check_feattype_timeseries_cf_role(ds)
elif featType == "timeseriesprofile":
return self._check_feattype_timeseriesprof_cf_role(ds)
elif featType == "trajectory":
return self._check_feattype_trajectory_cf_role(ds)
elif featType == "trajectoryprofile":
return self._check_feattype_trajprof_cf_role(ds)
elif featType == "profile":
return self._check_feattype_profile_cf_role(ds)
elif featType == "point":
return good_result # can't do anything
else:
return Result(
BaseCheck.MEDIUM,
False,
"CF DSG: Unknown featureType",
[
(
f"Invalid featureType '{featType}'; please see the "
"IOOS 1.2 Profile and CF-1.7 Conformance documents for valid featureType"
)
],
)
def _check_feattype_timeseries_cf_role(self, ds):
ts_msg = (
"Dimension length of variable with cf_role={cf_role} "
"(the '{dim_type}' dimension) is {dim_len}. "
"The IOOS Profile restricts timeSeries "
"datasets with multiple features to share the same lat/lon position "
"(i.e. to exist on the same platform). Datasets that include multiple "
"platforms are not valid and will cause harvesting errors."
)
# looking for cf_role=timeseries_id
cf_role_vars = ds.get_variables_by_attributes(cf_role="timeseries_id")
if (not cf_role_vars) or (len(cf_role_vars) > 1):
_val = False
msgs = [
(
"The IOOS-1.2 Profile specifies a single variable "
"must be present with attribute cf_role=timeseries_id"
)
]
else:
_v = cf_role_vars[0]
_dims = _v.get_dims()
if not _dims:
_dimsize = 0
else:
_dimsize = _dims[0].size
# dimension size must be == 1
_val = _dimsize == 1
msgs = [
ts_msg.format(
cf_role="timeseries_id", dim_type="station", dim_len=_dimsize
)
]
return Result(
BaseCheck.HIGH,
_val,
"CF DSG: featureType=timeseries",
msgs,
)
def _check_feattype_timeseriesprof_cf_role(self, ds):
ts_prof_msg = (
"Dimension length of non-platform variable with cf_role={cf_role} "
" (the '{dim_type}' dimension) is {dim_len}. "
"The IOOS profile restricts timeSeriesProfile datasets to a "
"single platform (ie. station) per dataset "
"(the profile dimension is permitted to be >= 1."
)
# looking for cf_roles timeseries_id and profile_id
cf_role_vars = [] # extend in specific order for easier checking
cf_role_vars.extend(ds.get_variables_by_attributes(cf_role="timeseries_id"))
cf_role_vars.extend(ds.get_variables_by_attributes(cf_role="profile_id"))
if len(cf_role_vars) != 2:
_val = False
msgs = [
(
"Datasets of featureType=timeSeriesProfile must have variables "
"containing cf_role=timeseries_id and cf_role=profile_id"
)
]
else:
_ts_id_dims = cf_role_vars[0].get_dims() # timeseries_id dimensions
if not _ts_id_dims:
_ts_id_dimsize = 0
else:
_ts_id_dimsize = _ts_id_dims[0].size
_pf_id_dims = cf_role_vars[1].get_dims() # profilie_id dimensions
if not _pf_id_dims:
_pf_id_dimsize = 0
else:
_pf_id_dimsize = _pf_id_dims[0].size
# timeseries_id must be == 1, profile >= 1
_val = _ts_id_dimsize == 1 and _pf_id_dimsize >= 1
msgs = [
ts_prof_msg.format(
cf_role="timeseries_id", dim_type="station", dim_len=_ts_id_dimsize
)
]
return Result(
BaseCheck.HIGH, _val, "CF DSG: featureType=timeSeriesProfile", msgs
)
def _check_feattype_trajectory_cf_role(self, ds):
trj_msg = (
"Dimension length of non-platform variable with cf_role={cf_role} "
" (the '{dim_type}' dimension) is {dim_len}. "
"The IOOS profile restricts trjectory "
"datasets to a single platform (i.e. trajectory) per dataset."
)
cf_role_vars = ds.get_variables_by_attributes(cf_role="trajectory_id")
if len(cf_role_vars) != 1:
_val = False
msgs = [
(
"Datasets of featureType=trajectory must have a variable "
"containing cf_role=trajectory_id"
)
]
else:
_v = cf_role_vars[0]
_dims = _v.get_dims()
if not _dims:
_dimsize = 0
else:
_dimsize = _dims[0].size
# trajectory dimension must be 1
_val = _dimsize == 1
msgs = [
trj_msg.format(
cf_role="trajectory_id", dim_type="station", dim_len=_dimsize
)
]
return Result(BaseCheck.HIGH, _val, "CF DSG: featureType=trajectory", msgs)
def _check_feattype_trajectoryprof_cf_role(self, ds):
trj_prof_msg = (
"Dimension length of non-platform variable with cf_role={cf_role} "
"(the '{dim_type}' dimension) is {dim_len}. "
"The IOOS profile restricts trjectory and trajectoryProfile "
"datasets to a single platform (ie. trajectory) per dataset "
"(the profile dimension is permitted to be >= 1)."
)
# looking for cf_roles trajectory_id and profile_id
cf_role_vars = [] # extend in specific order for easier checking
cf_role_vars.extend(ds.get_variables_by_attributes(cf_role="trajectory_id"))
cf_role_vars.extend(ds.get_variables_by_attributes(cf_role="profile_id"))
if len(cf_role_vars) != 2:
_val = False
msgs = [
(
"Datasets of featureType=trajectoryProfile must have variables "
"containing cf_role=trajectory_id and cf_role=profile_id"
)
]
else:
_trj_id_dims = cf_role_vars[0].get_dims()
if not _trj_id_dims:
_trj_id_dimsize = 0
else:
_trj_id_dimsize = _trj_id_dims[0].size
_prf_id_dims = cf_role_vars[1].get_dims()
if not _prf_id_dims:
_prf_id_dimsize = 0
else:
_prf_id_dimsize = _prf_id_dims[0].size
# trajectory dim must be == 1, profile must be >= 1
_val = _trj_id_dimsize == 1 and _prf_id_dimsize >= 1
msgs = [
trj_prof_msg.format(
cf_role="trajectory_id", dim_type="station", dim_len=_trj_id_dimsize
)
]
return Result(
BaseCheck.HIGH, _val, "CF DSG: featureType=trajectoryProfile", msgs
)
def _check_feattype_profile_cf_role(self, ds):
prof_msg = (
"Dimension length of non-platform variable with cf_role={cf_role} "
" (the '{dim_type}' dimension) is {dim_len}. "
"The IOOS profile restricts profile datasets to a single "
"platform (ie. profile) per dataset."
)
# looking for cf_role=profile_id
cf_role_vars = ds.get_variables_by_attributes(cf_role="profile_id")
if (not cf_role_vars) or (len(cf_role_vars) > 1):
_val = False
msgs = [
"None or multiple variables found with cf_role=profile_id; only one is allowed"
]
else:
_v = cf_role_vars[0]
_dims = _v.get_dims()
if not _dims:
_dimsize = 0
else:
_dimsize = _dims[0].size
# only one profile valid
_val = _dimsize == 1
msgs = [
prof_msg.format(
cf_role="profile_id", dim_type="profile", dim_len=_dimsize
)
]
return Result(BaseCheck.HIGH, _val, "CF DSG: featureType=profile", msgs)
def check_creator_and_publisher_type(self, ds):
"""
Check if global attribute creator_type and publisher_type
are contained within the values "person", "group", "institution", or
"position". If creator_type is not present within the global
attributes, assume it is set to a value of "person".
Parameters
----------
ds: netCDF4.Dataset
An open netCDF4 Dataset
Returns
-------
list of Result
"""
result_list = []
for global_att_name in ("creator_type", "publisher_type"):
messages = []
try:
att_value = ds.getncattr(global_att_name)
except AttributeError:
# if the attribute isn't found, it's implicitly assigned
# a value of "person", so it automatically passes.
pass_stat = True
else:
expected_types = {"person", "group", "institution", "position"}
if att_value in expected_types:
pass_stat = True
else:
pass_stat = False
messages.append(
"If specified, {} must be in value list "
"({})".format(global_att_name, sorted(expected_types))
)
result_list.append(
Result(BaseCheck.MEDIUM, pass_stat, global_att_name, messages)
)
return result_list
def check_platform_global(self, ds):
"""
The "platform" attribute must be a single string containing
no blank characters.
Parameters
----------
ds: netCDF4.Dataset (open)
Returns
-------
Result
"""
r = False
m = (
'The global attribute "platform" must be a single string '
+ "containing no blank characters; it is {}"
)
p = getattr(ds, "platform", None)
if p:
if re.match(r"^\S+$", p):
r = True
return Result(BaseCheck.HIGH, r, "platform", None if r else [m.format(p)])
def check_single_platform(self, ds):
"""
Verify that a dataset only has a single platform attribute, and thus
a single platform variable. Gridded model datasets are not required
to declare a platform or platform variables.
Args:
ds (netCDF-4 Dataset): open Dataset object
Returns:
Result
"""
glb_platform = getattr(ds, "platform", None)
platform_set = set()
for v in ds.get_variables_by_attributes(platform=lambda x: x is not None):
platform_set.add(v.getncattr("platform"))
num_platforms = len(platform_set)
if num_platforms > 1 and glb_platform:
msg = "A dataset may only have one platform; {} found".format(
len(platform_set)
)
val = False
elif (not glb_platform) and num_platforms > 0:
msg = 'If a platform variable exists, a global attribute "platform" must also exist'
val = False
elif num_platforms == 0 and glb_platform:
msg = 'A dataset with a global "platform" attribute must have at least one platform variable'
val = False
elif num_platforms == 0 and (not glb_platform):
msg = "Gridded model datasets are not required to declare a platform"
val = True
else:
val = True
return Result(BaseCheck.HIGH, val, "platform", None if val else [msg])
def check_platform_vocabulary(self, ds):
"""
The platform_vocabulary attribute is recommended to be a URL to
http://mmisw.org/ont/ioos/platform or
http://vocab.nerc.ac.uk/collection/L06/current/. However,
it is required to at least be a URL.
Args:
ds (netCDF4.Dataset): open Dataset
Returns:
Result
"""
m = "platform_vocabulary must be a valid URL"
pvocab = getattr(ds, "platform_vocabulary", "")
val = bool(validators.url(pvocab))
return Result(
BaseCheck.MEDIUM, val, "platform_vocabulary", None if val else [m]
)
def _check_gts_ingest_val(self, val):
"""
Check that `val` is a str and is equal to "true" or "false"
Parameters
----------
val (?): value to check
Returns
-------
bool
"""
return isinstance(val, str) and val.lower() in {"true", "false"}
def check_vertical_coordinates(self, ds):
"""
Check that vertical units (corresponding to axis "Z") are a unit
equivalent to one of "meter", "inch", "foot", "yard", "US_survey_foot",
"mile", or "fathom". Check that the vertical coordinate variable
"positive" attribute is either "up" or "down". Note that unlike the CF
version of this check, pressure units are not accepted and length units
are constrained to the aforementioned set instead of accepting any valid
UDUNITS length unit.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for name in get_z_variables(ds):
variable = ds.variables[name]
units_str = getattr(variable, "units", None)
positive = getattr(variable, "positive", None)
expected_unit_strs = (
"meter",
"inch",
"foot",
"yard",
"US_survey_foot",
"mile",
"fathom",
)
unit_def_set = {
Unit(unit_str).definition for unit_str in expected_unit_strs
}
try:
units = Unit(units_str)
pass_stat = units.definition in unit_def_set
# unknown unit not convertible to UDUNITS
except ValueError:
pass_stat = False
valid_vertical_coord = TestCtx(BaseCheck.HIGH, "Vertical coordinates")
units_set_msg = (
"{}'s units attribute {} is not equivalent to one "
"of {}".format(name, units_str, expected_unit_strs)
)
valid_vertical_coord.assert_true(pass_stat, units_set_msg)
pos_msg = (
"{}: vertical coordinates must include a positive "
"attribute that is either 'up' or 'down'".format(name)
)
valid_vertical_coord.assert_true(positive in ("up", "down"), pos_msg)
ret_val.append(valid_vertical_coord.to_result())
return ret_val
def check_gts_ingest_global(self, ds):
"""
Check if a dataset has the global attribute "gts_ingest" and that
it matches "true" or "false". This attribute is "required, if applicable".
Parameters
----------
ds (netCDF4.Dataset): open dataset
Returns
-------
Result
"""
gts_ingest_value = getattr(ds, "gts_ingest", None)
is_valid_string = True
if isinstance(gts_ingest_value, str):
is_valid_string = self._check_gts_ingest_val(gts_ingest_value)
fail_message = [
'Global attribute "gts_ingest" must be a string "true" or "false"'
]
return Result(
BaseCheck.HIGH,
is_valid_string,
"NDBC/GTS Ingest Requirements",
None if is_valid_string else fail_message,
)
def _var_qualifies_for_gts_ingest(self, ds, var):
"""
Examine a variable to see if it qualifies for GTS Ingest.
Check that a given variable
- has a valid CF standard name (checked with check_standard_names())
- has a QARTOD aggregates variable
- has valid units (checked with check_units())
Parameters
----------
ds (netCDF4.Dataset): open Dataset
var (netCDF4.Variable): variable from dataset
Returns
-------
bool
"""
val = False
# should have an ancillary variable with standard_name aggregate_quality_flag
avar_val = False
anc_vars = str(getattr(var, "ancillary_variables", "")).split(" ")
for av in anc_vars:
if av in ds.variables:
if (
getattr(ds.variables[av], "standard_name", "")
== "aggregate_quality_flag"
):
avar_val = True
break
# should have compliant standard_name
# NOTE: standard names are checked extensively in self.check_standard_names()
# but that method delegates to CF1_7Check.check_standard_name(), which loops through
# ALL the variables; this takes the absolute core of that check and ASSUMES that the
# current variable being checked is a coordinate variable, auxiliary coordinate
# variable, axis variable, flag variable, or geophysical variable
std_name = getattr(var, "standard_name", False)
valid_std_name = std_name in self.cf1_7._std_names
# should have compliant units
# NOTE: units are checked extensively in self.check_units(), which delegates
# to CF1_7Check.check_units() --> CF1_6Check.check_units(), which loops through
# ALL variables; this takes the absolute core and assumes that the variable does
# not need dimensionless units nor are the units to be compared with any known
# deprecated ones; it would be nice to reuse machinery, but the similarly convoluted
# CF1_6Check.check_units() method is too tangled to use directly and would cause a huge
# time increase
units = getattr(var, "units", None)
has_udunits = units is not None and cf_util.units_known(units)
return avar_val and valid_std_name and has_udunits
def check_gts_ingest_requirements(self, ds):
"""
Check which variables qualify for ingest.
According to https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-2.html#requirements-for-ioos-dataset-gts-ingest,
the gts_ingest is "required, if applicable". Any variables which a user
would like ingested must also contain the gts_ingest attribute with a
value of true. The variable must:
- have a valid CF standard_name attribute (already checked)
- have an ancillary variable reqpresenting QARTOD aggregate flags
- have a valid udunits units attribute (already checked)
This check will always fail so as to notify the user which variables
qualified/did not qualify for ingest.
https://github.com/ioos/compliance-checker/issues/759#issuecomment-629454412
Parameters
----------
ds (netCDF4.Dataset): open Dataset
Returns
-------
Result
"""
# is dataset properly flagged for ingest?
glb_gts_attr = getattr(ds, "gts_ingest", None)
# check variables
all_passed_ingest_reqs = True # default
var_failed_ingest_msg = (
"The following variables did not qualify for NDBC/GTS Ingest: {}\n"
)
var_passed_ingest_msg = (
"The following variables qualified for NDBC/GTS Ingest: {}\n"
)
var_passed_ingest_reqs = set()
for v in ds.get_variables_by_attributes(gts_ingest=lambda x: x == "true"):
var_passed_ingest_reqs.add(
(v.name, self._var_qualifies_for_gts_ingest(ds, v))
)
# always show which variables have passed
_var_passed = map(
lambda y: y[0], filter(lambda x: x[1], var_passed_ingest_reqs)
)
all_passed_ingest_reqs = all(map(lambda x: x[1], var_passed_ingest_reqs))
if not all_passed_ingest_reqs:
_var_failed = map(
lambda y: y[0], filter(lambda x: not x[1], var_passed_ingest_reqs)
)
return Result(
BaseCheck.HIGH,
False, # always fail
"NDBC/GTS Ingest Requirements",
[var_passed_ingest_msg.format(", ".join(_var_passed))]
if all_passed_ingest_reqs
else [
var_passed_ingest_msg.format(", ".join(_var_passed)),
var_failed_ingest_msg.format(", ".join(_var_failed)),
],
)
def check_instrument_variables(self, ds):
"""
If present, the instrument_variable is one that contains additional
metadata about the instrument the data was collected with.
Args:
ds (netCDF4.Dataset): open Dataset
Returns:
list of Results
"""
results = []
instr_vars = get_instrument_variables(ds)
# check for component, disciminant
for instr in instr_vars:
if instr in ds.variables:
compnt = getattr(ds.variables[instr], "component", None)
m = [
"component attribute of {} ({}) must be a string".format(
instr, compnt
)
]
if compnt:
results.append(
Result(
BaseCheck.MEDIUM,
isinstance(compnt, str),
"instrument_variable",
m,
)
)
else:
results.append(
Result(BaseCheck.MEDIUM, True, "instrument_variable", m)
)
disct = getattr(ds.variables[instr], "discriminant", None)
m = [
"discriminant attribute of {} ({}) must be a string".format(
instr, disct
)
]
if disct:
results.append(
Result(
BaseCheck.MEDIUM,
isinstance(disct, str),
"instrument_variable",
m,
)
)
else:
results.append(
Result(BaseCheck.MEDIUM, True, "instrument_variable", m)
)
return results
    def check_qartod_variables_flags(self, ds):
        """
        https://ioos.github.io/ioos-metadata/ioos-metadata-profile-v1-2.html#quality-controlqartod

        Check that all QARTOD variables have flag_meanings and flag_values attributes.
        Use delegation to methods in the CF module.

        Parameters
        ----------
        ds (netCDF4.Dataset): open dataset

        Returns
        -------
        list of Result objects
        """
        results = []
        # get qartod variables
        for v in ds.get_variables_by_attributes(
            standard_name=lambda x: x in self._qartod_std_names
        ):
            missing_msg = "flag_{} not present on {}"
            # check if each has flag_values, flag_meanings
            # need isinstance() as can't compare truth value of array
            if getattr(v, "flag_values", None) is None:
                # NOTE(review): the message here is passed as a bare string,
                # while other checks wrap messages in a list — confirm the
                # Result consumer tolerates both forms.
                results.append(
                    Result(
                        BaseCheck.MEDIUM,
                        False,
                        "qartod_variables flags",
                        missing_msg.format("values", v.name),
                    )
                )
            else:  # if exist, test
                # delegate detailed validation to the CF-1.7 checker
                results.append(self.cf1_7._check_flag_values(ds, v.name))
            if getattr(v, "flag_meanings", None) is None:
                results.append(
                    Result(
                        BaseCheck.MEDIUM,
                        False,
                        "qartod_variables flags",
                        missing_msg.format("meanings", v.name),
                    )
                )
            else:  # if exist, test
                results.append(self.cf1_7._check_flag_meanings(ds, v.name))
        # Ensure message name is "qartod_variables flags"
        # NOTE this is a bit of a hack to shove into CF results
        for r in results:
            r.name = "qartod_variables flags"
        return results
def check_qartod_variables_references(self, ds):
"""
For any variables that are deemed QARTOD variables, check that they
contain the "references" attribute and that the value of the attribute
is a valid URL.
Args:
ds (netCDF4.Dataset): open Dataset
Returns:
list of Results
"""
results = []
for v in ds.get_variables_by_attributes(
standard_name=lambda x: x in self._qartod_std_names
):
attval = getattr(v, "references", None)
if attval is None:
msg = (
'"references" attribute not present for variable {}.'
"If present, it should be a valid URL."
).format(v.name)
val = False
else:
msg = '"references" attribute for variable "{}" must be a valid URL'.format(
v.name
)
val = bool(validators.url(attval))
results.append(
Result(
BaseCheck.MEDIUM,
val,
"qartod_variable:references",
None if val else [msg],
)
)
return results
def check_wmo_platform_code(self, ds):
"""
Per the spec:
"The WMO identifier for the platform used to measure the data. This
identifier can be any of the following types:
- WMO ID for buoys (numeric, 5 digits)
- WMO ID for gliders (numeric, 7 digits)
- NWS ID (alphanumeric, 5 digits)"
This attribute is "required, if applicable" -- a warning message will
only show up if the attribute is present and does not conform.
Args:
ds (netCDF4.Dataset): open Dataset
Returns:
Result
"""
valid = True
ctxt = "wmo_platform_code"
msg = (
"The wmo_platform_code must be an alphanumeric string of 5 "
"characters or a numeric string of 7 characters"
)
code = getattr(ds, "wmo_platform_code", None)
if code:
if not (
isinstance(code, str)
and (re.search(r"^(?:[a-zA-Z0-9]{5}|[0-9]{7})$", code))
):
valid = False
return Result(BaseCheck.HIGH, valid, ctxt, None if valid else [msg])
def check_instrument_make_model_calib_date(self, ds):
"""
Instrument variables should have attributes make_model and
calibration_date. Both should be strings, with calibration_date
following ISO-8601 date format.
https://github.com/ioos/compliance-checker/issues/839
"""
results = []
ivars = get_instrument_variables(ds)
for v in ivars:
_v = ds.variables[v]
# make_model
mm = getattr(_v, "make_model", None)
valid = isinstance(mm, str)
results.append(
Result(
BaseCheck.MEDIUM,
valid,
"instrument_variable:make_model",
None
if valid
else [f"Attribute {v}:make_model ({mm}) should be a string"],
)
)
# calibration_date
cd = getattr(_v, "calibration_date", "")
# thanks folks https://stackoverflow.com/questions/41129921/validate-an-iso-8601-datetime-string-in-python
valid = bool(
re.match(
r"^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
cd,
)
)
results.append(
Result(
BaseCheck.MEDIUM,
valid,
"instrument_variable:calibration_date",
None
if valid
else [
f"Attribute {v}:calibration_date ({cd}) should be an ISO-8601 string"
],
)
)
return results
class IOOSBaseSOSCheck(BaseCheck):
    """Shared registry metadata for the IOOS SOS (Sensor Observation System) checkers."""

    # Checker-registry identifiers consumed by the compliance-checker framework.
    _cc_spec = "ioos_sos"
    _cc_spec_version = "0.1"
    _cc_description = (
        "IOOS Inventory Metadata checks for the Sensor Observation System (SOS). "
        "Checks SOS functions GetCapabilities and DescribeSensor."
    )
    register_checker = True
    # requires login
    _cc_url = "http://sdf.ndbc.noaa.gov/sos/"
class IOOSSOSGCCheck(BaseSOSGCCheck, IOOSBaseSOSCheck):
    """Checks against an SOS GetCapabilities response, driven by XPath lookups."""

    # set up namespaces for XPath
    ns = Namespaces().get_namespaces(["sos", "gml", "xlink"])
    ns["ows"] = Namespaces().get_namespace("ows110")

    @check_has(BaseCheck.HIGH)
    def check_high(self, ds):
        """No highly-required GetCapabilities fields are defined."""
        return []

    @check_has(BaseCheck.MEDIUM)
    def check_recommended(self, ds):
        """Recommended GetCapabilities fields, each located via XPath."""
        return [
            (
                "service_contact_email",
                XPath(
                    "/sos:Capabilities/ows:ServiceProvider/ows:ServiceContact/ows:ContactInfo/ows:Address/ows:ElectronicMailAddress",
                    namespaces=self.ns,
                ),
            ),
            (
                "service_contact_name",
                XPath(
                    "/sos:Capabilities/ows:ServiceProvider/ows:ServiceContact/ows:IndividualName",
                    namespaces=self.ns,
                ),
            ),
            (
                "service_provider_name",
                XPath(
                    "/sos:Capabilities/ows:ServiceProvider/ows:ProviderName",
                    namespaces=self.ns,
                ),
            ),
            # NOTE(review): service_title uses the same ProviderName XPath as
            # service_provider_name — presumably this should point at
            # ows:ServiceIdentification/ows:Title; confirm against the SOS
            # capabilities schema before changing.
            (
                "service_title",
                XPath(
                    "/sos:Capabilities/ows:ServiceProvider/ows:ProviderName",
                    namespaces=self.ns,
                ),
            ),
            (
                "service_type_name",
                XPath(
                    "/sos:Capabilities/ows:ServiceIdentification/ows:ServiceType",
                    namespaces=self.ns,
                ),
            ),
            (
                "service_type_version",
                XPath(
                    "/sos:Capabilities/ows:ServiceIdentification/ows:ServiceTypeVersion",
                    namespaces=self.ns,
                ),
            ),
            # ds.identification[0].observed_properties has this as well, but
            # don't want to try to shoehorn a function here
            # ('variable_names', len(ds.identification[0].observed_properties) > 0)
            (
                "variable_names",
                XPath(
                    "/sos:Capabilities/sos:Contents/sos:ObservationOfferingList/sos:ObservationOffering/sos:observedProperty",
                    namespaces=self.ns,
                ),
            ),
            (
                "data_format_template_version",
                XPath(
                    "/sos:Capabilities/ows:OperationsMetadata/ows:ExtendedCapabilities/gml:metaDataProperty[@xlink:title='ioosTemplateVersion']/gml:version",
                    namespaces=self.ns,
                ),
            ),
        ]

    @check_has(BaseCheck.LOW)
    def check_suggested(self, ds):
        """Suggested (low-priority) GetCapabilities fields."""
        return ["altitude_units"]
class IOOSSOSDSCheck(BaseSOSDSCheck, IOOSBaseSOSCheck):
    """IOOS metadata checks against a SOS DescribeSensor (SensorML) document.

    Each ``check_*`` method returns a list of ``(attribute_name, XPath)``
    pairs; the ``check_has`` decorator evaluates each XPath against the
    SensorML document and scores the attribute as present or missing.
    """

    # set up namespaces for XPath
    ns = Namespaces().get_namespaces(["sml", "swe", "gml", "xlink"])

    @check_has(BaseCheck.HIGH)
    def check_high(self, ds):
        """Required station-level attributes, each located by XPath."""
        return [
            (
                "platform_sponsor",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:classification/sml:ClassifierList/sml:classifier[@name='sponsor']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
            (
                "platform_type",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:classification/sml:ClassifierList/sml:classifier[@name='platformType']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
            # publisher contact details are identified by the xlink role URI
            (
                "station_publisher_name",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:contact/sml:ContactList/sml:member[@xlink:role='http://mmisw.org/ont/ioos/definition/publisher']/sml:ResponsibleParty/sml:organizationName",
                    namespaces=self.ns,
                ),
            ),
            (
                "station_publisher_email",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:contact/sml:ContactList/sml:member[@xlink:role='http://mmisw.org/ont/ioos/definition/publisher']/sml:ResponsibleParty/sml:contactInfo/address/sml:electronicMailAddress",
                    namespaces=self.ns,
                ),
            ),
            (
                "station_id",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:identification/sml:IdentifierList/sml:identifier[@name='stationID']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
            (
                "station_long_name",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:identification/sml:IdentifierList/sml:identifier[@name='longName']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
            (
                "station_short_name",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:identification/sml:IdentifierList/sml:identifier[@name='shortName']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
            # unlike the identifiers above, the WMO id is matched on the
            # Term's definition URI rather than on the identifier's name
            (
                "station_wmo_id",
                XPath(
                    '/sml:SensorML/sml:member/sml:System/sml:identification/sml:IdentifierList/sml:identifier/sml:Term[@definition="http://mmisw.org/ont/ioos/definition/wmoID"]/sml:value',
                    namespaces=self.ns,
                ),
            ),
            (
                "time_period",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:capabilities[@name='observationTimeRange']/swe:DataRecord/swe:field[@name='observationTimeRange']/swe:TimeRange/swe:value",
                    namespaces=self.ns,
                ),
            ),
            (
                "operator_email",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:contact/sml:ContactList/sml:member[@xlink:role='http://mmisw.org/ont/ioos/definition/operator']/sml:ResponsibleParty/sml:contactInfo/address/sml:electronicMailAddress",
                    namespaces=self.ns,
                ),
            ),
            (
                "operator_name",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:contact/sml:ContactList/sml:member[@xlink:role='http://mmisw.org/ont/ioos/definition/operator']/sml:ResponsibleParty/sml:organizationName",
                    namespaces=self.ns,
                ),
            ),
            (
                "station_description",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/gml:description",
                    namespaces=self.ns,
                ),
            ),
            # replaced with lon/lat with point
            (
                "station_location_point",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:location/gml:Point/gml:pos",
                    namespaces=self.ns,
                ),
            ),
        ]

    @check_has(BaseCheck.MEDIUM)
    def check_recommended(self, ds):
        """Recommended per-sensor and per-variable attributes."""
        return [
            (
                "sensor_descriptions",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:components/sml:ComponentList/sml:component/sml:System/gml:description",
                    namespaces=self.ns,
                ),
            ),
            (
                "sensor_ids",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:components/sml:ComponentList/sml:component/sml:System/@gml:id",
                    namespaces=self.ns,
                ),
            ),
            (
                "sensor_names",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:components/sml:ComponentList/sml:component/@name",
                    namespaces=self.ns,
                ),
            ),
            (
                "data_format_template_version",
                XPath(
                    "/sml:SensorML/sml:capabilities/swe:SimpleDataRecord/swe:field[@name='ioosTemplateVersion']/swe:Text/swe:value",
                    namespaces=self.ns,
                ),
            ),
            (
                "variable_names",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:components/sml:ComponentList/sml:component/sml:System/sml:outputs/sml:OutputList/sml:output/swe:Quantity/@definition",
                    namespaces=self.ns,
                ),
            ),
            (
                "variable_units",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:components/sml:ComponentList/sml:component/sml:System/sml:outputs/sml:OutputList/sml:output/swe:Quantity/swe:uom/@code",
                    namespaces=self.ns,
                ),
            ),
            (
                "network_id",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:capabilities[@name='networkProcedures']/swe:SimpleDataRecord/gml:metaDataProperty/@xlink:href",
                    namespaces=self.ns,
                ),
            ),
            (
                "operator_sector",
                XPath(
                    "/sml:SensorML/sml:member/sml:System/sml:classification/sml:ClassifierList/sml:classifier[@name='operatorSector']/sml:Term/sml:value",
                    namespaces=self.ns,
                ),
            ),
        ]

    @check_has(BaseCheck.LOW)
    def check_suggested(self, ds):
        """No suggested attributes are defined for DescribeSensor."""
        return []
| {
"content_hash": "464953896be4801e00f71d374290d860",
"timestamp": "",
"source": "github",
"line_count": 2049,
"max_line_length": 230,
"avg_line_length": 34.734992679355784,
"alnum_prop": 0.5179283988085202,
"repo_name": "ocefpaf/compliance-checker",
"id": "ea91fc4575a1aba3d28dc67a96d05199e1ae02cf",
"size": "71172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance_checker/ioos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "12681"
},
{
"name": "Python",
"bytes": "796514"
}
],
"symlink_target": ""
} |
"""Shared functionality for delimiter separated values output modules."""
from __future__ import unicode_literals
from plaso.output import formatting_helper
from plaso.output import interface
class DSVEventFormattingHelper(formatting_helper.EventFormattingHelper):
    """Delimiter separated values output module event formatting helper.

    Formats an event as one delimiter separated line of field values,
    replacing any occurrence of the delimiter inside a value with a space.
    """

    def __init__(
        self, output_mediator, field_formatting_helper, field_names,
        field_delimiter=','):
        """Initializes a delimiter separated values event formatting helper.

        Args:
            output_mediator (OutputMediator): output mediator.
            field_formatting_helper (FieldFormattingHelper): field formatting
                helper.
            field_names (list[str]): names of the fields to output.
            field_delimiter (Optional[str]): field delimiter.
        """
        super(DSVEventFormattingHelper, self).__init__(output_mediator)
        self._field_delimiter = field_delimiter
        self._field_formatting_helper = field_formatting_helper
        self._field_names = field_names

    def _SanitizeField(self, field):
        """Sanitizes a field value for output.

        Occurrences of the field delimiter are replaced with a space so the
        delimiter remains unambiguous within the output line.

        Args:
            field (str): value of the field to sanitize.

        Returns:
            str: sanitized value of the field.
        """
        delimiter = self._field_delimiter
        if not delimiter or not isinstance(field, str):
            return field
        return field.replace(delimiter, ' ')

    def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):
        """Retrieves a string representation of the event.

        Args:
            event (EventObject): event.
            event_data (EventData): event data.
            event_data_stream (EventDataStream): event data stream.
            event_tag (EventTag): event tag.

        Returns:
            str: string representation of the event.
        """
        get_field = self._field_formatting_helper.GetFormattedField
        formatted_values = [
            self._SanitizeField(get_field(
                field_name, event, event_data, event_data_stream, event_tag))
            for field_name in self._field_names]
        return self._field_delimiter.join(formatted_values)

    def GetFormattedFieldNames(self):
        """Retrieves a string representation of the field names.

        Returns:
            str: string representation of the field names.
        """
        return self._field_delimiter.join(self._field_names)

    def SetFieldDelimiter(self, field_delimiter):
        """Sets the field delimiter.

        Args:
            field_delimiter (str): field delimiter.
        """
        self._field_delimiter = field_delimiter

    def SetFields(self, field_names):
        """Sets the names of the fields to output.

        Args:
            field_names (list[str]): names of the fields to output.
        """
        self._field_names = field_names
class DSVOutputModule(interface.LinearOutputModule):
    """Shared functionality for delimiter separated values output modules."""

    def __init__(
        self, output_mediator, field_formatting_helper, names, delimiter=',',
        header=None):
        """Initializes a delimiter separated values output module.

        Args:
            output_mediator (OutputMediator): an output mediator.
            field_formatting_helper (FieldFormattingHelper): field formatting
                helper.
            names (list[str]): names of the fields to output.
            delimiter (Optional[str]): field delimiter.
            header (Optional[str]): header, where None will have WriteHeader
                generate a header from the field names.
        """
        helper = DSVEventFormattingHelper(
            output_mediator, field_formatting_helper, names,
            field_delimiter=delimiter)
        super(DSVOutputModule, self).__init__(output_mediator, helper)
        self._header = header

    def SetFieldDelimiter(self, field_delimiter):
        """Sets the field delimiter.

        Args:
            field_delimiter (str): field delimiter.
        """
        self._event_formatting_helper.SetFieldDelimiter(field_delimiter)

    def SetFields(self, field_names):
        """Sets the names of the fields to output.

        Args:
            field_names (list[str]): names of the fields to output.
        """
        self._event_formatting_helper.SetFields(field_names)

    def WriteHeader(self):
        """Writes the header to the output.

        Uses the explicit header when one was given, otherwise derives it
        from the configured field names.
        """
        header_text = (
            self._header or
            self._event_formatting_helper.GetFormattedFieldNames())
        self._output_writer.Write('{0:s}\n'.format(header_text))
| {
"content_hash": "ff66063fbc1ee68c76699c9317270ed9",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 32.55474452554745,
"alnum_prop": 0.6919282511210763,
"repo_name": "rgayon/plaso",
"id": "29c1cbc6ebf451038eecdb910e51c013b212180e",
"size": "4484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/output/shared_dsv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "PowerShell",
"bytes": "17771"
},
{
"name": "Python",
"bytes": "4803191"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "46225"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import random_string
from frappe.model.workflow import apply_workflow, WorkflowTransitionError, WorkflowPermissionError, get_common_transition_actions
from frappe.test_runner import make_test_records
class TestWorkflow(unittest.TestCase):
    """Integration tests for workflow transitions on the ToDo doctype."""

    @classmethod
    def setUpClass(cls):
        # users referenced below (e.g. test2@example.com) come from the
        # standard User test records
        make_test_records("User")

    def setUp(self):
        # recreate the workflow before each test so state does not leak
        self.workflow = create_todo_workflow()
        frappe.set_user('Administrator')

    def tearDown(self):
        frappe.delete_doc('Workflow', 'Test ToDo')

    def test_default_condition(self):
        '''test default condition is set'''
        todo = create_new_todo()
        # a new ToDo starts in the workflow's first state
        self.assertEqual(todo.workflow_state, 'Pending')
        return todo

    def test_approve(self, doc=None):
        '''test simple workflow'''
        todo = doc or self.test_default_condition()
        apply_workflow(todo, 'Approve')
        # the Approved state also sets status to Closed via update_field
        self.assertEqual(todo.workflow_state, 'Approved')
        self.assertEqual(todo.status, 'Closed')
        return todo

    def test_wrong_action(self):
        '''Check illegal action (approve after reject)'''
        todo = self.test_approve()
        # no Reject transition is defined from the Approved state
        self.assertRaises(WorkflowTransitionError, apply_workflow, todo, 'Reject')

    def test_workflow_condition(self):
        '''Test condition in transition'''
        self.workflow.transitions[0].condition = 'doc.status == "Closed"'
        self.workflow.save()
        # only approve if status is closed
        self.assertRaises(WorkflowTransitionError, self.test_approve)
        # reset the condition so other tests are unaffected
        self.workflow.transitions[0].condition = ''
        self.workflow.save()

    def test_get_common_transition_actions(self):
        '''Only actions valid for every passed document are returned.'''
        todo1 = create_new_todo()
        todo2 = create_new_todo()
        todo3 = create_new_todo()
        todo4 = create_new_todo()
        # all four are Pending, so both Pending transitions apply
        actions = get_common_transition_actions([todo1, todo2, todo3, todo4], 'ToDo')
        self.assertSetEqual(set(actions), set(['Approve', 'Reject']))
        apply_workflow(todo1, 'Reject')
        apply_workflow(todo2, 'Reject')
        apply_workflow(todo3, 'Approve')
        # mixed states (Rejected + Approved) share no transition
        actions = get_common_transition_actions([todo1, todo2, todo3], 'ToDo')
        self.assertListEqual(actions, [])
        # both Rejected: only Review is common
        actions = get_common_transition_actions([todo1, todo2], 'ToDo')
        self.assertListEqual(actions, ['Review'])

    def test_if_workflow_actions_were_processed(self):
        '''A Workflow Action record is created and completed on approval.'''
        frappe.db.sql('delete from `tabWorkflow Action`')
        user = frappe.get_doc('User', 'test2@example.com')
        user.add_roles('Test Approver', 'System Manager')
        frappe.set_user('test2@example.com')
        doc = self.test_default_condition()
        workflow_actions = frappe.get_all('Workflow Action', fields=['*'])
        self.assertEqual(len(workflow_actions), 1)
        # test if status of workflow actions are updated on approval
        self.test_approve(doc)
        user.remove_roles('Test Approver', 'System Manager')
        workflow_actions = frappe.get_all('Workflow Action', fields=['status'])
        self.assertEqual(len(workflow_actions), 1)
        self.assertEqual(workflow_actions[0].status, 'Completed')
        frappe.set_user('Administrator')

    def test_update_docstatus(self):
        '''Changing a state's doc_status re-syncs documents in that state.'''
        todo = create_new_todo()
        apply_workflow(todo, 'Approve')
        self.workflow.states[1].doc_status = 0
        self.workflow.save()
        todo.reload()
        self.assertEqual(todo.docstatus, 0)
        self.workflow.states[1].doc_status = 1
        self.workflow.save()
        todo.reload()
        self.assertEqual(todo.docstatus, 1)
        # restore the default so other tests see draft documents
        self.workflow.states[1].doc_status = 0
        self.workflow.save()

    def test_if_workflow_set_on_action(self):
        '''Submitting a document moves it to the submit-state of the workflow.'''
        self.workflow.states[1].doc_status = 1
        self.workflow.save()
        todo = create_new_todo()
        self.assertEqual(todo.docstatus, 0)
        todo.submit()
        self.assertEqual(todo.docstatus, 1)
        self.assertEqual(todo.workflow_state, 'Approved')
        # restore the default doc_status
        self.workflow.states[1].doc_status = 0
        self.workflow.save()
def create_todo_workflow():
    """Create a fresh 'Test ToDo' workflow for the ToDo doctype.

    Deletes any previous workflow of the same name, ensures the
    'Test Approver' role exists, then builds the Pending/Approved/Rejected
    state machine used by the tests.

    Returns:
        Workflow: the inserted workflow document.
    """
    if frappe.db.exists('Workflow', 'Test ToDo'):
        frappe.delete_doc('Workflow', 'Test ToDo')
    if not frappe.db.exists('Role', 'Test Approver'):
        frappe.get_doc(dict(doctype='Role',
            role_name='Test Approver')).insert(ignore_if_duplicate=True)

    workflow = frappe.new_doc('Workflow')
    workflow.workflow_name = 'Test ToDo'
    workflow.document_type = 'ToDo'
    workflow.workflow_state_field = 'workflow_state'
    workflow.is_active = 1
    workflow.send_email_alert = 0

    # states: who may edit in each state, plus the field update applied on
    # entering the Approved state
    for state in (
        dict(state='Pending', allow_edit='All'),
        dict(state='Approved', allow_edit='Test Approver',
            update_field='status', update_value='Closed'),
        dict(state='Rejected', allow_edit='Test Approver'),
    ):
        workflow.append('states', state)

    # transitions: action names and the roles allowed to perform them
    for transition in (
        dict(state='Pending', action='Approve', next_state='Approved',
            allowed='Test Approver', allow_self_approval=1),
        dict(state='Pending', action='Reject', next_state='Rejected',
            allowed='Test Approver', allow_self_approval=1),
        dict(state='Rejected', action='Review', next_state='Pending',
            allowed='All', allow_self_approval=1),
    ):
        workflow.append('transitions', transition)

    workflow.insert(ignore_permissions=True)
    return workflow
def create_new_todo():
    """Insert and return a ToDo with a randomized description."""
    description = 'workflow ' + random_string(10)
    return frappe.get_doc({'doctype': 'ToDo', 'description': description}).insert()
| {
"content_hash": "65dd4f15101f31aa30b6544db1b8ac0e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 129,
"avg_line_length": 31.28048780487805,
"alnum_prop": 0.7183235867446394,
"repo_name": "saurabh6790/frappe",
"id": "9ad0562a86495a7361e40c1a71215c9bc2c6d7d4",
"size": "5217",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/workflow/doctype/workflow/test_workflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63276"
},
{
"name": "HTML",
"bytes": "218921"
},
{
"name": "JavaScript",
"bytes": "2152738"
},
{
"name": "Less",
"bytes": "36947"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3261616"
},
{
"name": "SCSS",
"bytes": "223084"
},
{
"name": "Shell",
"bytes": "3358"
},
{
"name": "Vue",
"bytes": "49860"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
app = Flask(__name__)


@app.route('/')
def index():
    """Render the landing page template."""
    template_name = 'index.html'
    return render_template(template_name)
# export FLASK_DEBUG=1
# export FLASK_APP=hello.py
# flask run
# FLASK_DEBUG=1 FLASK_APP=hello.py flask run
| {
"content_hash": "f2cf6d0d6ec41ff68f7dd36c31bc71af",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 16.533333333333335,
"alnum_prop": 0.6895161290322581,
"repo_name": "sharkspeed/dororis",
"id": "0a1273e0bd3d8b4a0dc6f3d37be00c9dafa1c744",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "packages/python/flask/flask-01/ch4/hello.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Agda",
"bytes": "152"
},
{
"name": "AppleScript",
"bytes": "4936"
},
{
"name": "Assembly",
"bytes": "6654"
},
{
"name": "C",
"bytes": "568507"
},
{
"name": "C#",
"bytes": "2446"
},
{
"name": "C++",
"bytes": "15567"
},
{
"name": "CSS",
"bytes": "74090"
},
{
"name": "Clojure",
"bytes": "986"
},
{
"name": "CoffeeScript",
"bytes": "1055"
},
{
"name": "Crystal",
"bytes": "13171"
},
{
"name": "Dart",
"bytes": "22343"
},
{
"name": "Elixir",
"bytes": "27938"
},
{
"name": "Fortran",
"bytes": "400"
},
{
"name": "Go",
"bytes": "117383"
},
{
"name": "HTML",
"bytes": "780346"
},
{
"name": "Haskell",
"bytes": "33977"
},
{
"name": "Idris",
"bytes": "167"
},
{
"name": "Java",
"bytes": "105613"
},
{
"name": "JavaScript",
"bytes": "1453348"
},
{
"name": "Kotlin",
"bytes": "24078"
},
{
"name": "Lex",
"bytes": "1156"
},
{
"name": "Makefile",
"bytes": "22596"
},
{
"name": "Mako",
"bytes": "1976"
},
{
"name": "Objective-C",
"bytes": "1500"
},
{
"name": "PHP",
"bytes": "868941"
},
{
"name": "Python",
"bytes": "553417"
},
{
"name": "Racket",
"bytes": "11698"
},
{
"name": "Roff",
"bytes": "3741"
},
{
"name": "Ruby",
"bytes": "129923"
},
{
"name": "Rust",
"bytes": "27692"
},
{
"name": "Scala",
"bytes": "791"
},
{
"name": "Shell",
"bytes": "17297"
},
{
"name": "Smarty",
"bytes": "421"
},
{
"name": "Swift",
"bytes": "197600"
},
{
"name": "TeX",
"bytes": "3875"
},
{
"name": "TypeScript",
"bytes": "24815"
},
{
"name": "Vim script",
"bytes": "6936"
},
{
"name": "Vue",
"bytes": "32921"
},
{
"name": "Zig",
"bytes": "634"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
class YourModel(models.Model):
    """Sample model with two optional email fields used by the test app."""

    # blank=True allows empty values in forms; null=True stores missing
    # values as NULL in the database.
    # NOTE(review): Django convention avoids null=True on text-based fields
    # (empty string is preferred) -- confirm NULLs are intentional here.
    customer_support_email = models.EmailField(null=True, blank=True)
    marketing_email = models.EmailField(null=True, blank=True)
| {
"content_hash": "c878e22637a967a57eac4d4682a4ffc3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 29.5,
"alnum_prop": 0.7627118644067796,
"repo_name": "vinta/django-email-confirm-la",
"id": "85b0ef33d91dec5ac60cf61c1c0c8894068d3aa8",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/test_app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "942"
},
{
"name": "Makefile",
"bytes": "130"
},
{
"name": "Python",
"bytes": "36706"
}
],
"symlink_target": ""
} |
from weixin import WeiXinClient
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements); the appid, secret
    # and openid values below are placeholders / sample data and must be
    # replaced with real credentials to run against the WeiXin API.
    wc = WeiXinClient('your_appid', 'your_secret', fc = True, path='/tmp')
    #"https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=APPID&secret=APPSECRET"
    wc.request_access_token()
    # payloads used by the custom-message and menu calls below
    data = '{"touser":"obMnLt3bf7t65jyEsa7vOtXphdu4", "msgtype":"text", "text":{ "content":"hello!"}}'
    #data = '{"touser":"obMnLt9Qx5ZfPdElO3DQblM7ksl0", "msgtype":"image", ' \
    #    '"image":{ "media_id":"OaPSe4DP-HF4s_ABWHEVDgMKOPCUoViID8x-yPUvwCfqTEA0whZOza4hGODiHs93"}}'
    key = '{"button":[{"type":"click","name":"test","key":"V1001_TEST"}]}'
    #"https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=ACCESS_TOKEN"
    print wc.message.custom.send.post(body=data)
    #"https://api.weixin.qq.com/cgi-bin/user/info?access_token=ACCESS_TOKEN&openid=OPENID&lang=zh_CN"
    print wc.user.info._get(openid='obMnLt43lgfeeC8Ljn4-cLixEW6Q', lang='zh_CN')
    #"https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=ACCESS_TOKEN"
    print wc.message.custom.send.post(body=data)
    #"https://api.weixin.qq.com/cgi-bin/user/get?access_token=ACCESS_TOKEN&next_openid=NEXT_OPENID"
    print wc.user.get._get(next_openid=None)
    #"http://file.api.weixin.qq.com/cgi-bin/media/upload?access_token=ACCESS_TOKEN&type=TYPE"
    print wc.media.upload.file(type='image', pic = open('./test.jpg', 'rb'))
    #"https://api.weixin.qq.com/cgi-bin/groups/create?access_token=ACCESS_TOKEN"
    print wc.groups.create.post(body='{"group":{"name":"test_group_01"}}')
    #"https://api.weixin.qq.com/cgi-bin/groups/update?access_token=ACCESS_TOKEN"
    print wc.groups.update.post(body='{"group":{"id":100,"name":"test"}}')
    #"https://api.weixin.qq.com/cgi-bin/groups/members/update?access_token=ACCESS_TOKEN"
    print wc.groups.members.update.post(body='{"openid":"obMnLt9Qx5ZfPdElO3DQblM7ksl0","to_groupid":100}')
    #"https://api.weixin.qq.com/cgi-bin/groups/getid?access_token=ACCESS_TOKEN"
    print wc.groups.getid.post(body='{"openid":"obMnLt9Qx5ZfPdElO3DQblM7ksl0"}')
    #"https://api.weixin.qq.com/cgi-bin/groups/get?access_token=ACCESS_TOKEN"
    print wc.groups.get._get()
    #"https://api.weixin.qq.com/cgi-bin/menu/create?access_token=ACCESS_TOKEN"
    print wc.menu.create.post(body=key)
    #"https://api.weixin.qq.com/cgi-bin/menu/get?access_token=ACCESS_TOKEN"
    print wc.menu.get._get()
    #"http://file.api.weixin.qq.com/cgi-bin/media/get?access_token=ACCESS_TOKEN&media_id=MEDIA_ID"
    print wc.media.get.file(media_id='OaPSe4DP-HF4s_ABWHEVDgMKOPCUoViID8x-yPUvwCfqTEA0whZOza4hGODiHs93', base_path='/home/ubuntu/Pictures')
| {
"content_hash": "a7e6cfe7a1426de6dec928d95962e01c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 139,
"avg_line_length": 71.35135135135135,
"alnum_prop": 0.7026515151515151,
"repo_name": "wenwenqingqing/weixinpy",
"id": "8cef98151539bee8b4837c8dee558ea264773652",
"size": "2687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13990"
}
],
"symlink_target": ""
} |
"""passlib.hosts"""
#=============================================================================
# imports
#=============================================================================
# core
import sys
from warnings import warn
# pkg
from passlib.context import LazyCryptContext
from passlib.exc import PasslibRuntimeWarning
from passlib.registry import get_crypt_handler
from passlib.utils import has_crypt, unix_crypt_schemes
# local
# public API: preconfigured CryptContext instances exposed by this module
__all__ = [
    "linux_context", "linux2_context",
    "openbsd_context",
    "netbsd_context",
    "freebsd_context",
    "host_context",
]
#=============================================================================
# linux support
#=============================================================================
# known platform names - linux2
# context for linux platforms ("linux2" is the older sys.platform spelling);
# legacy des_crypt hashes remain verifiable but are flagged as deprecated
linux_context = linux2_context = LazyCryptContext(
    schemes = [ "sha512_crypt", "sha256_crypt", "md5_crypt",
                "des_crypt", "unix_disabled" ],
    deprecated = [ "des_crypt" ],
)
#=============================================================================
# bsd support
#=============================================================================
# known platform names -
# freebsd2
# freebsd3
# freebsd4
# freebsd5
# freebsd6
# freebsd7
#
# netbsd1
# referencing source via -http://fxr.googlebit.com
# freebsd 6,7,8 - des, md5, bcrypt, bsd_nthash
# netbsd - des, ext, md5, bcrypt, sha1
# openbsd - des, ext, md5, bcrypt
# per the source survey above: freebsd 6/7/8 supports des, md5, bcrypt and
# the bsd nthash variant
freebsd_context = LazyCryptContext(["bcrypt", "md5_crypt", "bsd_nthash",
                                    "des_crypt", "unix_disabled"])

# openbsd supports des, ext (bsdi), md5 and bcrypt
openbsd_context = LazyCryptContext(["bcrypt", "md5_crypt", "bsdi_crypt",
                                    "des_crypt", "unix_disabled"])

# netbsd additionally supports sha1
netbsd_context = LazyCryptContext(["bcrypt", "sha1_crypt", "md5_crypt",
                                   "bsdi_crypt", "des_crypt", "unix_disabled"])
# XXX: include darwin in this list? it's got a BSD crypt variant,
# but that's not what it uses for user passwords.
#=============================================================================
# current host
#=============================================================================
if has_crypt:
# NOTE: this is basically mimicing the output of os crypt(),
# except that it uses passlib's (usually stronger) defaults settings,
# and can be introspected and used much more flexibly.
def _iter_os_crypt_schemes():
"""helper which iterates over supported os_crypt schemes"""
found = False
for name in unix_crypt_schemes:
handler = get_crypt_handler(name)
if handler.has_backend("os_crypt"):
found = True
yield name
if found:
# only offer disabled handler if there's another scheme in front,
# as this can't actually hash any passwords
yield "unix_disabled"
else: # pragma: no cover -- sanity check
# no idea what OS this could happen on...
warn("crypt.crypt() function is present, but doesn't support any "
"formats known to passlib!", PasslibRuntimeWarning)
host_context = LazyCryptContext(_iter_os_crypt_schemes())
#=============================================================================
# other platforms
#=============================================================================
# known platform strings -
# aix3
# aix4
# atheos
# beos5
# darwin
# generic
# hp-ux11
# irix5
# irix6
# mac
# next3
# os2emx
# riscos
# sunos5
# unixware7
#=============================================================================
# eof
#=============================================================================
| {
"content_hash": "e848f1c42a060952bc7fcd2daa0e2c4f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 32.208695652173915,
"alnum_prop": 0.46787257019438444,
"repo_name": "austindlawless/cudas",
"id": "7df3efd2a5edd874215f08bcc97a7c944bf6749e",
"size": "3704",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lambda/signup/passlib/hosts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4137"
},
{
"name": "HTML",
"bytes": "13044"
},
{
"name": "JavaScript",
"bytes": "12026"
},
{
"name": "Python",
"bytes": "2651169"
}
],
"symlink_target": ""
} |
import pytest
from unittest.mock import patch
from unittest.mock import Mock
from .. import factories as f
from taiga.projects.history import services
pytestmark = pytest.mark.django_db(transaction=True)
from taiga.base.utils import json
def test_webhooks_when_create_user_story(settings):
    """A snapshot of a new user story notifies every project webhook."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    for _ in range(2):
        f.WebhookFactory.create(project=project)

    user_story = f.UserStoryFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(user_story, user=user_story.owner)

        # one request per registered webhook
        assert send_request_mock.call_count == 2

        _, _, _, data = send_request_mock.call_args[0]
        assert data["action"] == "create"
        assert data["type"] == "userstory"
        assert data["by"]["id"] == user_story.owner.id
        assert "date" in data
        assert data["data"]["id"] == user_story.id
def test_webhooks_when_update_user_story(settings):
    """Changing a user story produces a 'change' payload with a diff."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.UserStoryFactory.create(project=project)
    # first snapshot: creation event, one request per webhook
    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    obj.subject = "test webhook update"
    obj.save()

    # second snapshot: change event carrying the comment and subject diff
    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner, comment="test_comment")
        assert send_request_mock.call_count == 2

        (webhook_id, url, key, data) = send_request_mock.call_args[0]
        assert data["action"] == "change"
        assert data["type"] == "userstory"
        assert data["by"]["id"] == obj.owner.id
        assert "date" in data
        assert data["data"]["id"] == obj.id
        assert data["data"]["subject"] == obj.subject
        assert data["change"]["comment"] == "test_comment"
        # diff records both sides of the subject change
        assert data["change"]["diff"]["subject"]["to"] == data["data"]["subject"]
        assert data["change"]["diff"]["subject"]["from"] != data["data"]["subject"]
def test_webhooks_when_delete_user_story(settings):
    """Deleting a user story emits a 'delete' payload to every webhook."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    for _ in range(2):
        f.WebhookFactory.create(project=project)

    user_story = f.UserStoryFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(user_story, user=user_story.owner, delete=True)

        # one request per registered webhook
        assert send_request_mock.call_count == 2

        _, _, _, data = send_request_mock.call_args[0]
        assert data["action"] == "delete"
        assert data["type"] == "userstory"
        assert data["by"]["id"] == user_story.owner.id
        assert "date" in data
        assert "data" in data
def test_webhooks_when_update_user_story_attachments(settings):
    """Attachment create/update/delete each produce a 'change' payload."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    for _ in range(2):
        f.WebhookFactory.create(project=project)

    user_story = f.UserStoryFactory.create(project=project)

    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(user_story, user=user_story.owner)
        assert send_request_mock.call_count == 2

    def _snapshot_and_check_attachments(new, changed, deleted):
        # take a commented snapshot and verify the attachment diff counts
        with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
            services.take_snapshot(
                user_story, user=user_story.owner, comment="test_comment")
            assert send_request_mock.call_count == 2

            _, _, _, data = send_request_mock.call_args[0]
            assert data["action"] == "change"
            assert data["type"] == "userstory"
            assert data["by"]["id"] == user_story.owner.id
            assert "date" in data
            assert data["data"]["id"] == user_story.id
            assert data["change"]["comment"] == "test_comment"

            attachments_diff = data["change"]["diff"]["attachments"]
            assert len(attachments_diff["new"]) == new
            assert len(attachments_diff["changed"]) == changed
            assert len(attachments_diff["deleted"]) == deleted

    # Create attachments
    attachment1 = f.UserStoryAttachmentFactory(
        project=user_story.project, content_object=user_story,
        owner=user_story.owner)
    attachment2 = f.UserStoryAttachmentFactory(
        project=user_story.project, content_object=user_story,
        owner=user_story.owner)
    _snapshot_and_check_attachments(new=2, changed=0, deleted=0)

    # Update attachment
    attachment1.description = "new attachment description"
    attachment1.save()
    _snapshot_and_check_attachments(new=0, changed=1, deleted=0)

    # Delete attachment
    attachment2.delete()
    _snapshot_and_check_attachments(new=0, changed=0, deleted=1)
def test_webhooks_when_update_user_story_custom_attributes(settings):
    """Webhooks fire with correct custom-attribute diffs on create/update/delete."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    obj = f.UserStoryFactory.create(project=project)
    custom_attr_1 = f.UserStoryCustomAttributeFactory(project=obj.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.UserStoryCustomAttributeFactory(project=obj.project)
    ct2_id = "{}".format(custom_attr_2.id)

    def snapshot_and_get_payload():
        # take a commented snapshot; both webhooks must be notified and the
        # payload of the last request is returned for inspection
        with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
            services.take_snapshot(obj, user=obj.owner, comment="test_comment")
            assert send_request_mock.call_count == 2
            (webhook_id, url, key, data) = send_request_mock.call_args[0]
        return data

    def check_common_fields(data):
        assert data["action"] == "change"
        assert data["type"] == "userstory"
        assert data["by"]["id"] == obj.owner.id
        assert "date" in data
        assert data["data"]["id"] == obj.id
        assert data["change"]["comment"] == "test_comment"

    def custom_attr_diff_counts(data):
        diff = data["change"]["diff"]["custom_attributes"]
        return (len(diff["new"]), len(diff["changed"]), len(diff["deleted"]))

    # initial snapshot (no diff yet) still notifies both webhooks
    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    # Create custom attributes
    obj.custom_attributes_values.attributes_values = {
        ct1_id: "test_1_updated",
        ct2_id: "test_2_updated"
    }
    obj.custom_attributes_values.save()
    data = snapshot_and_get_payload()
    check_common_fields(data)
    assert custom_attr_diff_counts(data) == (2, 0, 0)

    # Update custom attributes
    obj.custom_attributes_values.attributes_values[ct1_id] = "test_2_updated"
    obj.custom_attributes_values.save()
    data = snapshot_and_get_payload()
    check_common_fields(data)
    assert custom_attr_diff_counts(data) == (0, 1, 0)

    # Delete custom attributes
    del obj.custom_attributes_values.attributes_values[ct1_id]
    obj.custom_attributes_values.save()
    data = snapshot_and_get_payload()
    check_common_fields(data)
    assert custom_attr_diff_counts(data) == (0, 0, 1)
def test_webhooks_when_update_user_story_points(settings):
    """Webhooks fire with correct per-role point diffs when points change."""
    settings.WEBHOOKS_ENABLED = True
    project = f.ProjectFactory()
    f.WebhookFactory.create(project=project)
    f.WebhookFactory.create(project=project)

    role1 = f.RoleFactory.create(project=project)
    role2 = f.RoleFactory.create(project=project)
    points1 = f.PointsFactory.create(project=project, value=None)
    points2 = f.PointsFactory.create(project=project, value=1)
    points3 = f.PointsFactory.create(project=project, value=2)

    obj = f.UserStoryFactory.create(project=project)
    obj.role_points.all().delete()

    def snapshot_and_get_payload():
        # snapshot without a comment; both webhooks must be notified and the
        # payload of the last request is returned for inspection
        with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
            services.take_snapshot(obj, user=obj.owner)
            assert send_request_mock.call_count == 2
            (webhook_id, url, key, data) = send_request_mock.call_args[0]
        return data

    def check_common_fields(data):
        assert data["action"] == "change"
        assert data["type"] == "userstory"
        assert data["by"]["id"] == obj.owner.id
        assert "date" in data
        assert data["data"]["id"] == obj.id
        assert data["change"]["comment"] == ""

    def point_diff(data, role):
        return data["change"]["diff"]["points"][role.name]

    # baseline snapshot with no role points
    with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
        services.take_snapshot(obj, user=obj.owner)
        assert send_request_mock.call_count == 2

    # Set points
    f.RolePointsFactory.create(user_story=obj, role=role1, points=points1)
    f.RolePointsFactory.create(user_story=obj, role=role2, points=points2)
    data = snapshot_and_get_payload()
    check_common_fields(data)
    assert point_diff(data, role1)["from"] is None
    assert point_diff(data, role1)["to"] == points1.name
    assert point_diff(data, role2)["from"] is None
    assert point_diff(data, role2)["to"] == points2.name

    # Change points
    obj.role_points.all().update(points=points3)
    data = snapshot_and_get_payload()
    check_common_fields(data)
    assert point_diff(data, role1)["from"] == points1.name
    assert point_diff(data, role1)["to"] == points3.name
    assert point_diff(data, role2)["from"] == points2.name
    assert point_diff(data, role2)["to"] == points3.name
| {
"content_hash": "6e877702945da487a02bf08a62b2b715",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 104,
"avg_line_length": 42.220689655172414,
"alnum_prop": 0.6390068605031035,
"repo_name": "curiosityio/taiga-docker",
"id": "716697ce2a00f9503a2b00d2f7c5e9c901aa350f",
"size": "13216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/tests/integration/test_webhooks_userstories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
} |
"""assert functions from numpy and pandas testing
"""
from statsmodels.compat.pandas import testing as pdt
import numpy.testing as npt
import pandas
from statsmodels.tools.tools import Bunch
# Standard list for parsing tables
# Column names shared by the bunch classes built with bunch_factory below.
PARAM_LIST = ['params', 'bse', 'tvalues', 'pvalues']
def bunch_factory(attribute, columns):
    """
    Generates a special purpose Bunch class

    Parameters
    ----------
    attribute: str
        Attribute to access when splitting
    columns: List[str]
        List of names to use when splitting the columns of attribute

    Notes
    -----
    After the class is initialized as a Bunch, the columns of attribute
    are split so that Bunch has the keys in columns and
    bunch[column[i]] = bunch[attribute][:, i]
    """
    class FactoryBunch(Bunch):
        def __init__(self, *args, **kwargs):
            # Python 3 zero-argument super(); file is Python-3-only
            super().__init__(*args, **kwargs)
            if not hasattr(self, attribute):
                raise AttributeError('{0} is required and must be passed to '
                                     'the constructor'.format(attribute))
            # expose each column of the 2-d array under its own key/attribute
            for i, att in enumerate(columns):
                self[att] = getattr(self, attribute)[:, i]

    return FactoryBunch
# Bunches whose 'params_table' / 'margins_table' array is split into
# params/bse/tvalues/pvalues keys at construction time (see bunch_factory).
ParamsTableTestBunch = bunch_factory('params_table', PARAM_LIST)
MarginTableTestBunch = bunch_factory('margins_table', PARAM_LIST)
class Holder:
    """
    Test-focused class to simplify accessing values by attribute
    """

    def __init__(self, **kwds):
        # every keyword becomes an instance attribute
        self.__dict__.update(kwds)

    def __str__(self):
        lines = []
        for name, value in vars(self).items():
            text = str(value).replace('\n', '\n    ')
            lines.append(name + " = " + text)
        return "\n".join(lines)

    def __repr__(self):
        # use repr for values including nested cases as in tost
        lines = []
        for name, value in vars(self).items():
            lines.append(name + " = " + repr(value).replace('\n', '\n    '))
        return str(self.__class__) + "\n" + "\n".join(lines)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
    """Dispatch to the appropriate pandas/numpy equality assertion.

    pandas objects are compared with the pandas testing helpers (which also
    check index and dtype); everything else falls back to
    ``numpy.testing.assert_equal``.

    Parameters
    ----------
    actual, desired : object
        Objects to compare.
    err_msg : str
        Message appended on failure (numpy path only).
    verbose : bool
        Whether mismatched values are included in the message (numpy path).
    **kwds
        Extra keyword arguments forwarded to the pandas assert functions.
    """
    if isinstance(desired, pandas.Index):
        pdt.assert_index_equal(actual, desired)
    elif isinstance(desired, pandas.Series):
        pdt.assert_series_equal(actual, desired, **kwds)
    elif isinstance(desired, pandas.DataFrame):
        pdt.assert_frame_equal(actual, desired, **kwds)
    else:
        # forward the caller's err_msg/verbose; previously hard-coded
        # defaults were passed and the parameters were silently ignored
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
| {
"content_hash": "df215ad52a6dd59f9eb1fbd4b02fe8af",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 30.924050632911392,
"alnum_prop": 0.6058125255832992,
"repo_name": "bashtage/statsmodels",
"id": "4a25f34e3de16a9d1c9686bd1517a600beb56992",
"size": "2443",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/tools/testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14433387"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
import argparse
import os
import sys
import urllib.parse
from google.cloud import compute
# Sub-command names exposed by the CLI (see parse_args).
CANARY_COMMAND_NAME = "canary"
ROLLBACK_CANARY_COMMAND_NAME = "rollback-canary"
PROMOTE_CANARY_COMMAND_NAME = "promote-canary"
DIRECT_UPDATE_COMMAND_NAME = "direct-update"

# A canary rollout targets exactly one instance.
CANARY_SIZE = compute.FixedOrPercent(fixed=1)
def resource_basename(resource):
  """Return the final path component of a GCP resource URL."""
  parsed_path = urllib.parse.urlparse(resource).path
  return os.path.basename(parsed_path)
def error(msg):
  """Report a fatal error on stderr and abort with exit status 1."""
  sys.stderr.write(f"ERROR:  {msg}\n")
  sys.exit(1)
def confirm(msg):
  """Prompt until a yes/no answer is given; abort the program on 'no'."""
  answer = ""
  while answer.lower() not in ("yes", "no", "y", "n"):
    answer = input(f"{msg} [y/n] ")
  if answer.lower() in ("n", "no"):
    print("Aborting")
    sys.exit(1)
def check_scary_action(action, skip_confirmation):
  """Warn about a destructive *action*; prompt unless confirmation is skipped."""
  if not skip_confirmation:
    confirm(f"You are about to perform {action}.\n"
            f" Are you sure you want to proceed?")
  else:
    print(f"WARNING: Performing {action}.\n"
          f"Proceeding because '--skip-confirmation' is set.")
def summarize_versions(versions):
  """Map each MIG version name to the basename of its instance template."""
  summary = {}
  for version in versions:
    summary[version.name] = resource_basename(version.instance_template)
  return summary
class MigFetcher():
  """Finds Managed Instance Groups whose names match the given filters."""

  def __init__(self, *, migs_client, regions_client, project):
    # Clients are google.cloud.compute API clients; project is the GCP
    # project id the MIGs live in.
    self._migs_client = migs_client
    self._regions_client = regions_client
    self._project = project

  def get_migs(self, *, region, type, group, prefix, modifier=None):
    """Return all MIGs whose names match
    '<prefix>[-<modifier>]-<group>-<type>-<region>' (empty parts skipped).

    'all' for region/type/group matches anything; the other values may be
    RE2 regexes accepted by the API's 'name eq' filter.
    NOTE(review): 'type' and 'filter' shadow builtins; kept as-is since
    'type' is part of the keyword interface.
    """
    print("Finding matching MIGs")
    migs = []
    request = compute.ListRegionsRequest(project=self._project)
    if region != "all":
      request.filter = f"name eq {region}"
    regions = [r.name for r in self._regions_client.list(request)]
    if type == "all":
      type = r"\w+"
    if group == "all":
      group = r"\w+"
    for region in regions:
      # falsy parts (e.g. modifier=None for prod) drop out of the name
      filter_parts = [p for p in [prefix, modifier, group, type, region] if p]
      filter = f"name eq '{'-'.join(filter_parts)}'"
      list_mig_request = compute.ListRegionInstanceGroupManagersRequest(
          project=self._project,
          region=region,
          filter=filter,
      )
      region_migs = self._migs_client.list(list_mig_request)
      migs.extend([mig for mig in region_migs])
    return migs
def main(args):
  """Apply the requested MIG update command to all matching instance groups."""
  templates_client = compute.InstanceTemplatesClient()
  migs_client = compute.RegionInstanceGroupManagersClient()
  updater = MigFetcher(
      migs_client=migs_client,
      regions_client=compute.RegionsClient(),
      project=args.project,
  )

  # Prod instances just have the bare name
  modifier = None if args.env == "prod" else args.env
  migs = updater.get_migs(region=args.region,
                          type=args.type,
                          group=args.group,
                          prefix=args.name_prefix,
                          modifier=modifier)
  if len(migs) == 0:
    error("arguments matched no instance groups")
    # NOTE(review): error() already exits, so this sys.exit is unreachable
    sys.exit(1)

  print(f"Found:\n ", "\n ".join([m.name for m in migs]), sep="")
  if args.skip_confirmation:
    print("Proceeding with update as --skip-confirmation is set")
  else:
    confirm("Proceed with updating these MIGs?")

  # proactive non-refresh updates can shut down instances mid-job
  if args.mode == "proactive" and args.action != "refresh":
    mig_desc = f"'{migs[0].name}'" if len(migs) == 1 else f"{len(migs)} groups"
    scary_action = (
        f"an update on {mig_desc} that will shut down instances even if"
        f" they're in the middle of running a job")
    check_scary_action(scary_action, args.skip_confirmation)

  for mig in migs:
    region = resource_basename(mig.region)

    # canary and direct updates need to resolve the target template version
    if args.command in [DIRECT_UPDATE_COMMAND_NAME, CANARY_COMMAND_NAME]:
      if "testing" in args.version and args.env != "testing":
        scary_action = (f"using testing template version '{args.version}' in"
                        f" environment '{args.env}'")
        check_scary_action(scary_action, args.skip_confirmation)

      # derive the template name from the MIG name minus its region suffix
      strip = f"-{region}"
      if not mig.name.endswith(strip):
        raise ValueError(f"MIG name does not end with '{strip}' as expected")
      template_name = f"{mig.name[:-len(strip)]}-{args.version}"
      # TODO(gcmn): Make template naming consistent (ran into length limits)
      template_name = template_name.replace(f"-{args.env}-", "-")
      template_url = templates_client.get(
          project=args.project, instance_template=template_name).self_link

    current_templates = {v.name: v.instance_template for v in mig.versions}

    if not current_templates:
      error(f"Found no template versions for '{mig.name}'."
            f" This shouldn't be possible.")

    # TODO(gcmn): These should probably be factored into functions
    if args.command == CANARY_COMMAND_NAME:
      # canary requires exactly one current version named base_version_name
      if len(current_templates) > 1:
        error(f"Instance group '{mig.name}' has multiple versions, but canary"
              f" requires it start with exactly one. Current versions:"
              f" {summarize_versions(mig.versions)}")

      base_template = current_templates.get(args.base_version_name)
      if not base_template:
        error(f"Instance group '{mig.name}' does not have a current version"
              f" named '{args.base_version_name}', which is required for an"
              f" automatic canary. Current versions:"
              f" {summarize_versions(mig.versions)}")

      if base_template == template_url:
        error(f"Instance group '{mig.name}' already has the requested canary"
              f" version '{template_name}' as its base version. Current"
              " versions:"
              f" {summarize_versions(mig.versions)}")
      new_versions = [
          compute.InstanceGroupManagerVersion(name=args.base_version_name,
                                              instance_template=base_template),
          compute.InstanceGroupManagerVersion(name=args.canary_version_name,
                                              instance_template=template_url,
                                              target_size=CANARY_SIZE)
      ]
    elif args.command == DIRECT_UPDATE_COMMAND_NAME:
      scary_action = (f"an update of all instances in '{mig.name}' directly"
                      f" without doing a canary")
      check_scary_action(scary_action, args.skip_confirmation)

      new_versions = [
          compute.InstanceGroupManagerVersion(name=args.base_version_name,
                                              instance_template=template_url)
      ]
    elif args.command == PROMOTE_CANARY_COMMAND_NAME:
      # promote: make the current canary template the (only) base version
      new_base_template = current_templates.get(args.canary_version_name)
      if new_base_template is None:
        error(f"Instance group '{mig.name}' does not have a current version"
              f" named '{args.canary_version_name}', which is required for an"
              f" automatic canary promotion. Current versions:"
              f" {summarize_versions(mig.versions)}")
      new_versions = [
          compute.InstanceGroupManagerVersion(
              name=args.base_version_name, instance_template=new_base_template)
      ]
    elif args.command == ROLLBACK_CANARY_COMMAND_NAME:
      # rollback: drop the canary version, keeping only the base template
      base_template = current_templates.get(args.base_version_name)
      if base_template is None:
        error(f"Instance group '{mig.name}' does not have a current version"
              f" named '{args.base_version_name}', which is required for an"
              f" automatic canary rollback. Current versions:"
              f" {summarize_versions(mig.versions)}")
      new_versions = [
          compute.InstanceGroupManagerVersion(name=args.base_version_name,
                                              instance_template=base_template)
      ]
    else:
      error(f"Unrecognized command '{args.command}'")

    update_policy = compute.InstanceGroupManagerUpdatePolicy(
        type_=args.mode,
        minimal_action=args.action,
        most_disruptive_allowed_action=args.action)
    print(f"Updating {mig.name} to new versions:"
          f" {summarize_versions(new_versions)}")
    request = compute.PatchRegionInstanceGroupManagerRequest(
        project=args.project,
        region=region,
        instance_group_manager=mig.name,
        instance_group_manager_resource=compute.InstanceGroupManager(
            versions=new_versions, update_policy=update_policy))

    if not args.dry_run:
      migs_client.patch(request)
    else:
      print(f"Dry run, so not sending this patch request:\n```\n{request}```")
    print(f"Successfully updated {mig.name}")
def parse_args():
  """Build and parse the CLI, returning the parsed argument namespace.

  If --mode is not given it defaults to 'proactive' for the safe 'refresh'
  action and 'opportunistic' otherwise.
  """
  parser = argparse.ArgumentParser(description=(
      "Updates one or more GCP Managed Instance Groups (MIGs) to new"
      " instance template versions. Wraps the GCP API with shortcuts for the"
      " patterns we have in our MIGs. See the README and"
      " https://cloud.google.com/compute/docs/instance-groups/updating-migs for"
      " more details."))

  # Makes global options come *after* command.
  # See https://stackoverflow.com/q/23296695
  subparser_base = argparse.ArgumentParser(add_help=False)
  subparser_base.add_argument("--project",
                              default="iree-oss",
                              help="The cloud project for the MIGs.")
  subparser_base.add_argument(
      "--region",
      "--regions",
      required=True,
      help=("The cloud region (e.g. 'us-west1') of the MIG to update, an RE2"
            " regex for matching region names (e.g. 'us-.*'), or 'all' to"
            " search for MIGs in all regions."))
  subparser_base.add_argument(
      "--group",
      "--groups",
      required=True,
      help=("The runner group of the MIGs to update, an RE2 regex for matching"
            " the group (e.g. 'cpu|gpu'), or 'all' to search for MIGs for all"
            " groups."),
  )
  subparser_base.add_argument(
      "--type",
      "--types",
      required=True,
      help=("The runner type of the MIGs to update, an RE2 regex for matching"
            " the type (e.g. 'presubmit|postsubmit'), or 'all' to search for"
            " MIGs for all types."),
  )
  subparser_base.add_argument(
      "--mode",
      choices=["opportunistic", "proactive"],
      help=(
          "The mode in which to update instances. See README and"
          " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
      ))
  subparser_base.add_argument(
      "--action",
      default="refresh",
      choices=["refresh", "restart", "replace"],
      help=(
          "What action to take when updating an instance. See README and"
          " https://cloud.google.com/compute/docs/instance-groups/updating-migs."
      ))
  subparser_base.add_argument("--env",
                              "--environment",
                              default="testing",
                              help="The environment for the MIGs.",
                              choices=["prod", "testing"])
  subparser_base.add_argument(
      "--dry-run",
      action="store_true",
      default=False,
      help="Print all output but don't actually send the update request.")
  subparser_base.add_argument("--skip-confirmation",
                              "--force",
                              action="store_true",
                              help="Skip all confirmation prompts. Be careful.")
  # These shouldn't be set very often, but it's just as easy to make them flags
  # as it is to make them global constants.
  subparser_base.add_argument("--name-prefix",
                              default="github-runner",
                              help="The first part of MIG and template names.")
  subparser_base.add_argument(
      "--base-version-name",
      default="base",
      help="The name given to the MIG instance version that isn't in canary.")
  subparser_base.add_argument(
      "--canary-version-name",
      default="canary",
      help="The name given to the MIG instance version that is being canaried.")

  subparsers = parser.add_subparsers(required=True, dest="command")
  canary_sp = subparsers.add_parser(CANARY_COMMAND_NAME,
                                    parents=[subparser_base],
                                    help="Canary a new template version.")
  rollback_sp = subparsers.add_parser(
      ROLLBACK_CANARY_COMMAND_NAME,
      parents=[subparser_base],
      help=("Rollback a previous canary, restoring all instances to the base"
            " version."))
  promote_sp = subparsers.add_parser(
      PROMOTE_CANARY_COMMAND_NAME,
      parents=[subparser_base],
      help="Promote the current canary version to be the base version.")
  direct_sp = subparsers.add_parser(
      DIRECT_UPDATE_COMMAND_NAME,
      parents=[subparser_base],
      help=("Update all instances in the MIG to a new version. Generally should"
            " not be used for prod."))

  # only the commands that install a new template take a version
  for sp in [canary_sp, direct_sp]:
    sp.add_argument(
        "--version",
        help=("The new instance template version. Usually git hash + ISO date +"
              " timestamp, e.g. b213037174-2022-09-06-1662502818"))

  # TODO: Add this argument with a custom parser
  # canary_sp.add_argument("--canary-size", type=int, default=1)

  args = parser.parse_args()

  if args.mode is None:
    # refresh is safe to run proactively; other actions wait for churn
    if args.action == "refresh":
      args.mode = "proactive"
    else:
      args.mode = "opportunistic"

  return args
# Script entry point: parse CLI arguments and run the requested update.
if __name__ == "__main__":
  main(parse_args())
| {
"content_hash": "66441a5ac572cdc7f10a4885516734f2",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 81,
"avg_line_length": 38.57309941520468,
"alnum_prop": 0.6162825955124318,
"repo_name": "iree-org/iree",
"id": "ee7372da626a6a632f857bc60a0381c2e4cc2a22",
"size": "13434",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build_tools/github_actions/runner/gcp/update_instance_groups.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
} |
import json
from collections import namedtuple
from django.conf import settings
from casexml.apps.case.models import CommCareCase
import logging
from corehq.apps.change_feed import topics
from pillowtop.checkpoints.manager import DEFAULT_EMPTY_CHECKPOINT_SEQUENCE
from pillowtop.checkpoints.util import construct_checkpoint_doc_id_from_name
from pillowtop.utils import get_pillow_config_by_name
def noop_reverse_migration(apps, schema_editor):
    """Reverse operation for data migrations that cannot be undone.

    Django's RunPython needs a reverse callable; this one intentionally
    does nothing.
    """
    # by default the reverse migration does nothing
    pass
def migrate_legacy_pillows(migration_apps, pillow_names):
    """Migrate each named legacy pillow checkpoint to the Django model."""
    for name in pillow_names:
        migrate_legacy_pillow_by_name(migration_apps, name)
def migrate_legacy_pillow_by_name(migration_apps, pillow_name):
    """Copy one legacy couch pillow checkpoint into the SQL checkpoint model.

    Reads the legacy checkpoint document from couch and creates a
    DjangoPillowCheckpoint row with the same sequence. Any failure is
    logged rather than raised so a missing checkpoint does not abort the
    migration.
    """
    if settings.UNIT_TESTING:
        # test runs have no legacy checkpoints to migrate
        return
    try:
        DjangoPillowCheckpoint = migration_apps.get_model('pillowtop', 'DjangoPillowCheckpoint')
        pillow_config = get_pillow_config_by_name(pillow_name)
        checkpoint_id = construct_checkpoint_doc_id_from_name(pillow_config.get_class().get_legacy_name())
        legacy_checkpoint = CommCareCase.get_db().get(checkpoint_id)
        new_checkpoint = DjangoPillowCheckpoint(
            checkpoint_id=pillow_config.get_instance().checkpoint.checkpoint_id,
            sequence=legacy_checkpoint['seq'],
            old_sequence=legacy_checkpoint.get('old_seq', None)
        )
        new_checkpoint.save()
    except Exception as e:
        # deliberately broad: best-effort migration, never break deploys
        logging.exception('Failed to update pillow checkpoint. {}'.format(e))
# (checkpoint_id, topic) pair consumed by merge_kafka_pillow_checkpoints.
CheckpointTopic = namedtuple("CheckpointTopic", "checkpoint_id, topic")
def merge_kafka_pillow_checkpoints(new_checkpoint_id, checkpoint_topics, migration_apps):
    """Merge several kafka pillow checkpoints into one JSON-format checkpoint.

    ``checkpoint_topics`` is an iterable of (checkpoint_id, topic) pairs
    (see CheckpointTopic). The merged sequence keeps the minimum offset per
    topic (see get_merged_sequence) and is saved under ``new_checkpoint_id``.
    Failures are logged rather than raised.
    """
    if settings.UNIT_TESTING:
        return
    try:
        DjangoPillowCheckpoint = migration_apps.get_model('pillowtop', 'DjangoPillowCheckpoint')
        checkpoint_doc_topics = []
        for checkpoint_id, topic in checkpoint_topics:
            try:
                checkpoint_doc = DjangoPillowCheckpoint.objects.get(checkpoint_id=checkpoint_id)
            except DjangoPillowCheckpoint.DoesNotExist:
                # missing source checkpoints are skipped, not fatal
                logging.warning('Checkpoint not found: {}'.format(checkpoint_id))
                continue
            if topic:
                assert topic in topics.ALL, "Unknown topic: {}".format(topic)
            checkpoint_doc_topics.append((checkpoint_doc, topic))
        merged_sequence = get_merged_sequence(checkpoint_doc_topics)
        new_checkpoint = DjangoPillowCheckpoint(
            checkpoint_id=new_checkpoint_id,
            sequence=json.dumps(merged_sequence),
            sequence_format='json'
        )
        new_checkpoint.save()
    except Exception as e:
        # deliberately broad: best-effort merge, never break deploys
        logging.exception('Failed to merge pillow checkpoints: {}. {}'.format(new_checkpoint_id, e))
def get_merged_sequence(checkpoints_topics):
    """Merge checkpoint sequences into one topic->offset dict.

    For each (checkpoint_doc, topic) pair, JSON-format checkpoints contribute
    their per-topic offsets; plain checkpoints contribute their integer
    sequence under the supplied topic. When a topic appears more than once
    the minimum offset wins, so no changes can be skipped. Empty checkpoints
    are ignored.
    """
    merged = {}

    def _note(topic_key, seq):
        # keep the smallest sequence seen for this topic
        if topic_key in merged:
            merged[topic_key] = min(merged[topic_key], seq)
        else:
            merged[topic_key] = seq

    for doc, topic in checkpoints_topics:
        if doc.sequence == DEFAULT_EMPTY_CHECKPOINT_SEQUENCE:
            continue
        if doc.sequence_format == 'json':
            for sub_topic, seq in json.loads(doc.sequence).items():
                _note(sub_topic, seq)
        else:
            _note(topic, int(doc.sequence))
    return merged
| {
"content_hash": "b7241bb47d47b7751ae219f5ea766884",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 106,
"avg_line_length": 37.60215053763441,
"alnum_prop": 0.6822991135258794,
"repo_name": "qedsoftware/commcare-hq",
"id": "15f1c296ba93d26b893d0a4149f51734ccc1c181",
"size": "3497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/cleanup/pillow_migrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import unittest
from rx.observable import Observable
from rx.testing import TestScheduler, ReactiveTest
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    """Exception type raised deliberately by the tests in this module."""
    pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
    """Raise RxException(ex); usable where an expression is required."""
    raise RxException(ex)
class TestLastOrDefault(unittest.TestCase):
    """Tests for ``Observable.last_or_default`` with and without a predicate.

    Note: two method names were normalized to snake_case
    (previously ``..._Predicate_none`` and ``..._Predicate_throw``) for
    consistency with the rest of the suite.
    """

    def test_last_or_default_async_empty(self):
        # empty source: the default value is emitted at completion time
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_completed(250))

        def create():
            return xs.last_or_default(None, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_next(250, 0), on_completed(250))
        xs.subscriptions.assert_equal(subscribe(200, 250))

    def test_last_or_default_async(self):
        # single element after subscription: that element is the last
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_completed(250))

        def create():
            return xs.last_or_default(None, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_next(250, 2), on_completed(250))
        xs.subscriptions.assert_equal(subscribe(200, 250))

    def test_last_or_default_async_many(self):
        # several elements: the final one wins
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_completed(250))

        def create():
            return xs.last_or_default(None, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_next(250, 3), on_completed(250))
        xs.subscriptions.assert_equal(subscribe(200, 250))

    def test_last_or_default_async_error(self):
        # errors from the source are forwarded immediately
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_error(210, ex))

        def create():
            return xs.last_or_default(None, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_error(210, ex))
        xs.subscriptions.assert_equal(subscribe(200, 210))

    def test_last_or_default_async_predicate(self):
        # last element matching the predicate is emitted at completion
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))

        def create():
            def predicate(x):
                return x % 2 == 1
            return xs.last_or_default(predicate, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_next(250, 5), on_completed(250))
        xs.subscriptions.assert_equal(subscribe(200, 250))

    def test_last_or_default_async_predicate_none(self):
        # no element matches: the default value is emitted
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))

        def create():
            def predicate(x):
                return x > 10
            return xs.last_or_default(predicate, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_next(250, 0), on_completed(250))
        xs.subscriptions.assert_equal(subscribe(200, 250))

    def test_last_or_default_async_predicate_throw(self):
        # source errors propagate even when a predicate is supplied
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_error(210, ex))

        def create():
            def predicate(x):
                return x > 10
            return xs.last_or_default(predicate, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_error(210, ex))
        xs.subscriptions.assert_equal(subscribe(200, 210))

    def test_last_or_default_async_predicate_throws(self):
        # an exception raised inside the predicate errors the sequence
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))

        def create():
            def predicate(x):
                if x < 4:
                    return x % 2 == 1
                else:
                    raise Exception(ex)
            return xs.last_or_default(predicate, 0)

        res = scheduler.start(create=create)

        res.messages.assert_equal(on_error(230, ex))
        xs.subscriptions.assert_equal(subscribe(200, 230))
| {
"content_hash": "5276942a0e738198ca0d01218253c520",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 148,
"avg_line_length": 33.3235294117647,
"alnum_prop": 0.6284201235657546,
"repo_name": "dbrattli/RxPY",
"id": "8e1f306b9103d6e71da5eb9b85e75e663c6d4857",
"size": "4532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_observable/test_lastordefault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from neuralnilm.net import BidirectionalRecurrentLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
# Experiment name derived from this script's filename (e.g. 'e263').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
# Where run_experiment saves figures/results.
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
# Number of steps to truncate backprop-through-time to.
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
260
standardise inputs and outputs.
261
trying just 3 appliances. Standardisation
263
conv1d between layers
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
from theano.ifelse import ifelse
import theano.tensor as T
# Target values above this count as "on" in scaled_cost's two-part loss.
THRESHOLD = 0
def scaled_cost(x, t):
    """Theano loss: MSE averaged separately over on/off target regions.

    Balances the loss between samples where the target t is above THRESHOLD
    and those at/below it, so the (typically dominant) off-state signal does
    not swamp training.
    """
    sq_error = (x - t) ** 2

    def mask_and_mean_sq_error(mask):
        masked_sq_error = sq_error[mask.nonzero()]
        mean = masked_sq_error.mean()
        # mean over an empty selection is NaN; treat that as zero loss
        mean = ifelse(T.isnan(mean), 0.0, mean)
        return mean

    above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
    below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
    return (above_thresh_mean + below_thresh_mean) / 2.0
# Configuration for RealApplianceSource (UK-DALE data); experiments deepcopy
# this and pass it as RealApplianceSource(**source_dict).
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television'
        #'dish washer',
        #['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
    on_power_thresholds=[5] * 5,
    max_input_power=5900,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=1500,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    # skip_probability=0.0,
    n_seq_per_batch=50,
    # subsample_target=5,
    include_diff=False,
    clip_appliance_power=True,
    target_is_prediction=False,
    standardise_input=True,
    standardise_targets=True,
    input_padding=0,
    lag=0
)
def change_learning_rate(net, epoch):
    # Epoch callback: switch the optimiser to learning rate 0.01 and
    # recompile the network so the new update rule takes effect.
    # NOTE(review): assumes `partial` and `nesterov_momentum` are imported
    # earlier in this file -- confirm.
    net.updates = partial(nesterov_momentum, learning_rate=0.01)
    net.compile()
def change_subsample(net, epoch):
    # Epoch callback: start subsampling the target 5x and rebuild the
    # validation data so batch shapes match the new target resolution.
    net.source.subsample_target = 5
    net.generate_validation_data_and_set_shapes()
# Keyword arguments shared by every experiment's Net; layers_config is
# filled in per experiment (see exp_a).
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    loss_function=scaled_cost,
    updates=partial(nesterov_momentum, learning_rate=0.001),
    do_save_activations=True
    # epoch_callbacks={250: change_learning_rate}
)
def exp_a(name):
    """Build experiment 'a': BLSTM -> Conv1D (5x downsample) -> BLSTM -> Dense.

    Uses the module-level `source` (with 5x target subsampling) plus the
    shared net_dict configuration, and returns an uncompiled Net.
    """
    global source
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    # NOTE(review): `source` is assumed to exist already (its construction
    # above is commented out) -- confirm it is created elsewhere before
    # exp_a runs.
    source.subsample_target = 5
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    net_dict_copy['layers_config'] = [
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 25,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=1.),
            'nonlinearity': tanh
        },
        {
            # Reorder axes so Conv1DLayer convolves over time.
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            # stride == filter_length == 5 gives the 5x downsample that
            # matches source.subsample_target above.
            'type': Conv1DLayer,
            'num_filters': 5,
            'filter_length': 5,
            'stride': 5,
            'nonlinearity': tanh,
            'W': Normal(std=1/sqrt(25))
        },
        {
            # Back to (batch, time, features) for the recurrent layer.
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BidirectionalRecurrentLayer,
            'num_units': 25,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=1/sqrt(25)),
            'nonlinearity': tanh
        },
        {
            # Linear output layer: one unit per target appliance.
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1/sqrt(25)))
        }
    ]
    net = Net(**net_dict_copy)
    return net
def init_experiment(experiment):
    """Look up and call the ``exp_<letter>`` builder for *experiment*.

    Parameters
    ----------
    experiment : str
        Single-letter experiment id; a module-level function named
        ``exp_<experiment>`` must exist and accept the full experiment
        name as its only argument.

    Returns
    -------
    The network built by the experiment function.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # Dispatch via a plain globals() lookup instead of eval() on a
    # constructed source string: same behaviour, but no arbitrary code
    # execution and a clear KeyError if the builder is missing.
    builder = globals()['exp_{:s}'.format(experiment)]
    net = builder(full_exp_name)
    return net
def main():
    """Run each experiment in turn, saving figures under PATH.

    KeyboardInterrupt stops the whole sweep; a TrainingError aborts only
    the current experiment; any other exception is reported and re-raised.
    """
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            # NOTE(review): run_experiment is assumed to be imported
            # earlier in this file -- confirm.
            run_experiment(net, path, epochs=None)
        except KeyboardInterrupt:
            break
        except TrainingError as exception:
            print("EXCEPTION:", exception)
        except Exception as exception:
            print("EXCEPTION:", exception)
            raise
if __name__ == "__main__":
    main()
| {
"content_hash": "859531ebb406a5bd73dbbbf43f829d76",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 109,
"avg_line_length": 26.42056074766355,
"alnum_prop": 0.629642730810046,
"repo_name": "JackKelly/neuralnilm_prototype",
"id": "3d44fcc34375b87a49f23ad41529fd42d8d081ac",
"size": "5654",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e270.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
} |
import os
import gevent
import gevent_inotifyx
import ezRPGreenlet
import ezRPConfig as gConfig
from ezbake.discovery import ServiceDiscoveryClient
from ezbake.reverseproxy.thriftapi.constants import SERVICE_NAME as EzFrontendServiceName
class EzReverseProxyService(object):
    """Front-end reverse-proxy service.

    Runs the thrift API, zookeeper monitor, nginx-config writer and
    shutdown watcher as gevent greenlets, then tears everything down in
    order once the thrift service greenlet exits.  (Python 2 module:
    uses print statements.)
    """
    def __init__(self, logger, staticFileHandler):
        # logger: shared application logger
        # staticFileHandler: passed through to the greenlet implementation
        self._logger = logger
        self._sfh = staticFileHandler
    def run(self):
        """Spawn the worker greenlets and block until shutdown completes."""
        glt = ezRPGreenlet.EzReverseProxyGreenlet(self._logger, self._sfh)
        self._logger.info("starting greenlet for thrift service...")
        clientGreenlet = gevent.spawn(glt.clientServiceGreenlet)
        self._logger.info("started")
        self._logger.info("starting greenlet to monitor zookeeper...")
        kzGreenlet = gevent.spawn(glt.kzMonitorGreenlet)
        self._logger.info("started")
        self._logger.info("starting greenlet to write out nginx configuration changes...")
        cfgProcessorGreenlet = gevent.spawn(glt.configurationChangeQueueGreenlet)
        self._logger.info("started")
        self._logger.info("starting greenlet to monitor for shutdown...")
        # Deleting gConfig.shutdownFile signals shutdown via inotify.
        fd = gevent_inotifyx.init()
        wd = gevent_inotifyx.add_watch(fd, gConfig.shutdownFile, gevent_inotifyx.IN_DELETE)
        wGreenlet = gevent.spawn(glt.watchGreenlet, fd)
        self._logger.info("started")
        gConfig.addGreenlets(clientGreenlet, kzGreenlet, cfgProcessorGreenlet, wGreenlet)
        # Tear-down order matters: thrift service first, then zookeeper.
        gevent.joinall([clientGreenlet])
        self._logger.warn("joined thrift service greenlet")
        gevent.joinall([kzGreenlet])
        self._logger.warn("joined zookeeper monitoring greenlet")
        gConfig.run = False
        # Drain any queued nginx-config work so the queue join below
        # cannot block forever on unfinished tasks.
        while not gConfig.configurationChangeQueue.empty():
            print "queue not empty"
            gConfig.configurationChangeQueue.get()
            print "got"
            gConfig.configurationChangeQueue.task_done()
        print "joining conf queue"
        gConfig.configurationChangeQueue.join()
        self._logger.warn("joined configuration queue")
        gevent.joinall([cfgProcessorGreenlet])
        self._logger.warn("joined configuration change greenlet")
        # NOTE(review): the local variable is `wGreenlet`; this line assumes
        # gConfig.addGreenlets exposed it as gConfig.wGreenlet -- confirm.
        gConfig.wGreenlet.join()
        self._logger.warn("joined shutdown monitor greenlet")
        ServiceDiscoveryClient(gConfig.zk).unregister_endpoint(gConfig.appName,
                                                               EzFrontendServiceName,
                                                               gConfig.internal_hostname,
                                                               gConfig.thriftPort)
        self._logger.warn("unregistered from discovery service")
| {
"content_hash": "b23605e1a0cc38f0ed3e107148f53d0c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 89,
"avg_line_length": 42.049180327868854,
"alnum_prop": 0.674074074074074,
"repo_name": "ezbake/ezbake-platform-services",
"id": "4f6f65215dd43444d889397943363774280fd638",
"size": "3188",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "efe/frontend_app/modules/ezRPService.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1050"
},
{
"name": "C",
"bytes": "3953594"
},
{
"name": "C++",
"bytes": "93178"
},
{
"name": "Diff",
"bytes": "16157"
},
{
"name": "HTML",
"bytes": "3026"
},
{
"name": "Java",
"bytes": "3360028"
},
{
"name": "Makefile",
"bytes": "5161"
},
{
"name": "Nginx",
"bytes": "4509"
},
{
"name": "Objective-C",
"bytes": "2834"
},
{
"name": "Perl",
"bytes": "15118"
},
{
"name": "Python",
"bytes": "884976"
},
{
"name": "Ruby",
"bytes": "988"
},
{
"name": "Shell",
"bytes": "72367"
},
{
"name": "Thrift",
"bytes": "19868"
},
{
"name": "VimL",
"bytes": "32089"
},
{
"name": "XS",
"bytes": "21306"
},
{
"name": "XSLT",
"bytes": "5197"
}
],
"symlink_target": ""
} |
from treehopper.api import spi
from treehopper.api.spi import ChipSelectMode, SpiMode, SpiBurstMode
from treehopper.api.pin import SpiChipSelectPin
class SpiDevice:
    """
    SPI device

    This class provides a simple wrapper around Spi.send_receive() that preserves the chip-select, mode, and frequency
    configurations.
    """
    def __init__(self, spi_module: spi, chip_select: SpiChipSelectPin, chip_select_mode=ChipSelectMode.SpiActiveLow, speed_mhz=6, spi_mode=SpiMode.Mode00):
        """
        Construct a new SPI device.

        :param spi_module: The SPI module to use
        :param chip_select: The chip select pin to use
        :param chip_select_mode: The chip select mode to use (e.g., ChipSelectMode.SpiActiveLow)
        :param speed_mhz: The speed, in MHz, to use.
        :param spi_mode: The SPI mode (e.g., SpiMode.Mode00) to use.

        Due to an open firmware issue, we recommend avoiding frequencies between 0.8 and 6 MHz, as these can cause ISR glitches under certain bus constraints.
        """
        self.chip_select_mode = chip_select_mode
        self.chip_select = chip_select
        self._spi = spi_module  # type: spi
        self.frequency = speed_mhz
        self.mode = spi_mode
        self._spi.enabled = True
        chip_select.make_digital_push_pull_out()
        # Park the chip-select line in its inactive state so the device
        # stays deselected until a transaction begins.
        if chip_select_mode == ChipSelectMode.PulseLowAtBeginning or chip_select_mode == ChipSelectMode.PulseLowAtEnd or chip_select_mode == ChipSelectMode.SpiActiveLow:
            chip_select.digital_value = True
        else:
            chip_select.digital_value = False

    def send_receive(self, data_to_send, burst=SpiBurstMode.NoBurst):
        """
        Send and/or receive data using the settings specified by the constructor.

        :param data_to_send: A list-like object of data to send
        :param burst: the SPI burst mode (choose from SpiBurstMode members).
        :return: Data received during the transaction.
        """
        # Bug fix: the original discarded the received data even though this
        # docstring promises to return it; propagate the result to callers.
        return self._spi.send_receive(data_to_send, self.chip_select, self.chip_select_mode, self.frequency, burst, self.mode)
| {
"content_hash": "5caa938bee993bf6ded11fa64c7da619",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 169,
"avg_line_length": 47.36363636363637,
"alnum_prop": 0.6799424184261037,
"repo_name": "treehopper-electronics/treehopper-sdk",
"id": "c910cec7135360678b2f04ddb233ca0a249ab329",
"size": "2084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/treehopper/libraries/spi_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6296"
},
{
"name": "Batchfile",
"bytes": "183"
},
{
"name": "C",
"bytes": "522458"
},
{
"name": "C#",
"bytes": "2112573"
},
{
"name": "C++",
"bytes": "517633"
},
{
"name": "CMake",
"bytes": "4426"
},
{
"name": "Java",
"bytes": "552020"
},
{
"name": "Jupyter Notebook",
"bytes": "169891"
},
{
"name": "Limbo",
"bytes": "19"
},
{
"name": "MATLAB",
"bytes": "1860"
},
{
"name": "Python",
"bytes": "599033"
}
],
"symlink_target": ""
} |
"""Test of gRPC Python's interaction with the python logging module"""
import unittest
import logging
import grpc
import subprocess
import sys
INTERPRETER = sys.executable


class LoggingTest(unittest.TestCase):
    """Checks that importing grpc leaves the stdlib logging module usable."""

    def _verifyScriptSucceeds(self, script):
        """Run `script` in a fresh interpreter and require exit code 0."""
        proc = subprocess.Popen([INTERPRETER, '-c', script],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(
            0, proc.returncode,
            'process failed with exit code %d (stdout: %s, stderr: %s)' %
            (proc.returncode, out, err))
        return out, err

    def test_logger_not_occupied(self):
        # Importing grpc must not install handlers on the root logger.
        script = """if True:
            import logging
            import grpc
            if len(logging.getLogger().handlers) != 0:
                raise Exception('expected 0 logging handlers')
        """
        self._verifyScriptSucceeds(script)

    def test_handler_found(self):
        # A bare import should produce no diagnostic output.
        script = """if True:
            import logging
            import grpc
        """
        _, err = self._verifyScriptSucceeds(script)
        self.assertEqual(0, len(err), 'unexpected output to stderr')

    def test_can_configure_logger(self):
        # basicConfig() after importing grpc must still take effect.
        script = """if True:
            import logging
            import six
            import grpc
            intended_stream = six.StringIO()
            logging.basicConfig(stream=intended_stream)
            if len(logging.getLogger().handlers) != 1:
                raise Exception('expected 1 logging handler')
            if logging.getLogger().handlers[0].stream is not intended_stream:
                raise Exception('wrong handler stream')
        """
        self._verifyScriptSucceeds(script)

    def test_grpc_logger(self):
        # grpc registers its own logger with a single NullHandler.
        script = """if True:
            import logging
            import grpc
            if "grpc" not in logging.Logger.manager.loggerDict:
                raise Exception('grpc logger not found')
            root_logger = logging.getLogger("grpc")
            if len(root_logger.handlers) != 1:
                raise Exception('expected 1 root logger handler')
            if not isinstance(root_logger.handlers[0], logging.NullHandler):
                raise Exception('expected logging.NullHandler')
        """
        self._verifyScriptSucceeds(script)


if __name__ == '__main__':
    unittest.main(verbosity=2)
| {
"content_hash": "cdd81c5716f21fbf77914991e8bd98b9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 77,
"avg_line_length": 28.5,
"alnum_prop": 0.5760913912688699,
"repo_name": "firebase/grpc",
"id": "1b8335a52a7fee4c8d97faeefd3d987b25fc9b85",
"size": "3028",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/unit/_logging_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "35774"
},
{
"name": "C",
"bytes": "3708933"
},
{
"name": "C#",
"bytes": "2162951"
},
{
"name": "C++",
"bytes": "12275592"
},
{
"name": "CMake",
"bytes": "495117"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "169468"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6259"
},
{
"name": "JavaScript",
"bytes": "84355"
},
{
"name": "M4",
"bytes": "69163"
},
{
"name": "Makefile",
"bytes": "1104867"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "696194"
},
{
"name": "Objective-C++",
"bytes": "77574"
},
{
"name": "PHP",
"bytes": "392133"
},
{
"name": "PowerShell",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "3401091"
},
{
"name": "Ruby",
"bytes": "982979"
},
{
"name": "Shell",
"bytes": "532295"
},
{
"name": "Starlark",
"bytes": "554304"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
} |
import sys
import commands
import utility
usage = '''
Kit, a project manager for C/C++
--------------------------------
- usage:
kit <command> [all|<module>] [flags/options]
kit [flags/options]
- commands:
build ...... compile all sources
clean ...... remove compilation products
dist ....... generate kit-independent project
fetch ...... fetch from central or remote repository
help ....... get more information
remove ..... uninstall module
init ....... prepare new project
install .... make available globally
modules .... list available modules
run ........ execute outputted application
test ....... execute unit tests
'''
option_table = {
'v': 'verbose',
's': 'save-cmake',
'd': 'debug'
}
def is_option(s):
    """Return True when *s* looks like a flag (starts with '-')."""
    # startswith is safe on the empty string, matching the original
    # s[:1] == '-' check.
    return s.startswith('-')
def not_option(s):
    """Complement of is_option: True for positional (non-flag) arguments."""
    # Inlined form of `not is_option(s)`; s[:1] handles the empty string.
    return s[:1] != '-'
def parse_option(a):
    """Normalise one command-line flag to its long-option name.

    Accepts either a '--long' form (validated against option_table values)
    or a '-x' short form (mapped through option_table).  Any other flag
    prints an error in red and exits.  (Python 2 module: print statement.)
    """
    if a[:2] == '--' and a[2:] in option_table.values():
        return a[2:]
    elif a[:1] == '-' and a[1:] in option_table.keys():
        return option_table[a[1:]]
    else:
        print utility.color(' - ERROR: invalid option: ' + a, 'red')
        quit()
def run_cli():
    """Parse sys.argv and dispatch to the requested command.

    Defaults to the `run` command when no command is given.  (Python 2:
    filter/map return lists here, so indexing `args` is valid.)
    """
    argv = sys.argv[1:]
    args = filter(not_option, argv)
    opts = map(parse_option, filter(is_option, argv))
    com = args[0] if len(args) > 0 else 'run'
    arg = args[1] if len(args) > 1 else None
    if com == 'help':
        print usage
    else:
        try:
            commands.execute(com, arg, opts)
        except KeyboardInterrupt:
            print '\n[kit]: aborting due to keyboard interupt'
if __name__ == '__main__':
    run_cli()
| {
"content_hash": "6c4c045386106fbfff03ad7c6aadb4b6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 68,
"avg_line_length": 25.13235294117647,
"alnum_prop": 0.5301345816266823,
"repo_name": "dasmithii/Kit",
"id": "e9f7ac981c11a3914e3b50e890ee5682441a1899",
"size": "1709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kit/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "128"
},
{
"name": "Python",
"bytes": "21556"
}
],
"symlink_target": ""
} |
__author__ = 'pacogomez'
from pyVim import connect
from pyVmomi import vim
import requests
import ssl
import atexit
def connect_to_nsx_api(ip, user, password, ssl_verify):
    """Probe the NSX manager's vcconfig endpoint.

    Returns True on HTTP 200; raises Exception for any other status so
    the caller's retry loop keeps polling.
    """
    appliance_check_url = 'https://{}/api/2.0/services/vcconfig'.format(ip)
    status = requests.request('GET', appliance_check_url,
                              auth=(user, password),
                              verify=ssl_verify).status_code
    if status != 200:
        raise Exception('response: {}'.format(status))
    return True
def connect_to_vcenter_api(vc_host, vc_user, vc_pwd, ssl_verify):
    """Connect to vCenter via pyVmomi and return the retrieved content.

    When ssl_verify is False, connects with an SSL context whose
    certificate checking is disabled (if the ssl module supports
    contexts at all).
    """
    context = None
    if hasattr(ssl, 'SSLContext'):
        # Older interpreters have no SSLContext; fall back to None.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
    if ssl_verify:
        service_instance = connect.SmartConnect(host=vc_host, user=vc_user, pwd=vc_pwd)
    else:
        service_instance = connect.SmartConnect(host=vc_host, user=vc_user, pwd=vc_pwd, sslContext=context)
    # Ensure the session is closed when the interpreter exits.
    atexit.register(connect.Disconnect, service_instance)
    return service_instance.RetrieveContent()
def main():
    """Ansible module entry point: poll vcenter/nsx until reachable.

    Retries once per second up to `max_seconds` attempts, then reports a
    timeout.  Exits immediately on a login failure or an unknown service
    type.
    """
    module = AnsibleModule(
        argument_spec=dict(
            ip=dict(required=True, type='str'),
            user=dict(required=True, type='str'),
            password=dict(required=False, type='str', no_log=True),
            # NOTE(review): default=True looks wrong for an int option --
            # confirm the intended default number of seconds.
            max_seconds=dict(required=False, type='int', default=True),
            service_type=dict(required=False, type='str', default='vcenter'),
            ssl_verify=dict(required=False, type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    status_poll_count = 0
    sleep_time = 1
    while status_poll_count < module.params['max_seconds']:
        try:
            if module.params['service_type'] == 'vcenter':
                content = connect_to_vcenter_api(module.params['ip'],
                                                 module.params['user'],
                                                 module.params['password'],
                                                 module.params['ssl_verify'])
            elif module.params['service_type'] == 'nsx':
                content = connect_to_nsx_api(module.params['ip'],
                                             module.params['user'],
                                             module.params['password'],
                                             module.params['ssl_verify'])
            else:
                module.fail_json(changed=False, msg='unknown service {}'.format(module.params['service_type']))
            break
        except vim.fault.InvalidLogin:
            module.fail_json(msg='exception while connecting to service {}, login failure, check username and password'.format(module.params['service_type']))
        except Exception:
            # Was a bare `except:`, which also swallowed the SystemExit
            # raised by fail_json above (and KeyboardInterrupt), turning
            # the "unknown service" failure into silent retries.  Catch
            # only real errors and treat them as "service not up yet".
            status_poll_count += 1
            # `time` is supplied by the ansible wildcard import below.
            time.sleep(sleep_time)
    if status_poll_count == module.params['max_seconds']:
        module.fail_json(changed=False, msg='timeout')
    else:
        module.exit_json(changed=False, msg='success')
# Wildcard import is the legacy ansible-module convention: it supplies
# AnsibleModule and also brings `time` into scope for main().
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| {
"content_hash": "f43269b60868f790da56aa357b98a651",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 158,
"avg_line_length": 40.688311688311686,
"alnum_prop": 0.5652729013724864,
"repo_name": "pacogomez/ovftool",
"id": "d0909aa2ee45c9a12c295fea13653dc7f83acaf0",
"size": "3152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/wait_for_service.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8443"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases() # noqa: Counter, OrderedDict,
from future.utils import viewitems # noqa
from pugnlp import regexes
from nlpia.web import requests_get
def generate_download_mccauley():
    """Write download_mccauley_autogenerated.sh with one wget per dataset URL.

    Scrapes the McAuley Amazon index pages for links to the ``.json.gz``
    product-review and question-answer archives.
    """
    # amazon product reviews for recommendation engine training and review sentiment analysis
    response = requests_get('http://jmcauley.ucsd.edu/data/amazon/', allow_redirects=True, timeout=5)
    urls_product_review = [m[0] for m in regexes.cre_url_popular.findall(response.text) if m[0].lower().endswith('.json.gz')]
    response = requests_get('http://jmcauley.ucsd.edu/data/amazon/qa/', allow_redirects=True, timeout=5)
    urls_question_answer = [m[1] for m in regexes.cre_href.findall(response.text) if m[1].lower().endswith('.json.gz')]
    with open('download_mccauley_autogenerated.sh', 'w') as f:
        # Bug fix: write each command on its own line.  The original
        # omitted the newline, concatenating every "wget <url>" into one
        # unrunnable line.
        for url in urls_product_review + urls_question_answer:
            f.write('wget ' + url + '\n')
| {
"content_hash": "15d82f3e7b0b4d9415e5527b29fef760",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 125,
"avg_line_length": 49.72,
"alnum_prop": 0.7055510860820595,
"repo_name": "totalgood/nlpia",
"id": "f00e66f44b0f63dbf88ff3a4a71859da2a525034",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nlpia/scripts/generate_mccauley_downloader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Dockerfile",
"bytes": "230"
},
{
"name": "HTML",
"bytes": "10157530"
},
{
"name": "Jupyter Notebook",
"bytes": "697442"
},
{
"name": "Python",
"bytes": "671641"
},
{
"name": "Shell",
"bytes": "33839"
},
{
"name": "TSQL",
"bytes": "1280"
}
],
"symlink_target": ""
} |
"""SCons.Tool.dvipdf
Tool-specific initialization for dvipdf.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvipdf.py 2009/09/04 16:33:07 david"
import SCons.Action
import SCons.Defaults
import SCons.Tool.pdf
import SCons.Tool.tex
import SCons.Util
# Sentinel returned by modify_env_var when TEXPICTS was not previously set;
# DviPdfPsFunction uses it to decide whether to delete or restore the var.
_null = SCons.Scanner.LaTeX._null
def DviPdfPsFunction(XXXDviAction, target = None, source= None, env=None):
    """A builder for DVI files that sets the TEXPICTS environment
    variable before running dvi2ps or dvipdf."""
    # Point TEXPICTS at the source .dvi's directory so included graphics
    # resolve; nodes without path attributes fall back to ''.
    try:
        abspath = source[0].attributes.path
    except AttributeError :
        abspath = ''
    saved_env = SCons.Scanner.LaTeX.modify_env_var(env, 'TEXPICTS', abspath)
    result = XXXDviAction(target, source, env)
    # Restore the caller's TEXPICTS; _null marks "was not set at all".
    if saved_env is _null:
        try:
            del env['ENV']['TEXPICTS']
        except KeyError:
            pass # was never set
    else:
        env['ENV']['TEXPICTS'] = saved_env
    return result
def DviPdfFunction(target=None, source=None, env=None):
    """Action function: run dvipdf with TEXPICTS pointing at the source dir."""
    return DviPdfPsFunction(PDFAction, target, source, env)
def DviPdfStrFunction(target=None, source=None, env=None):
    """Return the command string to display for dvipdf under --no-exec.

    When commands actually execute, the wrapper prints nothing here, so
    an empty string is returned.
    """
    if env.GetOption("no_exec"):
        return env.subst('$DVIPDFCOM', 0, target, source)
    return ''
# Module-level Actions, created lazily on the first generate() call.
PDFAction = None
DVIPDFAction = None
def PDFEmitter(target, source, env):
    """Strips any .aux or .log files from the input source list.

    These are by-products of the TeX build that produced the .dvi input;
    only the .dvi file matters for dvipdf.
    """
    # List comprehension instead of filter() + nested def; on Python 2
    # both produce a plain list.
    pruned = [src for src in source
              if SCons.Util.splitext(str(src))[1] not in ('.aux', '.log')]
    return (target, pruned)
def generate(env):
    """Add Builders and construction variables for dvipdf to an Environment."""
    # Create the module-level Actions lazily, on first generate() call.
    global PDFAction
    if PDFAction is None:
        PDFAction = SCons.Action.Action('$DVIPDFCOM', '$DVIPDFCOMSTR')
    global DVIPDFAction
    if DVIPDFAction is None:
        DVIPDFAction = SCons.Action.Action(DviPdfFunction, strfunction = DviPdfStrFunction)
    import pdf
    pdf.generate(env)
    # Teach the shared PDF builder how to turn .dvi into .pdf.
    bld = env['BUILDERS']['PDF']
    bld.add_action('.dvi', DVIPDFAction)
    bld.add_emitter('.dvi', PDFEmitter)
    env['DVIPDF'] = 'dvipdf'
    env['DVIPDFFLAGS'] = SCons.Util.CLVar('')
    env['DVIPDFCOM'] = 'cd ${TARGET.dir} && $DVIPDF $DVIPDFFLAGS ${SOURCE.file} ${TARGET.file}'
    # Deprecated synonym.
    env['PDFCOM'] = ['$DVIPDFCOM']
def exists(env):
    """Report whether the dvipdf tool can be found on this Environment."""
    return env.Detect('dvipdf')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "1fc43f984c46e76de6108811ebb74ce4",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 97,
"avg_line_length": 32.64,
"alnum_prop": 0.6992647058823529,
"repo_name": "cournape/numscons",
"id": "a2a3169ce588d53830a07eff49368ff825961c3e",
"size": "4080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numscons/scons-local/scons-local-1.2.0/SCons/Tool/dvipdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1275"
},
{
"name": "FORTRAN",
"bytes": "146"
},
{
"name": "Python",
"bytes": "2033297"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: b472cb98fffd
Revises: 7e206db94cf6
Create Date: 2018-01-04 20:57:19.845298
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration follows 7e206db94cf6 in the chain.
revision = 'b472cb98fffd'
down_revision = '7e206db94cf6'
branch_labels = None
depends_on = None
def upgrade():
    """Add the metaseek_* prediction columns to the dataset table, plus an
    index on metaseek_investigation_type."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('dataset', sa.Column('metaseek_env_package', sa.String(length=50), nullable=True))
    op.add_column('dataset', sa.Column('metaseek_investigation_type', sa.String(length=50), nullable=True))
    op.add_column('dataset', sa.Column('metaseek_investigation_type_P', sa.Float(), nullable=True))
    op.add_column('dataset', sa.Column('metaseek_mixs_specification', sa.String(length=20), nullable=True))
    op.add_column('dataset', sa.Column('metaseek_mixs_specification_P', sa.Float(), nullable=True))
    op.add_column('dataset', sa.Column('metaseek_sequencing_method', sa.String(length=50), nullable=True))
    op.create_index(op.f('ix_dataset_metaseek_investigation_type'), 'dataset', ['metaseek_investigation_type'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the index first, then the metaseek_* columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_dataset_metaseek_investigation_type'), table_name='dataset')
    op.drop_column('dataset', 'metaseek_sequencing_method')
    op.drop_column('dataset', 'metaseek_mixs_specification_P')
    op.drop_column('dataset', 'metaseek_mixs_specification')
    op.drop_column('dataset', 'metaseek_investigation_type_P')
    op.drop_column('dataset', 'metaseek_investigation_type')
    op.drop_column('dataset', 'metaseek_env_package')
    # ### end Alembic commands ###
| {
"content_hash": "c7410e9e04b82243e867fda5dfe8ac03",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 125,
"avg_line_length": 43.375,
"alnum_prop": 0.7112391930835735,
"repo_name": "ahoarfrost/metaseek",
"id": "603066882f202c6fd77cab1b8d976a89fe6645a7",
"size": "1735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/migrations/versions/b472cb98fffd_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47015"
},
{
"name": "HTML",
"bytes": "14749"
},
{
"name": "JavaScript",
"bytes": "153126"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "245103"
}
],
"symlink_target": ""
} |
"""
Generalized linear models currently supports estimation using the one-parameter
exponential families
References
----------
Gill, Jeff. 2000. Generalized Linear Models: A Unified Approach.
SAGE QASS Series.
Green, PJ. 1984. "Iteratively reweighted least squares for maximum
likelihood estimation, and some robust and resistant alternatives."
Journal of the Royal Statistical Society, Series B, 46, 149-192.
Hardin, J.W. and Hilbe, J.M. 2007. "Generalized Linear Models and
Extensions." 2nd ed. Stata Press, College Station, TX.
McCullagh, P. and Nelder, J.A. 1989. "Generalized Linear Models." 2nd ed.
    Chapman & Hall, Boca Raton.
"""
import numpy as np
import families, tools
from regression import WLS#,GLS #might need for mlogit
from model import LikelihoodModel, LikelihoodModelResults
from decorators import *
__all__ = ['GLM']
class GLM(LikelihoodModel):
'''
Generalized Linear Models class
GLM inherits from statsmodels.LikelihoodModel
Parameters
-----------
endog : array-like
1d array of endogenous response variable. This array can be
1d or 2d for Binomial family models.
exog : array-like
n x p design / exogenous data array
family : family class instance
The default is Gaussian. To specify the binomial distribution
family = sm.family.Binomial()
Each family can take a link instance as an argument. See
statsmodels.family.family for more information.
Attributes
-----------
df_model : float
`p` - 1, where `p` is the number of regressors including the intercept.
df_resid : float
The number of observation `n` minus the number of regressors `p`.
endog : array
See Parameters.
exog : array
See Parameters.
history : dict
Contains information about the iterations.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
A pointer to the distribution family of the model.
mu : array
The estimated mean response of the transformed variable.
normalized_cov_params : array
`p` x `p` normalized covariance of the design / exogenous data.
pinv_wexog : array
For GLM this is just the pseudo inverse of the original design.
scale : float
The estimate of the scale / dispersion. Available after fit is called.
scaletype : str
The scaling used for fitting the model. Available after fit is called.
weights : array
The value of the weights after the last iteration of fit.
Methods
-------
estimate_scale
Estimates the dispersion / scale of the model.
fit
Fits the model using iteratively reweighted least squares.
information
Returns the Fisher information matrix. Not yet implemented.
initialize
(Re)initialize the design. Resets history and number of iterations.
loglike
Returns the loglikelihood at `params` for a given distribution family.
newton
Used to fit the model via Newton-Raphson. Not yet implemented.
predict
Returns the linear predictor of the model.
score
Returns the score matrix of the model. Not yet implemented.
Examples
--------
>>> import scikits.statsmodels as sm
>>> data = sm.datasets.scotland.load()
>>> data.exog = sm.add_constant(data.exog)
Instantiate a gamma family model with the default link function.
>>> gamma_model = sm.GLM(data.endog, data.exog, \
family=sm.families.Gamma())
>>> gamma_results = gamma_model.fit()
>>> gamma_results.params
array([ 4.96176830e-05, 2.03442259e-03, -7.18142874e-05,
1.11852013e-04, -1.46751504e-07, -5.18683112e-04,
-2.42717498e-06, -1.77652703e-02])
>>> gamma_results.scale
0.0035842831734919055
>>> gamma_results.deviance
0.087388516416999198
>>> gamma_results.pearson_chi2
0.086022796163805704
>>> gamma_results.llf
-83.017202161073527
See also
--------
statsmodels.families.*
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
    Poisson      |      x    x                        x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer to are already
arrays and these arrays are changed, endog and exog will change.
**Attributes**
df_model : float
Model degrees of freedom is equal to p - 1, where p is the number
of regressors. Note that the intercept is not reported as a
degree of freedom.
df_resid : float
Residual degrees of freedom is equal to the number of observation n
minus the number of regressors p.
endog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exog : array
See above. Note that endog is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
history : dict
Contains information about the iterations. Its keys are `fittedvalues`,
`deviance`, and `params`.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
A pointer to the distribution family of the model.
mu : array
The mean response of the transformed variable. `mu` is the value of
the inverse of the link function at eta, where eta is the linear
predicted value of the WLS fit of the transformed variable. `mu` is
only available after fit is called. See
statsmodels.families.family.fitted of the distribution family for more
information.
normalized_cov_params : array
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
pinv_wexog : array
The pseudoinverse of the design / exogenous data array. Note that
GLM has no whiten method, so this is just the pseudo inverse of the
design.
The pseudoinverse is approximately equal to (X.T X)^(-1)X.T
scale : float
The estimate of the scale / dispersion of the model fit. Only
available after fit is called. See GLM.fit and GLM.estimate_scale
for more information.
scaletype : str
The scaling used for fitting the model. This is only available after
fit is called. The default is None. See GLM.fit for more information.
weights : array
The value of the weights after the last iteration of fit. Only
available after fit is called. See statsmodels.families.family for
the specific distribution weighting functions.
'''
def __init__(self, endog, exog, family=families.Gaussian()):
endog = np.asarray(endog)
exog = np.asarray(exog)
if endog.shape[0] != len(exog):
msg = "Size of endog (%s) does not match the shape of exog (%s)"
raise ValueError(msg % (endog.size, len(exog)))
self.endog = endog
self.exog = exog
self.family = family
self.initialize()
def initialize(self):
"""
Initialize a generalized linear model.
"""
#TODO: intended for public use?
self.history = { 'fittedvalues' : [], 'params' : [np.inf],
'deviance' : [np.inf]}
self.iteration = 0
self.pinv_wexog = np.linalg.pinv(self.exog)
self.normalized_cov_params = np.dot(self.pinv_wexog,
np.transpose(self.pinv_wexog))
self.df_model = tools.rank(self.exog)-1
self.df_resid = self.exog.shape[0] - tools.rank(self.exog)
def score(self, params):
"""
Score matrix. Not yet implemeneted
"""
raise NotImplementedError
def loglike(self, *args):
"""
Loglikelihood function.
Each distribution family has its own loglikelihood function.
See statsmodels.families.family
"""
return self.family.loglike(*args)
def information(self, params):
"""
Fisher information matrix. Not yet implemented.
"""
raise NotImplementedError
def _update_history(self, tmp_result, mu):
"""
Helper method to update history during iterative fit.
"""
self.history['params'].append(tmp_result.params)
self.history['fittedvalues'].append(tmp_result.fittedvalues)
self.history['deviance'].append(self.family.deviance(self.endog, mu))
def estimate_scale(self, mu):
"""
Estimates the dispersion/scale.
Type of scale can be chose in the fit method.
Parameters
----------
mu : array
mu is the mean response estimate
Returns
--------
Estimate of scale
Notes
-----
The default scale for Binomial and Poisson families is 1. The default
for the other families is Pearson's Chi-Square estimate.
See also
--------
statsmodels.glm.fit for more information
"""
if not self.scaletype:
if isinstance(self.family, (families.Binomial, families.Poisson)):
return np.array(1.)
else:
resid = self.endog - mu
return ((np.power(resid, 2) / self.family.variance(mu)).sum() \
/ self.df_resid)
if isinstance(self.scaletype, float):
return np.array(self.scaletype)
if isinstance(self.scaletype, str):
if self.scaletype.lower() == 'x2':
resid = self.endog - mu
return ((np.power(resid, 2) / self.family.variance(mu)).sum() \
/ self.df_resid)
elif self.scaletype.lower() == 'dev':
return self.family.deviance(self.endog, mu)/self.df_resid
else:
raise ValueError, "Scale %s with type %s not understood" %\
(self.scaletype,type(self.scaletype))
else:
raise ValueError, "Scale %s with type %s not understood" %\
(self.scaletype, type(self.scaletype))
def predict(self, exog, params=None, linear=False):
"""
Return predicted values for a design matrix
Parameters
----------
exog : array-like
Design / exogenous data
params : array-like, optional after fit has been called
Parameters / coefficients of a GLM.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link function at
the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
If the model as not yet been fit, params is not optional.
"""
if self._results is None and params is None:
raise ValueError, "If the model has not been fit, then you must \
specify the params argument."
if self._results is not None:
params = self.results.params
if linear:
return np.dot(exog, params)
else:
return self.family.fitted(np.dot(exog, params))
def fit(self, maxiter=100, method='IRLS', tol=1e-8, data_weights=1.,
scale=None):
'''
Fits a generalized linear model for a given family.
parameters
----------
data_weights : array-like or scalar, only used with Binomial
Number of trials for each observation. Used for only for
binomial data when `endog` is specified as a 2d array of
(successes, failures). Note that this argument will be
dropped in the future.
maxiter : int, optional
Default is 100.
method : string
Default is 'IRLS' for iteratively reweighted least squares. This
is currently the only method available for GLM fit.
scale : string or float, optional
`scale` can be 'X2', 'dev', or a float
The default value is None, which uses `X2` for Gamma, Gaussian,
and Inverse Gaussian.
`X2` is Pearson's chi-square divided by `df_resid`.
The default is 1 for the Binomial and Poisson families.
`dev` is the deviance divided by df_resid
tol : float
Convergence tolerance. Default is 1e-8.
'''
if np.shape(data_weights) != () and not isinstance(self.family,
families.Binomial):
raise ValueError, "Data weights are only to be supplied for\
the Binomial family"
self.data_weights = data_weights
if np.shape(self.data_weights) == () and self.data_weights>1:
self.data_weights = self.data_weights *\
np.ones((self.exog.shape[0]))
self.scaletype = scale
if isinstance(self.family, families.Binomial):
# thisc checks what kind of data is given for Binomial. family will need a reference to
# endog if this is to be removed from the preprocessing
self.endog = self.family.initialize(self.endog)
mu = self.family.starting_mu(self.endog)
wlsexog = self.exog
eta = self.family.predict(mu)
self.iteration += 1
dev = self.family.deviance(self.endog, mu)
if np.isnan(dev):
raise ValueError, "The first guess on the deviance function \
returned a nan. This could be a boundary problem and should be reported."
else:
self.history['deviance'].append(dev)
# first guess on the deviance is assumed to be scaled by 1.
while((np.fabs(self.history['deviance'][self.iteration]-\
self.history['deviance'][self.iteration-1])) > tol and \
self.iteration < maxiter):
self.weights = data_weights*self.family.weights(mu)
wlsendog = eta + self.family.link.deriv(mu) * (self.endog-mu)
# - offset
wls_results = WLS(wlsendog, wlsexog, self.weights).fit()
eta = np.dot(self.exog, wls_results.params) # + offset
mu = self.family.fitted(eta)
self._update_history(wls_results, mu)
self.scale = self.estimate_scale(mu)
self.iteration += 1
self.mu = mu
glm_results = GLMResults(self, wls_results.params,
wls_results.normalized_cov_params, self.scale)
glm_results.bse = np.sqrt(np.diag(wls_results.cov_params(\
scale=self.scale)))
return glm_results
# doesn't make sense really if there are arguments to fit
# also conflicts with refactor of GAM
# @property
# def results(self):
# """
# A property that returns a GLMResults class.
#
# Notes
# -----
# Calls fit if it has not already been called. The default values for
# fit are used. If the data_weights argument needs to be supplied for
# the Binomial family, then you should directly call fit.
# """
# if self._results is None:
# self._results = self.fit()
# return self._results
#TODO: remove dataweights argument and have it calculated from endog
# note that data_weights is not documented because I'm going to remove it.
# make the number of trials an argument to Binomial if constant and 1d endog
class GLMResults(LikelihoodModelResults):
    '''
    Results class for a fitted generalized linear model.

    GLMResults inherits from statsmodels.LikelihoodModelResults.

    Parameters
    ----------
    See statsmodels.LikelihoodModelResults

    Returns
    -------
    **Attributes**

    aic : float
        Akaike Information Criterion, ``-2 * llf + 2 * (df_model + 1)``.
    bic : float
        Bayes Information Criterion, ``deviance - df_resid * log(nobs)``.
    deviance : float
        Family-specific deviance (see statsmodels.families.family).
    df_model : float
        See GLM.df_model
    df_resid : float
        See GLM.df_resid
    fittedvalues : array
        Mean response of the fitted model (the model's `mu`).
    llf : float
        Family-specific log-likelihood evaluated at `params`.
    model : class instance
        Pointer to the GLM model instance that called fit.
    mu : array
        See GLM docstring.
    nobs : float
        The number of observations n.
    normalized_cov_params : array
        See GLM docstring.
    null_deviance : float
        Deviance of a model fit with a constant as the only regressor.
    params : array
        Estimated coefficients; interpretation often depends on the
        distribution family and the data.
    pearson_chi2 : array
        Sum of the squares of the Pearson residuals.
    pinv_wexog : array
        See GLM docstring.
    resid_anscombe : array
        Family-specific Anscombe residuals.
    resid_deviance : array
        Family-specific deviance residuals.
    resid_pearson : array
        Pearson residuals, ``(endog - mu)/sqrt(VAR(mu))`` with the
        family's variance function.
    resid_response : array
        Response residuals, ``endog - fittedvalues``.
    resid_working : array
        Working residuals, ``resid_response / link'(mu)``.
    scale : array
        Estimated scale / dispersion (see GLM.fit, GLM.estimate_scale).
    stand_errors : array
        Standard errors of the fitted GLM. #TODO still named bse

    Methods
    -------
    conf_int, cov_params, f_test, t, t_test
        Inherited from LikelihoodModelResults.  Confidence intervals are
        based on the standard normal distribution; z values are more
        appropriate than t values for GLMs, and f_test / t_test for GLMs
        are untested.

    See Also
    --------
    statsmodels.LikelihoodModelResults
    '''
    #TODO: add a z value function to LLMResults
    def __init__(self, model, params, normalized_cov_params, scale):
        super(GLMResults, self).__init__(model, params,
                normalized_cov_params=normalized_cov_params, scale=scale)
        # Needed for model.predict, since results isn't a property for
        # GLM.  TODO: get rid of this.
        self.model._results = self.model.results = self
        self.family = model.family
        self._endog = model.endog
        self.nobs = model.endog.shape[0]
        self.mu = model.mu
        self._data_weights = model.data_weights
        self.df_resid = model.df_resid
        self.df_model = model.df_model
        self.pinv_wexog = model.pinv_wexog
        self._cache = resettable_cache()
    # are these intermediate results needed or can we just call the
    # model's attributes?
    @cache_readonly
    def resid_response(self):
        return self._data_weights * (self._endog - self.mu)
    @cache_readonly
    def resid_pearson(self):
        scaled = np.sqrt(self._data_weights) * (self._endog - self.mu)
        return scaled / np.sqrt(self.family.variance(self.mu))
    @cache_readonly
    def resid_working(self):
        working = self.resid_response / self.family.link.deriv(self.mu)
        working *= self._data_weights
        return working
    @cache_readonly
    def resid_anscombe(self):
        return self.family.resid_anscombe(self._endog, self.mu)
    @cache_readonly
    def resid_deviance(self):
        return self.family.resid_dev(self._endog, self.mu)
    @cache_readonly
    def pearson_chi2(self):
        chisq = (self._endog - self.mu) ** 2 / self.family.variance(self.mu)
        chisq *= self._data_weights
        return np.sum(chisq)
    @cache_readonly
    def fittedvalues(self):
        return self.mu
    @cache_readonly
    def null(self):
        # Fitted values of an intercept-only model, for null_deviance.
        endog = self._endog
        intercept_only = np.ones((len(endog), 1))
        return WLS(endog, intercept_only,
                   weights=self._data_weights).fit().fittedvalues
    @cache_readonly
    def deviance(self):
        return self.family.deviance(self._endog, self.mu)
    @cache_readonly
    def null_deviance(self):
        return self.family.deviance(self._endog, self.null)
    @cache_readonly
    def llf(self):
        family = self.family
        if isinstance(family, families.NegativeBinomial):
            # NB log-likelihood is parameterized by the linear predictor.
            return family.loglike(self.model.endog,
                fittedvalues=np.dot(self.model.exog, self.params))
        return family.loglike(self._endog, self.mu, scale=self.scale)
    @cache_readonly
    def aic(self):
        return -2 * self.llf + 2 * (self.df_model + 1)
    @cache_readonly
    def bic(self):
        return self.deviance - self.df_resid * np.log(self.nobs)
#TODO: write summary method to use output.py in sandbox
| {
"content_hash": "fe89a5acb6efa753988575d3f6d57fda",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 88,
"avg_line_length": 37.268174474959615,
"alnum_prop": 0.6164983310936755,
"repo_name": "matthew-brett/draft-statsmodels",
"id": "1bf00995a7b09a5172e785e296866a099378e769",
"size": "23069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scikits/statsmodels/glm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "12147"
},
{
"name": "Python",
"bytes": "3886167"
}
],
"symlink_target": ""
} |
import unittest2
import mock
import json
import flask
import rq
import fakeredis
import jobmonitor
from jobmonitor import start_worker
# The resolve_connection method in rq.connections calls patch_connection(conn)
# in rq.compat.connections. This method checks if conn is an instance of
# Redis or StrictRedis.
# As the mocked (Strict)Redis connection in fakeredis is not such an instance,
# the check fails, and so we patch resolve_connection to not make the call.
# The addition of the underscore-prefixed properties are taken directly from
# the rq.compat.connections.resolve_connection source
def mocked_resolve_connection(connection):
    """Stand-in for rq's ``resolve_connection`` that skips the type check.

    The real implementation calls ``patch_connection``, which rejects the
    fakeredis connection because it is not a (Strict)Redis instance, so we
    attach the same underscore-prefixed aliases ourselves and hand the
    connection straight back.
    """
    for method_name in ('setex', 'lrem', 'zadd', 'pipeline', 'ttl'):
        setattr(connection, '_' + method_name, getattr(connection, method_name))
    if hasattr(connection, 'pttl'):
        connection._pttl = connection.pttl
    return connection
def str_resolver(jname):
    """Job resolver that resolves every name to the ``str`` builtin."""
    return 'str'
def conditional_resolver(jname):
    """Resolve names starting with 'a' via ``str_resolver``; else None."""
    if not jname.startswith('a'):
        return None
    return str_resolver(jname)
# The decorators on the TestCase class apply the patch to all test_* methods
@mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis)
@mock.patch('rq.queue.resolve_connection', mocked_resolve_connection)
@mock.patch('rq.job.resolve_connection', mocked_resolve_connection)
class TestJobs(unittest2.TestCase):
    """Tests for the jobs blueprint: listing, creating and fetching jobs."""
    # The setUp method is not patched by the class decorators, and so we must
    # repeat ourselves
    @mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis)
    @mock.patch('rq.queue.resolve_connection', mocked_resolve_connection)
    @mock.patch('rq.job.resolve_connection', mocked_resolve_connection)
    def setUp(self):
        self.app = jobmonitor.create_app()
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()
        self.queue = rq.Queue(connection=start_worker.create_connection())
        # Make sure there are some jobs on the queue, so we can validate
        # job retrieval and `GET /jobs`
        for _ in range(2):
            self.queue.enqueue('str', args=('foo',))
        # Dummy request data
        self.request_data = json.dumps(dict(task_name='task_name'))
    def get_json_response(self, url):
        """Return the rv for the URL and the decoded JSON data."""
        rv = self.client.get(url)
        data = json.loads(rv.data)
        return rv, data
    def validate_job(self, job):
        """Assert that a job dictionary, from JSON, is valid.
        As this method checks the job URI with flask.url_for, it must be called
        in an app.test_request_context.
        """
        assert 'id' in job
        job_id = job['id']
        assert 'uri' in job
        assert job['uri'] == flask.url_for('jobs.get_job',
                                           job_id=job_id, _external=True)
        assert 'status' in job
        # Status should be one of the values allowed by rq
        # https://github.com/nvie/rq/blob/0.4.6/rq/job.py#L30
        assert job['status'] in ('queued', 'finished', 'failed', 'started')
        assert 'result' in job
    def test_list_jobs(self):
        """The correct number of jobs should be returned."""
        rv, data = self.get_json_response('/jobs')
        assert len(data['jobs']) == self.queue.count
    def test_list_job_serialisation(self):
        """All jobs in a list should serialise correctly."""
        with self.app.test_request_context():
            rv, data = self.get_json_response('/jobs')
            for job in data['jobs']:
                self.validate_job(job)
    def test_create_job(self):
        """Job response should be the new job and a success status code."""
        # Add a job resolver so the job actually submits
        self.app.add_job_resolver(str_resolver)
        # Get the number of jobs before the request, so we can compare after
        njobs = self.queue.count
        rv = self.client.post('/jobs', data=self.request_data,
                              content_type='application/json')
        # Remove the job resolver now we're done with it
        self.app.remove_job_resolver(str_resolver)
        data = json.loads(rv.data)
        assert 'job' in data
        assert rv.status_code == 201
        assert self.queue.count == (njobs + 1)
    def test_invalid_job_creation_no_task_name(self):
        """Attempting to create a job without a task name should give 400."""
        rv = self.client.post('/jobs', data=json.dumps(dict()),
                              content_type='application/json')
        data = json.loads(rv.data)
        assert rv.status_code == 400
        assert 'message' in data
        # BUG FIX: was ``len('message') > 0``, which tested the length of a
        # string literal (always 7) rather than the response message.
        assert len(data['message']) > 0
    def test_invalid_job_creation_job_name_unresolved_task_name(self):
        """Attempting to create a job with an unresolvable name should 400."""
        # Add the conditional resolver that we know we'll fail
        self.app.add_job_resolver(conditional_resolver)
        rv = self.client.post('/jobs', data=json.dumps(dict(task_name='bcd')),
                              content_type='application/json')
        data = json.loads(rv.data)
        assert rv.status_code == 400
        assert 'message' in data
        # BUG FIX: was ``len('message') > 0``; see
        # test_invalid_job_creation_no_task_name.
        assert len(data['message']) > 0
    def test_invalid_job_creation_not_json(self):
        """Only JSON requests can create jobs, else 400."""
        rv = self.client.post('/jobs', data=self.request_data)
        data = json.loads(rv.data)
        assert 'message' in data
        assert len(data['message']) > 0
        assert rv.status_code == 400
    def test_get_job(self):
        """A job existing in the queue can be retrieved with its ID."""
        job_id = self.queue.job_ids[0]
        rv, data = self.get_json_response('/jobs/{0}'.format(job_id))
        job = data['job']
        assert 'id' in job
    def test_get_job_serialisation(self):
        """All necessary information should be present in the job response."""
        job_id = self.queue.job_ids[0]
        with self.app.test_request_context():
            rv, data = self.get_json_response('/jobs/{0}'.format(job_id))
            self.validate_job(data['job'])
    def test_bad_request(self):
        """`400 bad request` should be a JSON response with a message."""
        rv = self.client.post('/jobs', data=json.dumps(dict()),
                              content_type='application/json')
        data = json.loads(rv.data)
        assert 'message' in data
        assert len(data['message']) > 0
        assert rv.status_code == 400
    def test_not_found(self):
        """`404 not found` should be a JSON response with a message."""
        rv, data = self.get_json_response('/jobs/fake_id')
        assert 'message' in data
        assert len(data['message']) > 0
        assert rv.status_code == 404
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest2.main()
| {
"content_hash": "2a48746691404a753506eb7fd7ee4007",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 41.566265060240966,
"alnum_prop": 0.6431884057971015,
"repo_name": "alexpearce/jobmonitor",
"id": "d8d5d99f70be6d835c6cf10e5928b5a4f59b0315",
"size": "6900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_jobs_blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2542"
},
{
"name": "HTML",
"bytes": "5924"
},
{
"name": "JavaScript",
"bytes": "7677"
},
{
"name": "Python",
"bytes": "22357"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add database indexes to ProductAttributeValue's typed value columns."""

    dependencies = [
        ('catalogue', '0015_product_is_public'),
    ]

    operations = [
        migrations.AlterField(
            field=models.NullBooleanField(db_index=True,
                                          verbose_name='Boolean'),
            model_name='productattributevalue',
            name='value_boolean',
        ),
        migrations.AlterField(
            field=models.DateField(blank=True, db_index=True, null=True,
                                   verbose_name='Date'),
            model_name='productattributevalue',
            name='value_date',
        ),
        migrations.AlterField(
            field=models.DateTimeField(blank=True, db_index=True, null=True,
                                       verbose_name='DateTime'),
            model_name='productattributevalue',
            name='value_datetime',
        ),
        migrations.AlterField(
            field=models.FloatField(blank=True, db_index=True, null=True,
                                    verbose_name='Float'),
            model_name='productattributevalue',
            name='value_float',
        ),
        migrations.AlterField(
            field=models.IntegerField(blank=True, db_index=True, null=True,
                                      verbose_name='Integer'),
            model_name='productattributevalue',
            name='value_integer',
        ),
    ]
| {
"content_hash": "c568666ec3d2deed5c6e2dcff2422535",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 102,
"avg_line_length": 34.73684210526316,
"alnum_prop": 0.6045454545454545,
"repo_name": "django-oscar/django-oscar",
"id": "80c3726a80e67b15ede26ffa53bbb23e93e83ee2",
"size": "1394",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/_site/apps/catalogue/migrations/0016_auto_20190327_0757.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "565297"
},
{
"name": "JavaScript",
"bytes": "41944"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2261460"
},
{
"name": "SCSS",
"bytes": "21815"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
import subprocess, threading
class Command(object):
    """Run a shell command with a timeout, capturing its output.

    The command runs in a worker thread; if it is still alive when the
    timeout expires, the process is terminated.
    """
    def __init__(self, cmd):
        # cmd: shell command string (executed with shell=True).
        self.cmd = cmd
    def run(self, timeout):
        """Execute the command, terminating it after ``timeout`` seconds.

        Returns a ``(rc, stdout, stderr)`` tuple.  ``rc`` is the exit
        status (negative if the process was terminated, None if it never
        produced one); ``stdout`` holds the combined output because
        stderr is redirected into stdout, so ``stderr`` is None once the
        process has started.
        """
        self.process = None
        self.rc = None
        self.stdout = ""
        self.stderr = ""
        def target():
            self.process = subprocess.Popen(
                self.cmd, shell=True, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, close_fds=True)
            # BUG FIX: the original called wait() before communicate();
            # with stdout=PIPE that deadlocks once the child fills the
            # pipe buffer.  communicate() both drains the pipe and waits.
            self.stdout, self.stderr = self.process.communicate()
            self.rc = self.process.returncode
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child, then let the worker thread
            # finish collecting whatever output was produced.
            self.process.terminate()
            thread.join()
        return self.rc, self.stdout, self.stderr
# Demo: the first run finishes within the timeout, the second is killed.
if __name__ == "__main__":
    # FIX: print-as-call so the demo runs on Python 3 as well as Python 2
    # (the original used the Python-2-only print statement).
    command = Command("echo 'Process started'; sleep 2; echo 'Process finished'")
    print(command.run(timeout=3))
    print(command.run(timeout=1))
| {
"content_hash": "e453747f94e5e624fcaccc6e5625cf7a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 131,
"avg_line_length": 28.75,
"alnum_prop": 0.5956521739130435,
"repo_name": "osynge/whatenv",
"id": "1130fb4f8fad63d2179de1ed0a54c97578e301f2",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osweint/command_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50168"
}
],
"symlink_target": ""
} |
import os
import sys
import hashlib
import logging
import json
from cPickle import PicklingError
import redis
from totalimpact import REDIS_CACHE_DATABASE_NUMBER
# set up logging
logger = logging.getLogger("ti.cache")
# Module-level Redis connection shared by every Cache instance.
# NOTE(review): assumes REDIS_URL is set in the environment — verify.
cache_client = redis.from_url(os.getenv("REDIS_URL"), REDIS_CACHE_DATABASE_NUMBER)
# Entries whose "text" payload exceeds this size are not cached at all.
MAX_PAYLOAD_SIZE_BYTES = 1000*1000 # 1mb
# When Redis reports at least this much used memory, new entries are skipped.
MAX_CACHE_SIZE_BYTES = 100*1000*1000 #100mb
class CacheException(Exception):
    """Raised when a cache entry cannot be stored in Redis."""
    pass
class Cache(object):
    """ Maintains a cache of URL responses in Redis.

    (Docstring previously said memcached; the client used below is the
    module-level redis connection.)  Values are JSON-serialised dicts
    keyed by an MD5 of the JSON-encoded key.
    """
    def _build_hash_key(self, key):
        # key may be any JSON-serialisable object; hashing its JSON form
        # gives a fixed-length Redis key.
        json_key = json.dumps(key)
        hash_key = hashlib.md5(json_key.encode("utf-8")).hexdigest()
        return hash_key
    def _get_client(self):
        # Indirection over the module-level connection (eases mocking).
        return cache_client
    def __init__(self, max_cache_age=60*60): #one hour
        # max_cache_age: TTL in seconds applied to each entry on write.
        self.max_cache_age = max_cache_age
        self.flush_cache()
    def flush_cache(self):
        #empties the cache
        # NOTE(review): flushdb() is commented out, so this is currently a
        # no-op — presumably disabled on purpose; confirm before re-enabling.
        mc = self._get_client()
        # mc.flushdb()
    def get_cache_entry(self, key):
        """ Get an entry from the cache, returns None if not found """
        mc = self._get_client()
        hash_key = self._build_hash_key(key)
        response = mc.get(hash_key)
        if response:
            # Stored values are JSON strings; decode back to Python data.
            response = json.loads(response)
        return response
    def set_cache_entry(self, key, data):
        """ Store a cache entry.

        Silently skips (returns None) when the payload is too large or the
        Redis instance is already over the size budget; raises
        CacheException when the SET itself fails.
        """
        # sys.getsizeof is shallow: this only measures the "text" string
        # itself, which is assumed to dominate the payload.
        if sys.getsizeof(data["text"]) > MAX_PAYLOAD_SIZE_BYTES:
            logger.debug(u"Not caching because payload is too large")
            return None
        mc = self._get_client()
        # INFO round-trip on every write guards against filling Redis.
        if mc.info()["used_memory"] >= MAX_CACHE_SIZE_BYTES:
            logger.debug(u"Not caching because redis cache is too full")
            return None
        hash_key = self._build_hash_key(key)
        set_response = mc.set(hash_key, json.dumps(data))
        mc.expire(hash_key, self.max_cache_age)
        if not set_response:
            logger.warning("Unable to store into Redis. Make sure redis server is running.")
            raise CacheException("Unable to store into Redis. Make sure redis server is running.")
        return set_response
| {
"content_hash": "1203fc2fba06d61d15e1c8113f34baba",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 98,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.6268796992481203,
"repo_name": "Impactstory/total-impact-core",
"id": "683f0111507d9a056ca107283716ec8013f28ac8",
"size": "2128",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "totalimpact/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10570"
},
{
"name": "Python",
"bytes": "1313553"
},
{
"name": "R",
"bytes": "1545"
},
{
"name": "Shell",
"bytes": "2420"
},
{
"name": "TeX",
"bytes": "339123"
}
],
"symlink_target": ""
} |
from stackalytics.tests.api import test_api
class TestAPIModules(test_api.TestAPI):
    """API tests for the /api/1.0/modules endpoints (list and detail)."""
    def test_get_modules(self):
        # Fixture: two repos, one module group containing nova + nova-cli,
        # and two project types ('all' covers everything, 'integrated' only
        # nova and glance).
        with test_api.make_runtime_storage(
                {
                    'repos': [
                        {'module': 'nova', 'organization': 'openstack',
                         'uri': 'git://git.openstack.org/openstack/nova.git'},
                        {'module': 'glance', 'organization': 'openstack',
                         'uri': 'git://git.openstack.org/openstack/glance.git'}
                    ],
                    'module_groups': {
                        'nova-group': {'id': 'nova-group',
                                       'module_group_name': 'nova-group',
                                       'modules': ['nova', 'nova-cli'],
                                       'tag': 'group'},
                        'nova': test_api.make_module('nova'),
                        'nova-cli': test_api.make_module('nova-cli'),
                        'glance': test_api.make_module('glance'),
                    },
                    'releases': [
                        {'release_name': 'prehistory', 'end_date': 1234567890},
                        {'release_name': 'icehouse', 'end_date': 1234567890}],
                    'project_types': [{'id': 'all', 'title': 'All',
                                       'modules': ['nova', 'glance',
                                                   'nova-cli']},
                                      {'id': 'integrated',
                                       'title': 'Integrated',
                                       'modules': ['nova', 'glance']}]},
                test_api.make_records(record_type=['commit'],
                                      module=['glance', 'nova', 'nova-cli'])):
            response = self.app.get('/api/1.0/modules?'
                                    'project_type=all&metric=commits')
            modules = test_api.load_json(response)['data']
            self.assertEqual(
                [{'id': 'glance', 'text': 'glance', 'tag': 'module'},
                 {'id': 'nova', 'text': 'nova', 'tag': 'module'},
                 {'id': 'nova-cli', 'text': 'nova-cli', 'tag': 'module'},
                 {'id': 'nova-group', 'text': 'nova-group', 'tag': 'group'}],
                modules,
                message='Expected modules belonging to project type plus '
                        'module groups that are completely within '
                        'project type')
            response = self.app.get('/api/1.0/modules?module=nova-group&'
                                    'project_type=integrated&metric=commits')
            modules = test_api.load_json(response)['data']
            self.assertEqual(
                [{'id': 'glance', 'text': 'glance', 'tag': 'module'},
                 {'id': 'nova', 'text': 'nova', 'tag': 'module'},
                 {'id': 'nova-group', 'text': 'nova-group', 'tag': 'group'}],
                modules,
                message='Expected modules belonging to project type plus '
                        'module groups that are completely within '
                        'project type')
    def test_get_module(self):
        # Detail endpoint: a plain module returns only itself; a group
        # lists member modules, with 'visible' False for members that have
        # no repo in storage (nova-cli below).
        with test_api.make_runtime_storage(
                {
                    'repos': [
                        {'module': 'nova', 'organization': 'openstack',
                         'uri': 'git://git.openstack.org/openstack/nova.git'}],
                    'module_groups': {
                        'nova-group': {'id': 'nova-group',
                                       'module_group_name': 'nova-group',
                                       'modules': ['nova-cli', 'nova'],
                                       'tag': 'group'},
                        'nova': test_api.make_module('nova'),
                        'nova-cli': test_api.make_module('nova-cli'),
                    },
                    'releases': [{'release_name': 'prehistory',
                                  'end_date': 1234567890},
                                 {'release_name': 'icehouse',
                                  'end_date': 1234567890}],
                    'project_types': [
                        {'id': 'all', 'title': 'All',
                         'modules': ['nova', 'glance', 'nova-cli']},
                        {'id': 'openstack', 'title': 'OpenStack',
                         'modules': ['nova', 'glance']}]},
                test_api.make_records(record_type=['commit'])):
            response = self.app.get('/api/1.0/modules/nova')
            module = test_api.load_json(response)['module']
            self.assertEqual(
                {'id': 'nova',
                 'modules': [
                     {'module_name': 'nova',
                      'visible': True,
                      'repo_uri': 'git://git.openstack.org/openstack/nova.git'}
                 ],
                 'name': 'Nova', 'tag': 'module'}, module)
            response = self.app.get('/api/1.0/modules/nova-group')
            module = test_api.load_json(response)['module']
            self.assertEqual(
                {'id': 'nova-group',
                 'modules': [{
                     'module_name': 'nova',
                     'visible': True,
                     'repo_uri': 'git://git.openstack.org/openstack/nova.git'},
                     {'module_name': 'nova-cli', 'visible': False},
                 ],
                 'name': 'Nova-group', 'tag': 'group'}, module)
| {
"content_hash": "da55692f648e7ae9e834383979810888",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 51.32710280373832,
"alnum_prop": 0.39603058994901674,
"repo_name": "0xf2/stackalytics",
"id": "e4ad64b59caaf3f9e9339bc94ba0223f2a87fec9",
"size": "6074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackalytics/tests/api/test_modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40486"
},
{
"name": "HTML",
"bytes": "105450"
},
{
"name": "JavaScript",
"bytes": "80552"
},
{
"name": "Python",
"bytes": "441953"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
} |
import random
class Node:
    """A single binary-search-tree node holding ``data`` and two children."""
    def __init__(self, data):
        self.data = data
        # Children start empty; ``insert`` attaches them as needed.
        self.left = None
        self.right = None
def insert(data, root):
    """
    Insert ``data`` into the subtree rooted at ``root``.

    :param data: value for the new node (duplicates are silently ignored)
    :param root: root of the (sub)tree, or None for an empty tree
    :return: the root of the tree with the value inserted
    """
    if root is None:
        return Node(data)
    if data > root.data:
        root.right = insert(data, root.right)
    elif data < root.data:
        root.left = insert(data, root.left)
    return root
def createTree(num_nodes):
    """
    Build a tree containing the integers 0 .. num_nodes - 1.

    The middle value (num_nodes // 2) becomes the root so the top of the
    tree is reasonably balanced.

    :param num_nodes: how many sequential values to insert
    :return: the root of the newly assembled tree
    """
    mid = num_nodes // 2
    root = Node(mid)
    for value in range(num_nodes):
        if value != mid:
            root = insert(value, root)
    return root
def printPreOrder(root):
    """
    Print the tree in pre-order (node, left subtree, right subtree).

    :param root: root of the (sub)tree; None prints nothing
    :return: None
    """
    if root is not None:
        print(root.data)
        printPreOrder(root.left)
        printPreOrder(root.right)
def remove(root, data):
    """
    Remove the node holding ``data`` from the subtree rooted at ``root``.

    A non-leaf node's data is replaced with its in-order predecessor (the
    largest value in the left subtree) or, failing that, its in-order
    successor (the smallest value in the right subtree); that replacement
    value is then recursively removed further down.

    :param root: root of the (sub)tree, or None
    :param data: the value to remove
    :return: the new root of the (sub)tree (None if it became empty)
    """
    if root is None:
        return root
    elif data < root.data:
        # If the data is less than that of the current node
        # We know we have to look to the left
        root.left = remove(root.left, data)
    elif data > root.data:
        # If the data is greater than that of the current node
        # we know that we have to look to the right
        root.right = remove(root.right, data)
    elif data == root.data:
        # We have found the node we are looking for
        # In order to solve this problem the node we are removing (assuming its a non leaf node)
        # will need to be replaced with something. After some experimenting and soul searching
        # you will see that the node we need to replace it with is the closest node to what it was.
        # In other words we need the largest node in the left sub-tree or the smallest node in the
        # Right sub-tree. We are going to worry about swapping the data only to make things a lot easier.
        if root.left is not None:
            # If we know the left subtree exists, we need to look for the largest node in it.
            largest = findBiggest(root.left).data
        elif root.right is not None:
            # If we know the right subtree exists (which at this point means the left does not)
            # means that we need to search for the smallest value in the right subtree.
            largest = findSmallest(root.right).data
        else:
            # If the root is a leaf node then we can easily remove it
            root = None
            return root
        # Search through the list and find the largest value (which will either just remove directly
        # or cause us to remove a few others)
        root = remove(root, largest)
        root.data = largest
    return root
def findBiggest(root):
    """
    Return the node with the largest value in the given subtree.

    :param root: root of the subtree to search (must not be None)
    :return: the right-most Node in the subtree
    """
    node = root
    while node.right is not None:
        node = node.right
    return node
def findSmallest(root):
    """
    Return the node holding the minimum value of the given subtree.
    :param root: The root node of the tree or subtree (must not be None)
    :return: The left-most, i.e. smallest, node of the subtree
    """
    node = root
    while node.left is not None:
        node = node.left
    return node
# Build a 100-node tree, delete a few values, and print what remains.
root = createTree(100)
print('\n\n')
for value in (50, 99, 98, 0):
    root = remove(root, value)
print('\n\n')
printPreOrder(root)
| {
"content_hash": "415a511de963c4291f16c3827f5062ff",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 105,
"avg_line_length": 29.823076923076922,
"alnum_prop": 0.6337374258447253,
"repo_name": "mindm/2017Challenges",
"id": "5f98514cc5abcd1ac0b2289cbf5c8d399295c6cf",
"size": "3919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "challenge_11/python/slandau3/bst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "5192"
},
{
"name": "C",
"bytes": "26511"
},
{
"name": "C#",
"bytes": "3541"
},
{
"name": "C++",
"bytes": "20264"
},
{
"name": "CoffeeScript",
"bytes": "162"
},
{
"name": "Elixir",
"bytes": "10813"
},
{
"name": "Go",
"bytes": "10229"
},
{
"name": "HTML",
"bytes": "5621"
},
{
"name": "Haskell",
"bytes": "7244"
},
{
"name": "Java",
"bytes": "30370"
},
{
"name": "JavaScript",
"bytes": "12944"
},
{
"name": "Julia",
"bytes": "1411"
},
{
"name": "Makefile",
"bytes": "5780"
},
{
"name": "PHP",
"bytes": "3515"
},
{
"name": "Python",
"bytes": "666366"
},
{
"name": "R",
"bytes": "3558"
},
{
"name": "Ruby",
"bytes": "461"
},
{
"name": "Rust",
"bytes": "15962"
},
{
"name": "Scala",
"bytes": "4309"
},
{
"name": "Shell",
"bytes": "5988"
},
{
"name": "Swift",
"bytes": "317"
}
],
"symlink_target": ""
} |
from rewriter import *
from os.path import basename
import copy
from lan_parser import *
from cgen import *
from transformation import *
from analysis import *
# Root folder that contains one sub-folder per benchmark C source.
fileprefix = "../test/C/"
# When True, every driver calls tf.SetNoReadBack() on its Transformation.
SetNoReadBack = True
# When True, the drivers run the optimization passes (Transpose,
# DefineArguments, PlaceInReg, PlaceInLocalMemory) before code generation.
DoOptimizations = True
def LexAndParse(name, createTemp):
    """Lex and parse a benchmark's C++ source; optionally rewrite to a temp file.

    Reads fileprefix + name + '/' + name + 'For.cpp', tokenizes and parses it,
    and (when createTemp is True) rewrites the AST and writes it back as
    'temp<name>.cpp', which is then re-parsed.

    :param name: benchmark name, e.g. 'MatMul'
    :param createTemp: when True, run rw.rewrite() and emit the temp file
    :return: tuple (rw, ast, tempast, tempast2, funcname); tempast/tempast2
        are deep copies of the final AST
    """
    import ply.yacc as yacc
    cparser = yacc.yacc()
    lex.lex()
    run = 1
    while run:
        filename = fileprefix + name + '/' + name + 'For.cpp'
        funcname = basename(os.path.splitext(filename)[0])
        try:
            f = open(filename, 'r')
            s = f.read()
            f.close()
        except EOFError:
            break
        # Drain the lexer once over the whole source (tokens are discarded;
        # presumably this primes/validates the token stream — TODO confirm).
        lex.input(s)
        while 1:
            tok = lex.token()
            if not tok: break
        ast = cparser.parse(s)
        cprint = CGenerator()
        rw = Rewriter()
        rw.initOriginal(ast)
        tempfilename = fileprefix + name + '/'+'temp' +name.lower() + '.cpp'
        if createTemp:
            # Rewrite the AST in place and dump it as the temp source file.
            rw.rewrite(ast, funcname, changeAST = True)
            cprint.createTemp(ast, filename = tempfilename)
        run = 0
        # Re-parse the (possibly rewritten) temp file so the returned ASTs
        # reflect the rewritten source.
        filename = tempfilename
        try:
            f = open(filename, 'r')
            s = f.read()
            f.close()
        except EOFError:
            break
        ast = cparser.parse(s)
    tempast = copy.deepcopy(ast)
    tempast2 = copy.deepcopy(ast)
    return (rw, ast, tempast, tempast2, funcname)
def CGen(name, funcname, an, tempast2, ast, kernelstringname = ''):
    """Emit the OpenCL kernels and the host boilerplate for one benchmark."""
    printer = CGenerator()
    rewriter = an.rw
    an.generate_kernels(tempast2, name, fileprefix)
    boilerplate_ast = rewriter.generateBoilerplateCode(ast)
    printer.createTemp(boilerplate_ast, filename = fileprefix + name + '/' + 'boilerplate.cpp')
def matmul():
    """Run the MatMul benchmark through the transformation pipeline."""
    benchmark = 'MatMul'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    rewriter.initNewRepr(work_ast, dev='CPU')
    transform = Transformation(rewriter)
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        # Standard optimization sequence.
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        analysis.PlaceInLocalMemory()
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
def jacobi():
    """Run the Jacobi benchmark through the transformation pipeline."""
    benchmark = 'Jacobi'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    rewriter.initNewRepr(work_ast, dev='CPU')
    transform = Transformation(rewriter)
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        # Stage the X1 stencil neighbourhood in local memory.
        transform.localMemory(['X1'], west = 1, north = 1, east = 1, south = 1, middle = 0)
        analysis.PlaceInLocalMemory()
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
def knearest():
    """Run the KNearest benchmark through the transformation pipeline."""
    benchmark = 'KNearest'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    transform = Transformation(rewriter)
    # Parallelize over one dimension only (set before the new representation).
    transform.SetParDim(1)
    rewriter.initNewRepr(work_ast, dev='CPU')
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        analysis.PlaceInLocalMemory()
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
def nbody():
    """Run the NBody benchmark through the transformation pipeline."""
    benchmark = 'NBody'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    rewriter.initNewRepr(work_ast, dev='CPU')
    transform = Transformation(rewriter)
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        analysis.PlaceInLocalMemory()
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
def laplace():
    """Run the Laplace benchmark through the transformation pipeline."""
    benchmark = 'Laplace'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    transform = Transformation(rewriter)
    # Parallelize over one dimension only (set before the new representation).
    transform.SetParDim(1)
    rewriter.initNewRepr(work_ast, dev='CPU')
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        analysis.PlaceInLocalMemory()
    else:
        # Without optimizations, 'dim' must still be defined for the kernel.
        transform.SetDefine(['dim'])
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
def gaussian():
    """Run the GaussianDerivates benchmark through the transformation pipeline."""
    benchmark = 'GaussianDerivates'
    rewriter, ast, work_ast, codegen_ast, func = LexAndParse(benchmark, True)
    rewriter.initNewRepr(work_ast, dev='CPU')
    transform = Transformation(rewriter)
    analysis = Analysis(rewriter, transform)
    if DoOptimizations:
        analysis.Transpose()
        analysis.DefineArguments()
        analysis.PlaceInReg()
        analysis.PlaceInLocalMemory()
    if SetNoReadBack:
        transform.SetNoReadBack()
    CGen(benchmark, func, analysis, codegen_ast, ast)
if __name__ == "__main__":
    # Run every benchmark driver in sequence.
    for benchmark_driver in (matmul, jacobi, knearest, nbody, laplace, gaussian):
        benchmark_driver()
| {
"content_hash": "f4c8ac4e295aa606a2fc340d163bf79b",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 128,
"avg_line_length": 25.95145631067961,
"alnum_prop": 0.5680882903105126,
"repo_name": "dikujepsen/OpenTran",
"id": "bd13fd4264372dd2fed0b28bde8f932699833d88",
"size": "5346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2.0/framework/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "109"
},
{
"name": "C",
"bytes": "50583"
},
{
"name": "C++",
"bytes": "2400250"
},
{
"name": "Makefile",
"bytes": "8272"
},
{
"name": "Matlab",
"bytes": "6479"
},
{
"name": "Python",
"bytes": "1125197"
},
{
"name": "Shell",
"bytes": "4560"
}
],
"symlink_target": ""
} |
"""
Helpers to implement a recursive file processing command line script.
Verbosity:
Verbose modes (default: 3)
0: quiet
1: errors only
2: errors and summary
3: errors, changed files, and summary
4: errors, visited files, and summary
5: debug output
"""
from __future__ import print_function
from __future__ import absolute_import
from datetime import datetime
from fnmatch import fnmatch
from optparse import OptionParser
import os
import shutil
import time
from zipfile import ZipFile
# Suffix of the temporary work file each processor callback writes to.
TEMP_SUFFIX = ".$temp"
# Suffix of the backup copy kept when opts.backup is set (non-zip mode).
BACKUP_SUFFIX = ".bak"
def is_text_file(filename, blocksize=512):
    """Heuristically decide whether *filename* is a text file.

    Reads the first *blocksize* bytes and treats the file as binary when a
    NUL byte is found; empty files count as text.  Unreadable files are
    reported as non-text.

    :param filename: path of the file to probe
    :param blocksize: number of leading bytes to inspect
    :return: True if the file looks like text
    """
    try:
        with open(filename, "rb") as f:
            head = f.read(blocksize)
    except IOError:
        return False
    # A NUL byte almost never occurs in text data.  (The original had a
    # redundant empty-file branch followed by an unconditional `return True`.)
    return b"\0" not in head
def increment_data(data, key, inc=1):
    """Add *inc* to ``data[key]`` (starting from 0) when *data* is a dict.

    Silently does nothing for non-dict *data*, matching the defensive style
    used elsewhere in this module.

    :param data: the statistics dict (or anything else, which is ignored)
    :param key: counter name
    :param inc: amount to add (default 1)
    """
    if isinstance(data, dict):
        # dict.get collapses the original two-branch insert/update.
        data[key] = data.get(key, 0) + inc
def is_matching(fspec, match_list):
    """Return True if the name part of fspec matches the pattern (using fnmatch)."""
    if not match_list:
        return False
    assert isinstance(match_list, (tuple, list))
    name = os.path.basename(fspec)
    return any(fnmatch(name, pattern) for pattern in match_list)
# ==============================================================================
# WalkerOptions
# ==============================================================================
class WalkerOptions(object):
    """Common options used by cmd_walker.process().

    This object may be used instead of command line args.
    An implementation should derive its options from this base class and call
    cmd_walker.add_common_options().
    """

    def __init__(self):
        # Keep a backup of every modified file (*.bak or zip, see zip_backup).
        self.backup = True
        # Discard all changes; only report what would be done.
        self.dry_run = False
        # Continue walking folders after an error instead of raising.
        self.ignore_errors = False
        # fnmatch patterns for files/folders to skip (list or None).
        self.ignore_list = None
        # fnmatch patterns a file name must match to be processed.
        self.match_list = None
        # Descend into sub directories.
        self.recursive = False
        # Explicit output file (only valid with a single source file).
        self.target_path = None
        # Verbosity level 0..5, see module docstring.
        self.verbose = 3
        # Collect backups of modified files in one zip archive (implies backup).
        self.zip_backup = False
# ==============================================================================
# Walker
# ==============================================================================
def _process_file(fspec, opts, func, data):
    """Run *func* on one file, honoring ignore patterns, dry-run, and backups.

    *func* is called as ``func(fspec, temp_fspec, opts, data)`` and is expected
    to write its output to *temp_fspec*.  If it returns False (or opts.dry_run
    is set) the temp file is discarded; otherwise the temp file replaces the
    target, optionally keeping a backup (plain ``.bak`` or a shared zip).

    :return: False when the file was ignored, else None
    :raises ValueError: when *fspec* is not an existing file
    """
    # handle --ignore
    if is_matching(fspec, opts.ignore_list):
        data["files_ignored"] += 1
        return False

    fspec = os.path.abspath(fspec)
    if not os.path.isfile(fspec):
        # NOTE(fix): the original constructed this exception without raising it.
        raise ValueError("Invalid fspec: %s" % fspec)

    try:
        target_fspec = os.path.abspath(opts.target_path or fspec)
        assert not fspec.endswith(TEMP_SUFFIX)
        assert not target_fspec.endswith(TEMP_SUFFIX)
        temp_fspec = fspec + TEMP_SUFFIX
        # Remove a stale temp file from a previous (crashed) run.
        if os.path.exists(temp_fspec):
            os.remove(temp_fspec)

        data["files_processed"] += 1
        res = func(fspec, temp_fspec, opts, data)
        if res is not False:
            data["files_modified"] += 1

        if res is False or opts.dry_run:
            # Processor rejected the file (or dry-run mode): discard the temp.
            if os.path.exists(temp_fspec):
                os.remove(temp_fspec)
        elif opts.backup:
            if opts.zip_backup:
                # Archive the previous version inside one shared zip file
                # (opened lazily on first use).
                if os.path.exists(target_fspec):
                    if not data.get("zipfile"):
                        data["zipfile"] = ZipFile(data["zipfile_fspec"], "w")
                    relPath = os.path.relpath(target_fspec, data["zipfile_folder"])
                    data["zipfile"].write(target_fspec, arcname=relPath)
            else:
                # Keep the previous version as '<name>.bak'.
                bakFilePath = "%s%s" % (target_fspec, BACKUP_SUFFIX)
                if os.path.exists(bakFilePath):
                    os.remove(bakFilePath)
                if os.path.exists(target_fspec):
                    shutil.move(target_fspec, bakFilePath)
            shutil.move(temp_fspec, target_fspec)
        else:
            # No backup requested: replace the target outright.
            if os.path.exists(target_fspec):
                os.remove(target_fspec)
            shutil.move(temp_fspec, target_fspec)
    except Exception:
        data["exceptions"] += 1
        raise
def _process_folder(path, opts, func, data):
    """Process matching files inside <path> folder (potentially recursive)."""
    assert opts.match_list
    assert os.path.isdir(path)
    assert not opts.target_path
    data["dirs_processed"] += 1
    try:
        for entry in os.listdir(path):
            child = os.path.join(path, entry)
            child_is_file = os.path.isfile(child)
            # handle --ignore (applies to files and folders alike)
            if is_matching(entry, opts.ignore_list):
                if child_is_file:
                    data["files_ignored"] += 1
                else:
                    data["dirs_ignored"] += 1
                continue
            if not child_is_file:
                # Sub-folders are only visited in recursive mode.
                if opts.recursive:
                    _process_folder(child, opts, func, data)
                continue
            # handle --match (only applied to files)
            if opts.match_list and not is_matching(entry, opts.match_list):
                data["files_ignored"] += 1
                continue
            _process_file(child, opts, func, data)
    except Exception as e:
        if not opts.ignore_errors:
            raise
        if opts.verbose >= 1:
            print("Skipping due to ERROR", e)
def process(args, opts, func, data):
    """Process all files/folders in *args* with *func*, collecting stats in *data*.

    Depending on opts, iterates files directly, one folder non-recursively,
    or folders recursively.  Counters are created with setdefault so callers
    may pre-seed *data*.

    :param args: list of file or folder paths
    :param opts: a WalkerOptions-like object
    :param func: processor callback ``func(fspec, temp_fspec, opts, data)``
    :param data: dict that receives the statistics
    """
    data.setdefault("elapsed", 0)
    data.setdefault("elapsed_string", "n.a.")
    data.setdefault("files_processed", 0)
    data.setdefault("files_modified", 0)
    data.setdefault("files_skipped", 0)  # rejected by processor (e.g. binary or empty files)
    data.setdefault("files_ignored", 0)  # due to --match or --ignore
    data.setdefault("dirs_processed", 0)
    data.setdefault("dirs_ignored", 0)  # due to --ignore
    data.setdefault("lines_processed", 0)
    data.setdefault("lines_modified", 0)
    data.setdefault("bytes_read", 0)
    data.setdefault("bytes_written", 0)  # count 0 for unmodified files
    data.setdefault("bytes_written_if", 0)  # count full bytes for unmodified files
    data.setdefault("exceptions", 0)

    if opts.zip_backup:
        # One timestamped archive per run, created in the source folder.
        zip_folder = os.path.abspath(args[0])
        assert os.path.isdir(zip_folder)
        zip_fspec = os.path.join(
            zip_folder, "backup_{}.zip".format(datetime.now().strftime("%Y%m%d-%H%M%S")))
        data["zipfile_folder"] = zip_folder
        data["zipfile_fspec"] = zip_fspec

    # NOTE(fix): time.clock() was removed in Python 3.8; prefer perf_counter
    # when available (3.3+) and fall back to clock on legacy interpreters.
    _clock = getattr(time, "perf_counter", None) or time.clock
    start = _clock()
    if opts.recursive:
        for path in args:
            _process_folder(path, opts, func, data)
    elif opts.match_list:
        assert len(args) == 1
        _process_folder(args[0], opts, func, data)
    else:
        for f in args:
            _process_file(f, opts, func, data)

    if data.get("zipfile"):
        data["zipfile"].close()
    data["elapsed"] = _clock() - start
    data["elapsed_string"] = "%.3f sec" % data["elapsed"]
def add_common_options(parser):
    """Register the options shared by all cmd_walker based tools on *parser*.

    @param parser: OptionParser
    """
    parser.add_option("-q", "--quiet",
                      action="count", default=0, dest="verboseDecrement",
                      help="decrease verbosity to 2 (use -qq for 1, ...)")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose", default=3,
                      help="increment verbosity to 4 (use -vv for 5, ...)")
    parser.add_option("-n", "--dry-run",
                      action="store_true", dest="dry_run", default=False,
                      help="dry run: just print status messages; don't change anything")
    parser.add_option("-m", "--match",
                      action="append", dest="match_list",
                      help="match this file name pattern (separate by ',' or repeat this option)")
    parser.add_option("-x", "--exclude",
                      action="append", dest="ignore_list",
                      help="skip this file or folder name patterns "
                      "(separate by ',' or repeat this option)")
    parser.add_option("-r", "--recursive",
                      action="store_true", dest="recursive", default=False,
                      help="visit sub directories")
    parser.add_option("-o", "--target",
                      action="store", dest="target_path", default=None,
                      metavar="FILENAME",
                      help="name of output file")
    parser.add_option("-b", "--backup",
                      action="store_true", dest="backup", default=False,
                      help="create backup files (*.bak)")
    parser.add_option("", "--zip-backup",
                      action="store_true", dest="zip_backup", default=False,
                      help="add backups of modified files to a zip-file (implies -b)")
    parser.add_option("", "--ignore-errors",
                      action="store_true", dest="ignore_errors", default=False,
                      help="ignore errors during processing")
    return
def check_common_options(parser, options, args):
    """Preprocess and validate the common command line options.

    Normalizes ``options.match_list`` / ``options.ignore_list`` (patterns may
    be comma separated inside one option), folds the ``-q`` counts into
    ``options.verbose``, and calls ``parser.error()`` for every invalid
    combination.  *args* may be mutated ("." is appended when -m is given
    without a path).

    :return: True when the options are consistent
    """
    def _split_patterns(pattern_args):
        # Split comma separated patterns, dropping duplicates but keeping order.
        result = []
        for chunk in pattern_args:
            for pattern in chunk.split(","):
                if pattern not in result:
                    result.append(pattern)
        return result

    # allow multiple patterns in one -m / -x option (separated by ',')
    if options.match_list:
        options.match_list = _split_patterns(options.match_list)
    if options.ignore_list:
        options.ignore_list = _split_patterns(options.ignore_list)

    if options.match_list and not args:
        args.append(".")

    # decrement verbosity by 1 for every -q option
    if options.verboseDecrement:
        options.verbose = max(0, options.verbose - options.verboseDecrement)
    del options.verboseDecrement

    # --zip-backup implies -b
    if options.zip_backup:
        options.backup = True

    if len(args) < 1:
        parser.error("missing required PATH")
    elif options.target_path and len(args) != 1:
        parser.error("-o option requires exactly one source file")
    elif options.recursive and len(args) < 1:
        parser.error("-r option requires one or more source directories")
    elif options.recursive and not options.match_list:
        parser.error("-r option requires -m")

    for f in args:
        if not os.path.exists(f):
            parser.error("input not found: %r" % f)
        elif os.path.isdir(f) and not options.match_list:
            parser.error("must specify a match pattern, if source is a folder")
        elif os.path.isfile(f) and options.match_list:
            parser.error("must not specify a match pattern, if source is a file")

    if options.target_path and options.match_list:
        parser.error("-m and -o are mutually exclusive")
    if options.zip_backup and not options.backup:
        parser.error("--zip-backup and --no-backup are mutually exclusive")
    elif options.zip_backup and (len(args) != 1 or not os.path.isdir(args[0])):
        parser.error("--zip-backup requires exactly one source directory")
    return True
# ==============================================================================
# Sample processor
# ==============================================================================
def piggify(fspec, target_fspec, opts, data):
    """Sample processor callback; intentionally does nothing."""
    return None
def test():
    """Demo entry point: parse the command line and run the sample processor."""
    # Create option parser for common and custom options
    parser = OptionParser(usage="usage: %prog [options] PATH",
                          version="0.0.1")
    parser.add_option("-c", "--count",
                      action="store", dest="count", default=3,
                      metavar="COUNT",
                      help="number of '.' to prepend (default: %default)")
    add_common_options(parser)
    # Parse command line
    (options, args) = parser.parse_args()
    # Check syntax
    check_common_options(parser, options, args)
    try:
        count = int(options.count)
    except (TypeError, ValueError):
        # NOTE(fix): a bare 'except:' here would also swallow
        # KeyboardInterrupt/SystemExit; only conversion errors mean "invalid".
        count = 0
    if count < 1:
        parser.error("count must be numeric and greater than 1")
    # Call processor
    data = {}
    process(args, options, piggify, data)
if __name__ == "__main__":
    # Allow running this module directly for a quick demo.
    test()
| {
"content_hash": "225447800c1b5727466e095bb361f6a2",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 98,
"avg_line_length": 36.27676240208877,
"alnum_prop": 0.5417446379732258,
"repo_name": "mar10/tabfix",
"id": "240d53fda271339093db3d7300c2eab854c843a4",
"size": "14048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tabfix/cmd_walker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38349"
}
],
"symlink_target": ""
} |
import sys
import unittest
import httplib
from libcloud.compute.drivers.ec2 import EC2NodeDriver, EC2APSENodeDriver
from libcloud.compute.drivers.ec2 import EC2APNENodeDriver, IdempotentParamError
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
from test import MockHttp
from test.compute import TestCaseMixin
from test.file_fixtures import ComputeFileFixtures
from test.secrets import EC2_ACCESS_ID, EC2_SECRET
class EC2Tests(unittest.TestCase, TestCaseMixin):
    """Tests for the EC2 compute driver, using EC2MockHttp canned responses."""

    def setUp(self):
        # Route all HTTP through the mock class; dispatch on the 'Action' param.
        EC2NodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = EC2NodeDriver(EC2_ACCESS_ID, EC2_SECRET)

    def test_create_node(self):
        # create_node should return the instance id from the fixture response.
        image = NodeImage(id='ami-be3adfd7',
                          name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml',
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size)
        self.assertEqual(node.id, 'i-2ba64342')

    def test_create_node_idempotent(self):
        # ex_clienttoken must be echoed back; reusing it with different
        # parameters must raise IdempotentParamError.
        EC2MockHttp.type = 'idempotent'
        image = NodeImage(id='ami-be3adfd7',
                          name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml',
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None, driver=self.driver)
        token = 'testclienttoken'
        node = self.driver.create_node(name='foo', image=image, size=size,
                ex_clienttoken=token)
        self.assertEqual(node.id, 'i-2ba64342')
        self.assertEqual(node.extra['clienttoken'], token)

        # from: http://docs.amazonwebservices.com/AWSEC2/latest/DeveloperGuide/index.html?Run_Instance_Idempotency.html
        #    If you repeat the request with the same client token, but change
        #    another request parameter, Amazon EC2 returns an
        #    IdempotentParameterMismatch error.
        # In our case, changing the parameter doesn't actually matter since we
        # are forcing the error response fixture.
        EC2MockHttp.type = 'idempotent_mismatch'

        idem_error = None
        try:
            self.driver.create_node(name='foo', image=image, size=size,
                    ex_mincount='2', ex_maxcount='2', # different count
                    ex_clienttoken=token)
        except IdempotentParamError, e:
            idem_error = e
        self.assertTrue(idem_error is not None)

    def test_create_node_no_availability_zone(self):
        # create_node works both with and without an explicit location.
        image = NodeImage(id='ami-be3adfd7',
                          name='ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml',
                          driver=self.driver)
        size = NodeSize('m1.small', 'Small Instance', None, None, None, None,
                        driver=self.driver)
        node = self.driver.create_node(name='foo', image=image, size=size)
        location = NodeLocation(0, 'Amazon US N. Virginia', 'US', self.driver)
        self.assertEqual(node.id, 'i-2ba64342')
        node = self.driver.create_node(name='foo', image=image, size=size,
                                       location=location)
        self.assertEqual(node.id, 'i-2ba64342')

    def test_list_nodes(self):
        node = self.driver.list_nodes()[0]
        public_ips = sorted(node.public_ip)
        self.assertEqual(node.id, 'i-4382922a')
        self.assertEqual(len(node.public_ip), 2)
        self.assertEqual(public_ips[0], '1.2.3.4')
        self.assertEqual(public_ips[1], '1.2.3.5')

    def test_list_location(self):
        locations = self.driver.list_locations()
        self.assertTrue(len(locations) > 0)
        self.assertTrue(locations[0].availability_zone != None)

    def test_reboot_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.reboot_node(node)
        self.assertTrue(ret)

    def test_destroy_node(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        ret = self.driver.destroy_node(node)
        self.assertTrue(ret)

    def test_list_sizes(self):
        # Every region exposes the shared size list; only us-east-1 adds the
        # two cluster-compute sizes.
        region_old = self.driver.region_name
        for region_name in [ 'us-east-1', 'us-west-1', 'eu-west-1',
                             'ap-southeast-1' ]:
            self.driver.region_name = region_name
            sizes = self.driver.list_sizes()

            ids = [s.id for s in sizes]
            self.assertTrue('t1.micro' in ids)
            self.assertTrue('m1.small' in ids)
            self.assertTrue('m1.large' in ids)
            self.assertTrue('m1.xlarge' in ids)
            self.assertTrue('c1.medium' in ids)
            self.assertTrue('c1.xlarge' in ids)
            self.assertTrue('m2.xlarge' in ids)
            self.assertTrue('m2.2xlarge' in ids)
            self.assertTrue('m2.4xlarge' in ids)

            if region_name == 'us-east-1':
                self.assertEqual(len(sizes), 11)
                self.assertTrue('cg1.4xlarge' in ids)
                self.assertTrue('cc1.4xlarge' in ids)
            else:
                self.assertEqual(len(sizes), 9)

        # Restore so later tests see the original region.
        self.driver.region_name = region_old

    def test_list_images(self):
        images = self.driver.list_images()
        image = images[0]
        self.assertEqual(len(images), 1)
        self.assertEqual(image.name, 'ec2-public-images/fedora-8-i386-base-v1.04.manifest.xml')
        self.assertEqual(image.id, 'ami-be3adfd7')

    def test_ex_list_availability_zones(self):
        availability_zones = self.driver.ex_list_availability_zones()
        availability_zone = availability_zones[0]
        self.assertTrue(len(availability_zones) > 0)
        self.assertEqual(availability_zone.name, 'eu-west-1a')
        self.assertEqual(availability_zone.zone_state, 'available')
        self.assertEqual(availability_zone.region_name, 'eu-west-1')

    def test_ex_describe_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        tags = self.driver.ex_describe_tags(node)

        self.assertEqual(len(tags), 3)
        self.assertTrue('tag' in tags)
        self.assertTrue('owner' in tags)
        self.assertTrue('stack' in tags)

    def test_ex_create_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_create_tags(node, {'sample': 'tag'})

    def test_ex_delete_tags(self):
        node = Node('i-4382922a', None, None, None, None, self.driver)
        self.driver.ex_delete_tags(node, {'sample': 'tag'})

    def test_ex_describe_addresses_for_node(self):
        # Three fixture nodes: one address, two addresses, and none.
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        ip_addresses1 = self.driver.ex_describe_addresses_for_node(node1)
        node2 = Node('i-4382922b', None, None, None, None, self.driver)
        ip_addresses2 = sorted(self.driver.ex_describe_addresses_for_node(node2))
        node3 = Node('i-4382922g', None, None, None, None, self.driver)
        ip_addresses3 = sorted(self.driver.ex_describe_addresses_for_node(node3))

        self.assertEqual(len(ip_addresses1), 1)
        self.assertEqual(ip_addresses1[0], '1.2.3.4')

        self.assertEqual(len(ip_addresses2), 2)
        self.assertEqual(ip_addresses2[0], '1.2.3.5')
        self.assertEqual(ip_addresses2[1], '1.2.3.6')

        self.assertEqual(len(ip_addresses3), 0)

    def test_ex_describe_addresses(self):
        node1 = Node('i-4382922a', None, None, None, None, self.driver)
        node2 = Node('i-4382922g', None, None, None, None, self.driver)
        nodes_elastic_ips1 = self.driver.ex_describe_addresses([node1])
        nodes_elastic_ips2 = self.driver.ex_describe_addresses([node2])

        self.assertEqual(len(nodes_elastic_ips1), 1)
        self.assertTrue(node1.id in nodes_elastic_ips1)
        self.assertEqual(nodes_elastic_ips1[node1.id], ['1.2.3.4'])

        self.assertEqual(len(nodes_elastic_ips2), 1)
        self.assertTrue(node2.id in nodes_elastic_ips2)
        self.assertEqual(nodes_elastic_ips2[node2.id], [])
class EC2MockHttp(MockHttp):
    """Mock HTTP layer that serves canned EC2 API XML fixtures.

    Each handler is selected by the request's 'Action' query parameter
    (see use_param in EC2Tests.setUp); the optional class attribute 'type'
    prefixes the method name (e.g. _idempotent_RunInstances).
    """

    fixtures = ComputeFileFixtures('ec2')

    def _DescribeInstances(self, method, url, body, headers):
        body = self.fixtures.load('describe_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeAvailabilityZones(self, method, url, body, headers):
        body = self.fixtures.load('describe_availability_zones.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RebootInstances(self, method, url, body, headers):
        body = self.fixtures.load('reboot_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeImages(self, method, url, body, headers):
        body = self.fixtures.load('describe_images.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _RunInstances(self, method, url, body, headers):
        body = self.fixtures.load('run_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _idempotent_RunInstances(self, method, url, body, headers):
        body = self.fixtures.load('run_instances_idem.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _idempotent_mismatch_RunInstances(self, method, url, body, headers):
        # Simulates the IdempotentParameterMismatch error response.
        body = self.fixtures.load('run_instances_idem_mismatch.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.BAD_REQUEST])

    def _TerminateInstances(self, method, url, body, headers):
        body = self.fixtures.load('terminate_instances.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeTags(self, method, url, body, headers):
        body = self.fixtures.load('describe_tags.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _CreateTags(self, method, url, body, headers):
        body = self.fixtures.load('create_tags.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DeleteTags(self, method, url, body, headers):
        body = self.fixtures.load('delete_tags.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _DescribeAddresses(self, method, url, body, headers):
        body = self.fixtures.load('describe_addresses_multi.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class EC2APSETests(EC2Tests):
    """Re-run the full EC2 test suite against the AP South-East driver."""

    def setUp(self):
        EC2APSENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = EC2APSENodeDriver(EC2_ACCESS_ID, EC2_SECRET)
class EC2APNETests(EC2Tests):
    """Re-run the full EC2 test suite against the AP North-East driver."""

    def setUp(self):
        EC2APNENodeDriver.connectionCls.conn_classes = (None, EC2MockHttp)
        EC2MockHttp.use_param = 'Action'
        EC2MockHttp.type = None
        self.driver = EC2APNENodeDriver(EC2_ACCESS_ID, EC2_SECRET)
if __name__ == '__main__':
    # Run the module's test suite and propagate the exit status.
    sys.exit(unittest.main())
| {
"content_hash": "d97c29757bbec9d7762979f9093249f5",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 119,
"avg_line_length": 42.96911196911197,
"alnum_prop": 0.6366250336957499,
"repo_name": "cloudkick/libcloud",
"id": "ca193fd4cd91ffdd9bebc4c24abf0207140f9c6d",
"size": "11910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/compute/test_ec2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "574113"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
import json
import os
import time
import phonenumbers
import psycopg2
from ndoh_hub.constants import LANGUAGES
def get_addresses(addresses):
    """Return the usable msisdns of an identity, formatted as E.164.

    An address flagged as default wins outright; otherwise every valid,
    not-opted-out address is kept.
    """
    msisdn_details = addresses.get("msisdn") or {}
    result = []
    for raw, info in msisdn_details.items():
        try:
            parsed = phonenumbers.parse(raw, "ZA")
            assert phonenumbers.is_possible_number(parsed)
            assert phonenumbers.is_valid_number(parsed)
            formatted = phonenumbers.format_number(
                parsed, phonenumbers.PhoneNumberFormat.E164
            )
        except Exception:
            # Unparseable or invalid number: skip it.
            continue
        if info.get("default"):
            # The default address overrides all others.
            return [formatted]
        if not info.get("optedout"):
            result.append(formatted)
    return result
def process_identity(identities, id, details, failed_msgs_count):
    """Flatten one identity-store row into *identities*, keyed by uuid.

    Rows without a usable (or with a redacted) msisdn are dropped.
    """
    details = details or {}
    msisdns = get_addresses(details.get("addresses", {}))
    if not msisdns or "redacted" in msisdns:
        return
    record = {
        "msisdns": msisdns,
        "failed_msgs_count": failed_msgs_count,
        "uuid": id,
    }
    identities[id] = record
    # Copy the simple detail fields straight across when present.
    for field in (
        "operator_id",
        "passport_no",
        "passport_origin",
        "consent",
        "sa_id_no",
        "mom_given_name",
        "mom_family_name",
        "faccode",
        "id_type",
    ):
        if details.get(field):
            record[field] = details[field]
    language = (
        details.get("lang_code")
        or details.get("language")
        or details.get("preferred_language")
    )
    if language and language in LANGUAGES:
        record["language"] = language.rstrip("_ZA")
    risk = details.get("pmtct", {}).get("risk_status", None)
    if risk:
        record["pmtct_risk"] = risk
    dob = details.get("mom_dob") or details.get("dob")
    if dob:
        record["mom_dob"] = dob
def process_optout(identities, id, created, reason):
    """Record an optout on a known identity, keeping only the newest one."""
    identity = identities.get(id)
    if not identity:
        return
    created_iso = created.isoformat()
    existing = identity.get("optout_timestamp")
    if existing and existing > created_iso:
        # A more recent optout is already stored; ignore this one.
        return
    identity["optout_timestamp"] = created_iso
    identity["optout_reason"] = reason
def process_registration(identities, id, data):
    """Fold a registration row's fields into an already-collected identity."""
    identity = identities.get(id)
    if not identity:
        return
    # Registrations never overwrite values already on the identity.
    for field in (
        "edd",
        "faccode",
        "id_type",
        "mom_dob",
        "mom_given_name",
        "mom_family_name",
        "msisdn_device",
        "passport_no",
        "passport_origin",
        "sa_id_no",
        "consent",
    ):
        if data.get(field) and not identity.get(field):
            identity[field] = data[field]
    if data.get("baby_dob"):
        dobs = identity.get("baby_dobs")
        if dobs:
            dobs.append(data["baby_dob"])
        else:
            identity["baby_dobs"] = [data["baby_dob"]]
    device_uuid = data.get("uuid_device") or data.get("operator_id")
    if device_uuid and not identity.get("msisdn_device"):
        try:
            identity["msisdn_device"] = identities[device_uuid]["msisdns"][0]
        except Exception:
            # Device identity unknown or has no msisdn: leave the field unset.
            pass
    if data.get("language") and not identity.get("language"):
        if data["language"] in LANGUAGES:
            identity["language"] = data["language"].rstrip("_ZA")
def process_change(identities, id, action, data, created):
    """Apply an optout or baby-switch change event to a known identity."""
    identity = identities.get(id)
    if not identity:
        return
    created_iso = created.isoformat()
    if "optout" in action:
        existing = identity.get("optout_timestamp")
        if existing and existing > created_iso:
            # A more recent optout is already recorded.
            return
        identity["optout_timestamp"] = created_iso
        if data.get("reason"):
            identity["optout_reason"] = data["reason"]
    elif action == "baby_switch":
        dobs = identity.get("baby_dobs")
        if dobs:
            dobs.append(created_iso)
        else:
            identity["baby_dobs"] = [created_iso]
def process_subscription(identities, id, name, created_at):
    """Derive channel and messaging-type flags from a subscription name."""
    identity = identities.get(id)
    if not identity:
        return
    created_iso = created_at.isoformat()
    if "whatsapp" in name:
        identity["channel"] = "WhatsApp"
    elif not identity.get("channel"):
        # SMS is the default; any WhatsApp subscription overrides it.
        identity["channel"] = "SMS"
    if "pmtct" in name:
        identity["pmtct_messaging"] = "TRUE"
    elif "loss" in name:
        # The reason is the last underscore-part of the first dotted segment.
        identity["optout_reason"] = name.split(".")[0].split("_")[-1]
        identity["optout_timestamp"] = created_iso
        identity["loss_messaging"] = "TRUE"
    elif (
        "momconnect_prebirth.patient" in name
        or "momconnect_prebirth.hw_partial" in name
    ):
        identity["public_messaging"] = "TRUE"
        identity["public_registration_date"] = created_iso
    elif "momconnect_prebirth.hw_full" in name:
        # The trailing character carries the message-set number.
        identity["prebirth_messaging"] = name[-1]
    elif "momconnect_postbirth.hw_full" in name:
        identity["postbirth_messaging"] = "TRUE"
def merge_dicts(d1, d2):
    """Merge *d2* into *d1* in place and return *d1*.

    List values are concatenated (existing entries of *d1* first); any other
    value from *d2* simply overwrites the corresponding key in *d1*.

    Note: *d1* is mutated and also returned for call-site convenience.
    """
    for key, value in d2.items():
        # isinstance instead of type(...) == list: idiomatic, and also
        # handles list subclasses the same way as plain lists.
        if isinstance(value, list):
            d1[key] = d1.get(key, []) + value
        else:
            d1[key] = value
    return d1
def deduplicate_msisdns(identities):
    """Re-key identities by MSISDN, merging identities that share a number.

    Each identity's "msisdns" list is consumed (popped off the identity) and
    the remaining fields are folded into one record per MSISDN: list fields
    concatenate, scalar fields overwrite.  Progress is printed roughly once
    per second; the final count is printed when done.
    """
    merged: dict = {}
    total = 0
    start = time.time()
    last_report = start
    for identity in identities.values():
        for msisdn in identity.pop("msisdns"):
            record = merged.setdefault(msisdn, {"msisdn": msisdn})
            # Same merge rules as merge_dicts(): concatenate lists,
            # overwrite scalars.
            for key, value in identity.items():
                if type(value) is list:
                    record[key] = record.get(key, []) + value
                else:
                    record[key] = value
            now = time.time()
            if now - last_report > 1:
                print(
                    f"\rProcessed {total} msisdns at {total/(now - start):.0f}/s",
                    end="",
                )
                last_report = now
            total += 1
    print(f"\nProcessed {total} msisdns in {time.time() - start:.0f}s")
    return merged
if __name__ == "__main__":
    # Collect every identity from the identity store, enrich it with opt-outs,
    # hub registrations/changes and active subscriptions, then write one JSON
    # object per msisdn to results.json (JSON Lines format).
    identities: dict = {}
    # All databases are reached on localhost:7000 — presumably a tunnel or
    # port-forward; TODO confirm.  Passwords come from the environment.
    conn = psycopg2.connect(
        dbname="identitystore",
        user="identitystore",
        password=os.environ["IDENTITY_PASS"],
        host="localhost",
        port=7000,
    )
    # Named cursors are server-side in psycopg2, so large result sets stream
    # instead of being fetched into memory in one go.
    cursor = conn.cursor("identity_store_identities")
    print("Processing identities...")
    cursor.execute(
        """
        SELECT
            id, details, failed_message_count
        FROM
            identities_identity
        """
    )
    total = 0
    start, d_print = time.time(), time.time()
    for (id, details, failed_msgs_count) in cursor:
        process_identity(identities, id, details, failed_msgs_count)
        # Throttled progress output: at most one console update per second.
        if time.time() - d_print > 1:
            print(
                f"\rProcessed {total} identities at "
                f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nProcessed {total} identities in {time.time() - start:.0f}s")
    print("Processing opt outs...")
    cursor = conn.cursor("identity_store_optouts")
    cursor.execute(
        """
        SELECT
            identity_id, created_at, reason
        FROM
            identities_optout
        """
    )
    total = 0
    start, d_print = time.time(), time.time()
    for (id, created, reason) in cursor:
        process_optout(identities, id, created, reason)
        if time.time() - d_print > 1:
            print(
                f"\rProcessed {total} optouts at {total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nProcessed {total} optouts in {time.time() - start:.0f}s")
    print("Processing Registrations...")
    # Switch to the hub database for registrations and changes.
    conn = psycopg2.connect(
        dbname="hub",
        user="hub",
        password=os.environ["HUB_PASS"],
        host="localhost",
        port=7000,
    )
    cursor = conn.cursor("hub_registrations")
    # Oldest-first ordering — presumably so later registrations overwrite
    # earlier values inside process_registration; confirm.
    cursor.execute(
        """
        SELECT
            registrant_id, data
        FROM
            registrations_registration
        WHERE
            validated=true
        ORDER BY
            created_at ASC
        """
    )
    total = 0
    start, d_print = time.time(), time.time()
    for (id, data) in cursor:
        process_registration(identities, id, data)
        if time.time() - d_print > 1:
            print(
                f"\rProcessed {total} registrations at "
                f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nProcessed {total} registrations in {time.time() - start:.0f}s")
    print("Processing Changes...")
    cursor = conn.cursor("hub_changes")
    cursor.execute(
        """
        SELECT
            registrant_id, action, data, created_at
        FROM
            changes_change
        WHERE
            validated=true
        ORDER BY
            created_at ASC
        """
    )
    total = 0
    start, d_print = time.time(), time.time()
    for (id, action, data, created) in cursor:
        process_change(identities, id, action, data, created)
        if time.time() - d_print > 1:
            print(
                f"\rProcessed {total} changes at {total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nProcessed {total} changes in {time.time() - start:.0f}s")
    print("Processing subscriptions...")
    # Third database: the stage-based messaging service.
    conn = psycopg2.connect(
        dbname="stage_based_messaging",
        user="stage_based_messaging",
        password=os.environ["STAGE_PASS"],
        host="localhost",
        port=7000,
    )
    cursor = conn.cursor("stage_subscriptions")
    # Only currently-active, incomplete subscriptions in a ready state
    # (process_status=0).
    cursor.execute(
        """
        SELECT
            subscription.identity, messageset.short_name, subscription.created_at
        FROM
            subscriptions_subscription as subscription
        JOIN
            contentstore_messageset as messageset
        ON
            subscription.messageset_id = messageset.id
        WHERE
            subscription.active=true and
            subscription.completed=false and
            subscription.process_status=0
        """
    )
    total = 0
    start, d_print = time.time(), time.time()
    for (id, name, created) in cursor:
        process_subscription(identities, id, name, created)
        if time.time() - d_print > 1:
            print(
                f"\rProcessed {total} subscriptions at "
                f"{total/(time.time() - start):.0f}/s",
                end="",
            )
            d_print = time.time()
        total += 1
    print(f"\nProcessed {total} subscriptions in {time.time() - start:.0f}s")
    print("Deduplicating msisdns")
    # Re-keys the collected data from identity UUID to msisdn.
    identities = deduplicate_msisdns(identities)
    print("Writing results to file..")
    start = time.time()
    # One JSON object per line, one line per msisdn.
    with open("results.json", "w") as f:
        for i in identities.values():
            f.write(json.dumps(i))
            f.write("\n")
    print(f"Wrote results to file in {time.time() - start:.0f}s")
| {
"content_hash": "6a7130f7c8a4ce37bc6a5b3934a9772d",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 86,
"avg_line_length": 29.695054945054945,
"alnum_prop": 0.5564807105190119,
"repo_name": "praekeltfoundation/ndoh-hub",
"id": "ba23de293dafe8d8235bcc59b70f3ff9c1b49c2f",
"size": "10809",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/migrate_to_rapidpro/collect_information.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "2200"
},
{
"name": "Python",
"bytes": "957306"
},
{
"name": "Shell",
"bytes": "2796"
}
],
"symlink_target": ""
} |
""" run with
nosetests -v --nocapture
or
nosetests -v
"""
from cloudmesh.config.cm_config import cm_config
from cloudmesh.config.mock_user import mock_user
from cloudmesh.config.mock_cloud import mock_cloud
from cloudmesh.openstack_grizzly_cloud import openstack_grizzly_cloud
import mock_keystone
import json
import os
import warnings
import pprint
from cloudmesh_base.util import HEADING
class Test_cloudmesh_keystone:
    """Tests for cm_config initialisation with mocked user/cloud handlers.

    NOTE(review): these tests read ``self.config``, which is assumed to be
    set up elsewhere (e.g. a setup method not shown here) — confirm.
    """

    def test18_initialize(self):
        """Initialising with mock handlers populates the full cloudmesh tree."""
        HEADING()
        username = 'misterbojangles'
        self.config.userdata_handler = mock_user
        self.config.cloudcreds_handler = mock_cloud
        self.config.initialize(username)
        assert 'cloudmesh' in self.config.data
        assert len(self.config.data.keys()) == 1
        cmdata = self.config.data['cloudmesh']
        # Every top-level section must exist after initialisation.
        for section in ('prefix', 'profile', 'keys', 'projects', 'active',
                        'default', 'clouds', 'security'):
            assert section in cmdata
        assert 'username' in cmdata['profile']
        assert cmdata['profile']['username'] == username
        assert 'default' in cmdata['keys']
        for cloud in ('india-openstack', 'sierra'):
            assert cloud in cmdata['clouds']
        sierra = cmdata['clouds']['sierra']
        assert 'credentials' in sierra
        assert sierra['credentials']['OS_VERSION'] == 'grizzly'
        assert sierra['credentials']['OS_USERNAME'] == username
        assert cmdata['prefix'] == username

    def test19_openstack_grizzly(self):
        """The grizzly cloud handler, backed by a mocked keystone client,
        fills in the sierra credentials for the user."""
        HEADING()
        username = 'misterbojangles'
        self.config.userdata_handler = mock_user
        self.config.cloudcreds_handler = openstack_grizzly_cloud
        # Replace the real keystone client with the mock and seed it with
        # the currently active tenants.
        self.config.cloudcreds_handler._client = mock_keystone.Client
        self.config.cloudcreds_handler._client.mockusername = username
        self.config.cloudcreds_handler._client.mocktenants = self.config.data[
            'cloudmesh']['active']
        self.config.initialize(username)
        cmdata = self.config.data['cloudmesh']
        sierra = cmdata['clouds']['sierra']
        assert sierra['credentials']['OS_VERSION'] == 'grizzly'
        assert sierra['credentials']['OS_USERNAME'] == username
        assert 'OS_PASSWORD' in sierra['credentials']
        assert 'project' in sierra['default']
| {
"content_hash": "175b67b754448a2224ff182791e621f0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 33.8,
"alnum_prop": 0.642603550295858,
"repo_name": "rajpushkar83/cloudmesh",
"id": "dc7659c606583a6a38ff8c0feadd53f4b77a59a7",
"size": "2535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_keystone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# Command-line flags; FLAGS.test_tmpdir is used below for temp directories.
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
# Session config with GPU devices masked out (device_count {"GPU": 0}).
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
  """End-to-end tests converting a bidirectional sequence RNN to TFLite.

  Each test builds a small MNIST classifier around stacked bidirectional
  RNN cells (static or dynamic, with or without sequence lengths), trains
  it briefly, rebuilds it as a batch-1 inference graph, converts that graph
  with TFLiteConverter, and checks that TFLite inference closely matches
  the TensorFlow session's output.
  """

  def __init__(self, *args, **kwargs):
    super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
    # Define constants
    # Unrolled through 28 time steps
    self.time_steps = 28
    # Rows of 28 pixels
    self.n_input = 28
    # Learning rate for Adam optimizer
    self.learning_rate = 0.001
    # MNIST is meant to be classified in 10 classes(0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # Rnn Units.
    self.num_units = 16

  def setUp(self):
    super(BidirectionalSequenceRnnTest, self).setUp()
    # Import MNIST dataset into a fresh temporary directory per test.
    data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    self.mnist = input_data.read_data_sets(data_dir, one_hot=True)

  def buildRnnLayer(self):
    """Returns a two-layer stack of TFLite-compatible RNN cells."""
    return tf.keras.layers.StackedRNNCells([
        tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
        tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
    ])

  def buildModel(self,
                 fw_rnn_layer,
                 bw_rnn_layer,
                 is_dynamic_rnn,
                 is_inference,
                 use_sequence_length=False):
    """Build Mnist recognition model.

    Args:
      fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
        rnn cell.
      bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
        rnn cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: Whether to build an inference graph; if True the batch
        size is fixed to 1, and sequence_length is dropped on the static
        RNN path (no wrapper support for it at inference).
      use_sequence_length: Whether to use sequence length or not. Default to
        False.

    Returns:
      A tuple containing:

      - Input tensor of the model.
      - Prediction tensor of the model.
      - Output class tensor of the model.
    """
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    batch_size = self.batch_size
    if is_inference:
      batch_size = 1
    # input image placeholder
    x = tf.placeholder(
        "float", [batch_size, self.time_steps, self.n_input],
        name="INPUT_IMAGE")

    sequence_length = None
    if use_sequence_length:
      sequence_length = [self.time_steps] * batch_size
    if is_dynamic_rnn:
      # dynamic_rnn here is fed time-major input: (time, batch, features).
      rnn_inputs = tf.transpose(x, [1, 0, 2])
      outputs, _ = bidirectional_dynamic_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          sequence_length,
          dtype="float32",
          time_major=True)
      fw_outputs, bw_outputs = outputs
      # Concatenate forward/backward outputs and take the last time step.
      output = tf.concat([fw_outputs, bw_outputs], 2)
      output = tf.unstack(output, axis=0)
      output = output[-1]
    else:
      rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for static since we don't have a
      # wrapper for it. At training phase, we can still have sequence_length,
      # but inference phase, we change it to None.
      if is_inference:
        sequence_length = None
      outputs, _, _ = tf.nn.static_bidirectional_rnn(
          fw_rnn_layer,
          bw_rnn_layer,
          rnn_inputs,
          dtype="float32",
          sequence_length=sequence_length)
      output = outputs[-1]

    # Compute logits by multiplying output of shape [batch_size,num_units*2]
    # by the softmax layer's out_weight of shape [num_units*2,n_classes]
    # plus out_bias
    prediction = tf.matmul(output, out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class

  def trainModel(self, x, prediction, output_class, sess):
    """Train the model.

    Args:
      x: The input tensor.
      prediction: The prediction class tensor.
      output_class: The output tensor.
      sess: The graph session.
    """
    # input label placeholder
    y = tf.placeholder("float", [None, self.n_classes])
    # Loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Optimization
    opt = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)
    # shuffle=False keeps the batch sequence deterministic across runs.
    for _ in range(TRAIN_STEPS):
      batch_x, batch_y = self.mnist.train.next_batch(
          batch_size=self.batch_size, shuffle=False)
      batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                 self.n_input))
      sess.run(opt, feed_dict={x: batch_x, y: batch_y})

  def saveAndRestoreModel(self,
                          fw_rnn_layer,
                          bw_rnn_layer,
                          sess,
                          saver,
                          is_dynamic_rnn,
                          use_sequence_length=False):
    """Saves and restores the model to mimic the most common use case.

    Args:
      fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
        rnn cell.
      bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
        rnn cell.
      sess: Old session.
      saver: Saver created by tf.compat.v1.train.Saver()
      is_dynamic_rnn: Use dynamic_rnn or not.
      use_sequence_length: Whether to use sequence length or not. Default to
        False.

    Returns:
      A tuple containing:

      - Input tensor of the restored model.
      - Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
      - new session of the restored model.
    """
    model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    saver.save(sess, model_dir)

    # Reset the graph.
    tf.reset_default_graph()
    # Rebuild with is_inference=True (batch size 1) and restore the trained
    # weights into the new graph.
    x, prediction, output_class = self.buildModel(
        fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)

    new_sess = tf.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess

  def getInferenceResult(self, x, output_class, sess):
    """Get inference result given input tensor and output tensor.

    Args:
      x: The input tensor.
      output_class: The output tensor.
      sess: Current session.

    Returns:
      A tuple containing:

      - Input of the next batch, batch size is 1.
      - Expected output.
    """
    b1, _ = self.mnist.train.next_batch(batch_size=1)
    sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
    # TensorFlow's own output is the reference the TFLite result is
    # compared against.
    expected_output = sess.run(output_class, feed_dict={x: sample_input})
    return sample_input, expected_output

  def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
    """Get tflite inference result.

    This method will convert tensorflow from session to tflite model then based
    on the inputs, run tflite inference and return the results.

    Args:
      sess: Current tensorflow session.
      test_inputs: The test inputs for tflite.
      input_tensor: The input tensor of tensorflow graph.
      output_tensor: The output tensor of tensorflow graph.

    Returns:
      The tflite inference result.
    """
    converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
                                                     [output_tensor])
    tflite = converter.convert()

    interpreter = tf.lite.Interpreter(model_content=tflite)
    interpreter.allocate_tensors()

    input_index = interpreter.get_input_details()[0]["index"]
    interpreter.set_tensor(input_index, test_inputs)
    interpreter.invoke()

    output_index = interpreter.get_output_details()[0]["index"]
    result = interpreter.get_tensor(output_index)
    # Reset all variables so it will not pollute other inferences.
    interpreter.reset_all_variables()
    return result

  def testStaticRnnMultiRnnCell(self):
    """Static bidirectional stacked RNN round-trips through TFLite."""
    sess = tf.Session(config=CONFIG)

    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
    self.trainModel(x, prediction, output_class, sess)

    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)

    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)

    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  def testStaticRnnMultiRnnCellWithSequenceLength(self):
    """Static bidirectional RNN with sequence lengths matches TFLite."""
    sess = tf.Session(config=CONFIG)

    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        False,
        is_inference=False,
        use_sequence_length=True)
    self.trainModel(x, prediction, output_class, sess)

    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        False,
        use_sequence_length=True)

    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)

    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  @test_util.enable_control_flow_v2
  def testDynamicRnnMultiRnnCell(self):
    """Dynamic bidirectional stacked RNN round-trips through TFLite."""
    sess = tf.Session(config=CONFIG)

    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
    self.trainModel(x, prediction, output_class, sess)

    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        is_dynamic_rnn=True)

    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)

    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))

  @test_util.enable_control_flow_v2
  def testDynamicRnnMultiRnnCellWithSequenceLength(self):
    """Dynamic bidirectional RNN with sequence lengths matches TFLite."""
    sess = tf.Session(config=CONFIG)

    x, prediction, output_class = self.buildModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        True,
        is_inference=False,
        use_sequence_length=True)
    self.trainModel(x, prediction, output_class, sess)

    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(),
        self.buildRnnLayer(),
        sess,
        saver,
        is_dynamic_rnn=True,
        use_sequence_length=True)

    test_inputs, expected_output = self.getInferenceResult(
        x, output_class, new_sess)

    result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
  # Run the test cases under the TensorFlow test runner.
  test.main()
| {
"content_hash": "c7bbf8502d6a843495254c7d5aae6517",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 84,
"avg_line_length": 33.25648414985591,
"alnum_prop": 0.6494800693240901,
"repo_name": "ghchinoy/tensorflow",
"id": "a9bfc4ee8a1021d0657709a994ae345b6beee351",
"size": "12229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.