| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
dNG-git/pas_http_user | src/dNG/module/controller/user/module.py | Python | mpl-2.0 | 2,059 | 0.005828 | # -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;http;user
This Source Code Form is subject to the terms of the Mozilla Public License,
v. 2.0. If a copy of the MPL was not distributed with this file, You can
obtain one at http://mozilla.org/MPL/2.0/.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;mpl2
----------------------------------------------------------------------------
#echo(pasHttpUserVersion)#
#echo(__FILEPATH__)#
"""
from dNG.data.settings import Settings
from dNG.data.translatable_exception import TranslatableException
from dNG.database.connection import Connection
from dNG.module.controller.abstract_http import AbstractHttp as AbstractHttpController
class Module(AbstractHttpController):
"""
Module for "user"
:author: direct Netware Group et al.
:copyright: (C) direct Netware Group - All rights reserved
:package: pas.http
:subpackage: user
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;mpl2
Mozilla Public License, v. 2.0
"""
def __init__(self):
"""
Constructor __init__(Module)
:since: v0.2.00
"""
AbstractHttpController.__init__(self)
Settings.read_file("{0}/settings/pas_http_user.json".format(Settings.get("path_data")))
#
def execute(self):
"""
Execute the requested action.
:since: v0.2.00
"""
# pylint: disable=broad-except
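# NOTE: the request-scoped database connection is opened up front so that any
# failure is surfaced to the client as a translatable "core_database_error".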
try: database = Connection.get_instance()
except Exception as handled_exception:
if (self.log_handler is not None): self.log_handler.error(handled_exception, context = "pas_http_site")
raise TranslatableException("core_database_error", _exception = handled_exception)
#
with database: return AbstractHttpController.execute(self)
#
#
|
sadanandb/pmt | src/tactic/triggers/data_trigger.py | Python | epl-1.0 | 2,434 | 0.002465 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["DataUpdateTrigger"]
from pyasm.command import Trigger
# needed for the 'expression' op below (Search.eval); missing in the original
from pyasm.search import Search
from tactic.command import PythonCmd
class DataValidationTrigger(Trigger):
def execute(my):
sobject = my.get_current_sobject()
class DataUpdateTrigger(Trigger):
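# The trigger data dict drives the behavior below: "op" selects the mode
# ("join", "part" or "expression"); "src_cols"/"src_col" and "dst_col" name
# the columns involved; "index" and "expression" configure the last two modes.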
def get_args_keys(my):
return {
}
def execute(my):
input = my.get_input()
if input.get("mode") == 'delete':
return
print "input: ", input
sobject = input.get("sobject")
trigger_sobj = my.get_trigger_sobj()
data = my.get_trigger_data()
print "data: ", data
op = data.get("op")
assert op
print "op: ", op
if op == 'join':
src_cols = data.get("src_cols")
dst_col = data.get("dst_col")
src_cols = src_cols.split("|")
delimiter = "_"
values = []
for col in src_cols:
value = sobject.get_value(col)
values.append(value)
value = delimiter.join(values)
print "value: ", value
sobject.set_value(dst_col, value)
sobject.commit()
elif op == 'part':
src_col = data.get("src_col")
dst_col = data.get("dst_col")
index = data.get("index")
if index:
index = int(index)
value = sobject.get_value(src_col)
delimiter = "_"
parts = value.split(delimiter)
value = parts[index]
sobject.set_value(dst_col, value)
sobject.commit()
elif op == 'expression':
# use the full expression language
dst_col = data.get("dst_col")
# {@GET(.sequence_code)}_{@GET(.name)}
# or
# {@UPPER(.name)}
expression = data.get("expression")
value = Search.eval(expression, sobject)
sobject.set_value(dst_col, value)
sobject.commit()
else:
return
if __name__ == '__main__':
trigger = DataUpdateTrigger()
|
ActiveCoders/platformerSkel | pyGamePlatformerSkeletonV1.py | Python | gpl-3.0 | 250 | 0.008 | # Title
# Author
# ActiveCoders.club
# import external modules
import pygame
# setup global constants
# setup global variables
# setup classes
class PlayerSprite(pygame.sprite.Sprite):
pass
class PlayerControl():
pass
# main game loop
|
NvanAdrichem/networkx | networkx/readwrite/gexf.py | Python | bsd-3-clause | 36,773 | 0.001713 | # Copyright (C) 2013-2016 by
#
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# Based on GraphML NetworkX GraphML reader
"""Read and write graphs in GEXF format.
GEXF (Graph Exchange XML Format) is a language for describing complex
network structures, their associated data and dynamics.
This implementation does not support mixed graphs (directed and
undirected edges together).
Format
------
GEXF is an XML format. See http://gexf.net/format/schema.html for the
specification and http://gexf.net/format/basic.html for examples.
"""
import itertools
import time
import networkx as nx
from networkx.utils import open_file, make_str
try:
from xml.etree.cElementTree import Element, ElementTree, SubElement, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, SubElement, tostring
except ImportError:
pass
__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']
@open_file(1, mode='wb')
def write_gexf(G, path, encoding='utf-8', prettyprint=True, version='1.1draft'):
"""Write G in GEXF format to path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or file name to write.
File names ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gexf(G, "test.gexf")
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint,
version=version)
writer.add_graph(G)
writer.write(path)
def generate_gexf(G, encoding='utf-8', prettyprint=True, version='1.1draft'):
"""Generate lines of GEXF format representation of G.
"GEXF (Graph Exchange XML Format) is a language for describing
complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed=\n
>>> s = linefeed.join(nx.generate_gexf(G)) # doctest: +SKIP
>>> for line in nx.generate_gexf(G): # doctest: +SKIP
... print line
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
for line in str(writer).splitlines():
yield line
@open_file(0, mode='rb')
def read_gexf(path, node_type=None, relabel=False, version='1.1draft'):
"""Read graph in GEXF format from path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex network structures, their associated data and dynamics" [1]_.
Parameters
----------
path : file or string
File or file name to write.
File names ending in .gz or .bz2 will be compressed.
node_type: Python type (default: None)
Convert node ids to this type if not None.
relabel : bool (default: False)
If True relabel the nodes to use the GEXF node "label" attribute
instead of the node "id" attribute as the NetworkX node label.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together).
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
reader = GEXFReader(node_type=node_type, version=version)
if relabel:
G = relabel_gexf_graph(reader(path))
else:
G = reader(path)
return G
class GEXF(object):
versions = {}
d = {'NS_GEXF': "http://www.gexf.net/1.1draft",
'NS_VIZ': "http://www.gexf.net/1.1draft/viz",
'NS_XSI': "http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION': ' '.join(['http://www.gexf.net/1.1draft',
'http://www.gexf.net/1.1draft/gexf.xsd']),
'VERSION':'1.1'}
versions['1.1draft'] = d
d = {'NS_GEXF': "http://www.gexf.net/1.2draft",
'NS_VIZ': "http://www.gexf.net/1.2draft/viz",
'NS_XSI': "http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION': ' '.join(['http://www.gexf.net/1.2draft',
'http://www.gexf.net/1.2draft/gexf.xsd']),
'VERSION':'1.2'}
versions['1.2draft'] = d
types = [(int, "integer"),
(float, "float"),
(float, "double"),
(bool, "boolean"),
(list, "string"),
(dict, "string")]
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
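# On Python 2, chr() only accepts 0-255, so chr(1245) raises ValueError and
# the except branch registers the Python 2-only types (long/unicode); on
# Python 3 the call succeeds and str covers those cases.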
types.extend([
(int, "long"),
(str, "liststring"),
(str, "anyURI"),
(str, "string")])
except ValueError: # Python 2.6+
types.extend([
(long, "long"),
(str, "liststring"),
(str, "anyURI"),
(str, "string"),
(unicode, "liststring"),
(unicode, "anyURI"),
(unicode, "string")])
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
# http://www.w3.org/TR/xmlschema-2/#boolean
convert_bool = {
'true': True, 'false': False,
'True': True, 'False': False,
'0': False, 0: False,
'1': True, 1: True
}
def set_version(self, version):
d = self.versions.get(version)
if d is None:
raise nx.NetworkXError('Unknown GEXF version %s.' % version)
self.NS_GEXF = d['NS_GEXF']
self.NS_VIZ = d['NS_VIZ']
self.NS_XSI = d['NS_XSI']
self.SCHEMALOCATION = d['SCHEMALOCATION']
self.VERSION = d['VERSION']
self.version = version
class GEXFWriter(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(self, graph=None, encoding='utf-8', prettyprint=True,
version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF writer requires '
'xml.etree.ElementTree')
self.prettyprint = prettyprint
self.encoding = encoding
self.set_version(version)
self.xml = Element('gexf',
{'xmlns': self.NS_GEXF,
'xmlns:xsi': self.NS_XSI,
'xmlns:viz': self.NS_VIZ,
'xsi:schemaLocation': self.SCHEMALOCATION,
'version': self.VERSION})
# counters for edge and attribute identifiers
self.edge_id = itertools.count()
self.attr_id = itertools.count()
# default attributes are stored in dictionaries
self.attr = {}
self.attr['node'] = {}
self.attr['edge'] = |
sn-amber/mylpp | scripts/dview.py | Python | gpl-2.0 | 382 | 0.005236 | #!/usr/bin/python
# Script: dview.py
# Purpose: launch vcr tool on LAMMPS dump files
# Syntax: dview.py dump.1 dump.2 ...
# files = one or more dump files
# Example: dview.py dump.*
# Author: Steve Plimpton (Sandia)
# main script
# NOTE: the imports below are an assumption; dview.py normally runs inside the
# Pizza.py toolkit environment, which provides argv and the dump/gl/vcr classes
from sys import argv
from dump import dump
from gl import gl
from vcr import vcr
if len(argv) < 2:
raise StandardError, "Syntax: dview.py dump.1 ..."
files = ' '.join(argv[1:])
d = dump(files)
g = gl(d)
v = vcr(g)
|
rakeshmi/tempest | tempest/services/telemetry/json/telemetry_client.py | Python | apache-2.0 | 5,632 | 0 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.common import service_client
class TelemetryClient(service_client.ServiceClient):
version = '2'
uri_prefix = "v2"
def deserialize(self, body):
return json.loads(body.replace("\n", ""))
def serialize(self, body):
return json.dumps(body)
def add_sample(self, sample_list, meter_name, meter_unit, volume,
sample_type, resource_id, **kwargs):
sample = {"counter_name": meter_name, "counter_unit": meter_unit,
"counter_volume": volume, "counter_type": sample_type,
"resource_id": resource_id}
for key in kwargs:
sample[key] = kwargs[key]
sample_list.append(self.serialize(sample))
return sample_list
def create_sample(self, meter_name, sample_list):
uri = "%s/meters/%s" % (self.uri_prefix, meter_name)
body = self.serialize(sample_list)
resp, body = self.post(uri, body)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def _helper_list(self, uri, query=None, period=None):
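# Builds a Ceilometer-style simple query: a (field, op, value) triple maps
# onto the q.field/q.op/q.value request parameters, with an optional
# statistics period appended alongside them.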
uri_dict = {}
if query:
uri_dict = {'q.field': query[0],
'q.op': query[1],
'q.value': query[2]}
if period:
uri_dict['period'] = period
if uri_dict:
uri += "?%s" % urllib.urlencode(uri_dict)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBodyList(resp, body)
def list_resources(self, query=None):
uri = '%s/resources' % self.uri_prefix
return self._helper_list(uri, query)
def list_meters(self, query=None):
uri = '%s/meters' % self.uri_prefix
return self._helper_list(uri, query)
def list_alarms(self, query=None):
uri = '%s/alarms' % self.uri_prefix
return self._helper_list(uri, query)
def list_statistics(self, meter, period=None, query=None):
uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter)
return self._helper_list(uri, query, period)
def list_samples(self, meter_id, query=None):
uri = '%s/meters/%s' % (self.uri_prefix, meter_id)
return self._helper_list(uri, query)
def list_events(self, query=None):
uri = '%s/events' % self.uri_prefix
return self._helper_list(uri, query)
def show_resource(self, resource_id):
uri = '%s/resources/%s' % (self.uri_prefix, resource_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def show_alarm(self, alarm_id):
uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def delete_alarm(self, alarm_id):
uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
if body:
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def create_alarm(self, **kwargs):
uri = "%s/alarms" % self.uri_prefix
body = self.serialize(kwargs)
resp, body = self.post(uri, body)
self.expected_success(201, resp.status)
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def update_alarm(self, alarm_id, **kwargs):
uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
body = self.serialize(kwargs)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBody(resp, body)
def show_alarm_state(self, alarm_id):
uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBodyData(resp, body)
def alarm_set_state(self, alarm_id, state):
uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
body = self.serialize(state)
resp, body = self.put(uri, body)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBodyData(resp, body)
def show_alarm_history(self, alarm_id):
uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = self.deserialize(body)
return service_client.ResponseBodyList(resp, body)
|
jdubs/cloud-custodian | tests/test_asg.py | Python | apache-2.0 | 14,780 | 0.000135 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import boto3
from .common import BaseTest
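# These tests run against recorded AWS API responses ("flight data"): each
# replay_flight_data() call loads the canned responses for one scenario, so
# no live AWS account is needed when the tests replay.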
class LaunchConfigTest(BaseTest):
def test_config_unused(self):
factory = self.replay_flight_data('test_launch_config_unused')
p = self.load_policy({
'name': 'unused-cfg',
'resource': 'launch-config',
'filters': [{'type': 'unused'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['LaunchConfigurationName'],
'CloudClusterCopy')
def test_config_delete(self):
factory = self.replay_flight_data('test_launch_config_delete')
p = self.load_policy({
'name': 'delete-cfg',
'resource': 'launch-config',
'filters': [{
'LaunchConfigurationName': 'CloudClusterCopy'}],
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['LaunchConfigurationName'],
'CloudClusterCopy')
class AutoScalingTest(BaseTest):
def get_ec2_tags(self, ec2, instance_id):
results = ec2.describe_tags(
Filters=[
{'Name': 'resource-id',
'Values': [instance_id]},
{'Name': 'resource-type',
'Values': ['instance']}])['Tags']
return {t['Key']: t['Value'] for t in results}
def test_asg_delete(self):
factory = self.replay_flight_data('test_asg_delete')
p = self.load_policy({
'name': 'asg-delete',
'resource': 'asg',
'filters': [
{'AutoScalingGroupName': 'ContainersFTW'}],
'actions': [{'type': 'delete', 'force': True}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['AutoScalingGroupName'], 'ContainersFTW')
def test_asg_non_encrypted_filter(self):
factory = self.replay_flight_data('test_asg_non_encrypted_filter')
p = self.load_policy({
'name': 'asg-encrypted-filter',
'resource': 'asg',
'filters': [{'type': 'not-encrypted'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['Unencrypted'], ['Image', 'LaunchConfig'])
def test_asg_image_age_filter(self):
factory = self.replay_flight_data('test_asg_image_age_filter')
p = self.load_policy({
'name': 'asg-cfg-filter',
'resource': 'asg',
'filters': [
{'type': 'image-age',
'days': 90}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_asg_config_filter(self):
factory = self.replay_flight_data('test_asg_config_filter')
p = self.load_policy({
'name': 'asg-cfg-filter',
'resource': 'asg',
'filters': [
{'type': 'launch-config',
'key': 'ImageId',
'value': 'ami-9abea4fb'}]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_asg_vpc_filter(self):
factory = self.replay_flight_data('test_asg_vpc_filter')
p = self.load_policy({
'name': 'asg-vpc-filter',
'resource': 'asg',
'filters': [
{'type': 'vpc-id',
'value': 'vpc-399e3d52'}]
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]['LaunchConfigurationName'], 'CustodianASGTest')
def test_asg_tag_and_propagate(self):
factory = self.replay_flight_data('test_asg_tag')
p = self.load_policy({
'name': 'asg-tag',
'resource': 'asg',
'filters': [
{'tag:Platform': 'ubuntu'}],
'actions': [
{'type': 'tag',
'key': 'CustomerId', 'value': 'GetSome',
'propagate': True},
{'type': 'propagate-tags',
'trim': True, 'tags': ['CustomerId', 'Platform']}
]
}, session_factory=factory)
session = factory()
client = session.client('autoscaling')
# Put an orphan tag on an instance
result = client.describe_auto_scaling_groups()[
'AutoScalingGroups'].pop()
ec2 = session.client('ec2')
instance_id = result['Instances'][0]['InstanceId']
ec2.create_tags(
Resources=[instance_id],
Tags=[{'Key': 'Home', 'Value': 'Earth'}])
# Run the policy
resources = p.run()
self.assertEqual(len(resources), 1)
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: (t['Value'], t['PropagateAtLaunch'])
for t in result['Tags']}
self.assertTrue('CustomerId' in tag_map)
self.assertEqual(tag_map['CustomerId'][0], 'GetSome')
self.assertEqual(tag_map['CustomerId'][1], True)
tag_map = self.get_ec2_tags(ec2, instance_id)
self.assertTrue('CustomerId' in tag_map)
self.assertFalse('Home' in tag_map)
def test_asg_remove_tag(self):
factory = self.replay_flight_data('test_asg_remove_tag')
p = self.load_policy({
'name': 'asg-remove-tag',
'resource': 'asg',
'filters': [
{'tag:CustomerId': 'not-null'}],
'actions': [
{'type': 'remove-tag',
'key': 'CustomerId'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: (t['Value'], t['PropagateAtLaunch'])
for t in result['Tags']}
self.assertFalse('CustomerId' in tag_map)
def test_asg_mark_for_op(self):
factory = self.replay_flight_data('test_asg_mark_for_op')
p = self.load_policy({
'name': 'asg-mark-for-op',
'resource': 'asg',
'filters': [
{'tag:Platform': 'ubuntu'}],
'actions': [
{'type': 'mark-for-op', 'key': 'custodian_action',
'op': 'suspend', 'days': 1}
],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory().client('autoscaling')
result = client.describe_auto_scaling_groups(
AutoScalingGroupNames=[resources[0]['AutoScalingGroupName']])[
'AutoScalingGroups'].pop()
tag_map = {t['Key']: t['Value'] for t in result['Tags']}
self.assertTrue('custodian_action' in tag_map)
self.assertTrue('suspend@' in tag_map['custodian_action'])
def test_asg_rename_tag(self):
factory = self.replay_flight_data('test_asg_rename')
p = self.load_policy({
|
reinforceio/tensorforce | tensorforce/core/parameters/__init__.py | Python | apache-2.0 | 1,826 | 0.001643 | # Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from functools import partial
from tensorforce.core.parameters.parameter import Parameter
from tensorforce.core.parameters.constant import Constant
from tensorforce.core.parameters.decaying import Decaying
from tensorforce.core.parameters.exponential import Exponential
from tensorforce.core.parameters.linear import Linear
from tensorforce.core.parameters.ornstein_uhlenbeck import OrnsteinUhlenbeck
from tensorforce.core.parameters.piecewise_constant import PiecewiseConstant
from tensorforce.core.parameters.random import Random
parameter_modules = dict(
constant=Constant, decaying=Decaying, default=Constant, exponential=Exponential, linear=Linear,
ornstein_uhlenbeck=OrnsteinUhlenbeck, piecewise_constant=PiecewiseConstant, random=Random
)
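# The loop below registers the named decay schedules as thin aliases: each is
# the Decaying module with its "decay" argument pre-bound via functools.partial.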
for name in (
'polynomial', 'inverse_time', 'cosine', 'cosine_restarts', 'linear_cosine',
'linear_cosine_noisy'
):
assert name not in parameter_modules
parameter_modules[name] = partial(Decaying, decay=name)
__all__ = [
'Constant', 'Decaying', 'Exponential', 'Linear', 'OrnsteinUhlenbeck', 'Parameter',
'parameter_modules', 'PiecewiseConstant', 'Random'
]
|
evonove/evonove.it | django-website/home/migrations/0005_auto_20160512_0713.py | Python | bsd-3-clause | 518 | 0.001931 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 07:13
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0004_auto_20160511_0845'),
]
operations = [
migrations.AlterField(
model_name='teammember',
name='bio',
field=wagtail.core.fields.RichTextField(help_text='The team member bio', max_length=360),
),
]
|
eflee/blobapy | blobapy/aws.py | Python | mit | 69 | 0 | import boto3
session = boto3.session.Session(profile_name="blobapy")
| |
OpenGuide/Python-Guide-for-Beginners | SimpleAddition/Sum_2_numbers.py | Python | mit | 661 | 0.001513 |
# first let's get 2 numbers to add
print "enter 2 numbers to add"
# getting the first number
number1 = raw_input("Ok enter the first number ")
# and now the second number
number2 = raw_input("and now the second number ")
# finally calculating the addition of the numbers
# sum = number1 + number2
# seems to be right
# but as the inputs are taken as strings we need to type cast them
# that is convert number1 and number2 to integer from string
# so lets do that
number1 = int(number1)
number2 = int(number2)
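# (for example, "2" + "3" would give the string "23", while int("2") + int("3") gives 5)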
# and now lets calculate the sum
sum = number1 + number2
# and now lets print the numbers
print "the addition of the 2 numbers is "
print sum
|
warnes/irrigatorpro | irrigator_pro/farms/migrations/0025_default_rain_irrigation_to_null.py | Python | mit | 2,383 | 0.002518 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('farms', '0024_rain_and_irrigation_allow_null'),
]
operations = [
migrations.AlterField(
model_name='probereading',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='probereading',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterhistory',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterhistory',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterregister',
name='irrigation',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Irrigation in inches'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterregister',
name='rain',
field=models.DecimalField(decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0'))], max_digits=4, blank=True, null=True, verbose_name=b'Rainfall in inches'),
preserve_default=True,
),
]
|
MozillaSecurity/FuzzManager | server/covmanager/tests/conftest.py | Python | mpl-2.0 | 6,179 | 0.001618 | # coding: utf-8
'''Common test fixtures
@author: Jesse Schwartzentruber (:truber)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
import logging
import os
import shutil
import subprocess
import tempfile
import pytest
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from covmanager.models import Collection, CollectionFile, Repository
from crashmanager.models import Client, Tool, User as cmUser
LOG = logging.getLogger("fm.covmanager.tests")
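# Probe for git/hg on PATH by running them with no arguments: a bare "git"
# exits with status 1 (its usage message), while a bare "hg" exits with 0,
# hence the different return-code checks below.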
def _check_git():
try:
proc = subprocess.Popen(["git"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()
if output and proc.wait() == 1:
return True
except OSError: # FileNotFoundError
pass
return False
def _check_hg():
try:
proc = subprocess.Popen(["hg"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()
if output and proc.wait() == 0:
return True
except OSError: # FileNotFoundError
pass
return False
HAVE_GIT = _check_git()
HAVE_HG = _check_hg()
@pytest.fixture
def covmanager_test(db): # pylint: disable=invalid-name,unused-argument
"""Common setup/teardown tasks for all server unittests"""
user = User.objects.create_user('test', 'test@mozilla.com', 'test')
user.user_permissions.clear()
content_type = ContentType.objects.get_for_model(cmUser)
perm = Permission.objects.get(content_type=content_type, codename='view_covmanager')
user.user_permissions.add(perm)
user_np = User.objects.create_user('test-noperm', 'test@mozilla.com', 'test')
user_np.user_permissions.clear()
@pytest.fixture
def cm(request, settings, tmpdir):
class _result(object):
have_git = HAVE_GIT
have_hg = HAVE_HG
@classmethod
def create_repository(cls, repotype, name="testrepo"):
location = tempfile.mkdtemp(prefix='testrepo', dir=os.path.dirname(__file__))
request.addfinalizer(lambda: shutil.rmtree(location))
if repotype == "git":
if not HAVE_GIT:
pytest.skip("git is not available")
classname = "GITSourceCodeProvider"
elif repotype == "hg":
if not HAVE_HG:
pytest.skip("hg is not available")
classname = "HGSourceCodeProvider"
else:
raise Exception("unknown repository type: %s (expecting git or hg)" % repotype)
result = Repository.objects.create(classname=classname, name=name, location=location)
LOG.debug("Created Repository pk=%d", result.pk)
if repotype == "git":
cls.git(result, "init")
elif repotype == "hg":
cls.hg(result, "init")
return result
@staticmethod
def create_collection_file(data):
# Use a specific temporary directory to upload covmanager files
# This is required as Django now needs a path relative to that folder in FileField
location = str(tmpdir)
CollectionFile.file.field.storage.location = location
tmp_fd, path = tempfile.mkstemp(suffix=".data", dir=location)
os.close(tmp_fd)
with open(path, "w") as fp:
fp.write(data)
result = CollectionFile.objects.create(file=os.path.basename(path))
LOG.debug("Created CollectionFile pk=%d", result.pk)
return result
@classmethod
def create_collection(cls,
created=None,
description="",
repository=None,
revision="",
branch="",
tools=("testtool",),
client="testclient",
coverage='{"linesTotal":0,'
'"name":null,'
'"coveragePercent":0.0,'
'"children":{},'
'"linesMissed":0,'
'"linesCovered":0}'):
# create collectionfile
coverage = cls.create_collection_file(coverage)
# create client
client, created = Client.objects.get_or_create(name=client)
if created:
LOG.debug("Created Client pk=%d", client.pk)
# create repository
if repository is None:
repository = cls.create_repository("git")
result = Collection.objects.create(description=description,
repository=repository,
revision=revision,
branch=branch,
client=client,
coverage=coverage)
LOG.debug("Created Collection pk=%d", result.pk)
# create tools
for tool in tools:
tool, created = Tool.objects.get_or_create(name=tool)
if created:
LOG.debug("Created Tool pk=%d", tool.pk)
result.tools.add(tool)
return result
@staticmethod
def git(repo, *args):
path = os.getcwd()
try:
os.chdir(repo.location)
return subprocess.check_output(["git"] + list(args)).decode("utf-8")
finally:
os.chdir(path)
@staticmethod
def hg(repo, *args):
path = os.getcwd()
try:
os.chdir(repo.location)
return subprocess.check_output(["hg"] + list(args)).decode("utf-8")
finally:
os.chdir(path)
return _result()
|
mariuszlitwin/frets | frets.py | Python | apache-2.0 | 4,690 | 0.011301 | #!/usr/bin/python3
from colorama import Fore, Back
class frets:
tuning = list()
max_string_name_len = 0
frets_count = 0
strings = dict()
NOTES = ('E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C', 'C#', 'D', 'D#')
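# Octaves above the open string are marked with '^' suffixes, so 'E^' below
# means the E one octave above the open E.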
def __init__(self,
tuning=('E', 'A', 'D', 'G'),
frets_count=24):
self.tuning = tuning
self.frets_count = frets_count
for string in tuning:
if len(string) > self.max_string_name_len:
self.max_string_name_len = len(string)
padding = ''
self.strings[string] = list()
starting_note = self.NOTES.index(string) + 1
for i in range(frets_count):
padding = '^' * int(((starting_note + i) / len(self.NOTES)))
self.strings[string].append(self.NOTES[(starting_note + i) % len(self.NOTES)] + padding)
#print('{}{} ({}) = {}'.format(string,
# i,
# int(((starting_note + i) / len(self.NOTES))),
# self.NOTES[(starting_note + i) % len(self.NOTES)] + padding))
def debug_strings(self):
print(self.strings)
def show_me_plz(self,
seek_note=None,
seek_string=None):
if (seek_string):
seek_note = self.strings[seek_string[0]][int(seek_string[1]) - 1]
upper_seek_note = None
lower_seek_note = None
if seek_note and seek_note.endswith('^'):
lower_seek_note = seek_note[0:-1]
if seek_note:
upper_seek_note = seek_note + '^'
upper_found_position = list()
found_position = list()
lower_found_position = list()
print(Fore.WHITE + \
' ' * (self.max_string_name_len + 2),
end='')
for fret_nr in range(1, self.frets_count + 1):
print(Fore.WHITE + \
(' ' * (4 - len(str(fret_nr)))) + str(fret_nr),
end='')
print(Fore.YELLOW + '|', end='')
print('')
for string in reversed(self.tuning):
color = Fore.WHITE + Back.BLACK
if string == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + "0")
elif string == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + "0")
elif string == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + "0")
print(color + \
(' ' * (self.max_string_name_len - len(string))) + \
string, end='')
print(Fore.YELLOW + '||', end='')
fret_nr = 1
for note in self.strings[string]:
color = Fore.WHITE + Back.BLACK
if note == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + str(fret_nr))
elif note == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + str(fret_nr))
elif note == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + str(fret_nr))
print(color + \
note[0:4] + \
'-' * (4 - len(note)), end='')
print(Fore.YELLOW + Back.BLACK + '|', end='')
fret_nr += 1
print(Fore.WHITE + Back.BLACK + '')
print(Fore.WHITE + '\n')
print(Back.CYAN + ' ' + Back.BLACK + \
' Found octave-higher note {} on: {}'.format(upper_seek_note,
upper_found_position))
print(Back.RED + ' ' + Back.BLACK + \
' Found note {} on: {}'.format(seek_note,
found_position))
print(Fore.WHITE + \
Back.MAGENTA + ' ' + Back.BLACK + \
' Found octave-lower note {} on: {}'.format(lower_seek_note,
lower_found_position))
|
devonjones/PSRD-Parser | src/json_loader.py | Python | gpl-3.0 | 414 | 0.019324 | #!/usr/bin/env python
import sys
import os
import re
import json
import sqlite3
from psrd.options import load_option_parser, exec_load_main
from psrd.loader import load_documents
def main():
usage = "usage: %prog [options] [filenames]\nImports sections into psrd db from feat json files."
parser = load_option_parser(usage)
exec_load_main(parser, load_documents)
if __name__ == "__main__":
sys.exit(main())
|
motord/Motorcycle-Diaries | lib/apiclient/oauth.py | Python | bsd-3-clause | 9,765 | 0.005735 | # Copyright 2010 Google Inc. All Rights Reserved.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
"""Base error for this module."""
pass
class RequestError(Error):
"""Error occurred during request."""
pass
class MissingParameter(Error):
pass
class CredentialsInvalidError(Error):
pass
def _abstract():
raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
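# Hypothetical usage sketch: given a discovery dict whose 'request' entry
# lists an 'oauth_callback' parameter,
#   _oauth_uri('request', discovery, {'oauth_callback': 'oob'})
# would yield "<request url>?oauth_callback=oob".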
class Credentials(object):
"""Base class for all Credentials objects.
Subclasses must define an authorize() method
that applies the credentials to an HTTP transport.
"""
def authorize(self, http):
"""Take an httplib2.Http instance (or equivalent) and
authorizes it for the set of credentials, usually by
replacing http.request() with a method that adds in
the appropriate headers and then delegates to the original
Http.request() method.
"""
_abstract()
class Flow(object):
"""Base class for all Flow objects."""
pass
class Storage(object):
"""Base class for all Storage objects.
Store and retrieve a single credential.
"""
def get(self):
"""Retrieve credential.
Returns:
apiclient.oauth.Credentials
"""
_abstract()
def put(self, credentials):
"""Write a credential.
Args:
credentials: Credentials, the credentials to store.
"""
_abstract()
class OAuthCredentials(Credentials):
"""Credentials object for OAuth 1.0a
"""
def __init__(self, consumer, token, user_agent):
"""
consumer - An instance of oauth.Consumer.
token - An instance of oauth.Token constructed with
the access token and secret.
user_agent - The HTTP User-Agent to provide for this application.
"""
self.consumer = consumer
self.token = token
self.user_agent = user_agent
self.store = None
# True if the credentials have been revoked
self._invalid = False
@property
def invalid(self):
"""True if the credentials are invalid, such as being revoked."""
return getattr(self, "_invalid", False)
def set_store(self, store):
"""Set the storage for the credential.
Args:
store: callable, a callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has been revoked.
"""
self.store = store
def __getstate__(self):
"""Trim the state down to something that can be pickled.
"""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled.
"""
self.__dict__.update(state)
self.store = None
def authorize(self, http):
"""
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth
subclass of httplib2.Authentication because
it never gets passed the absolute URI, which is
needed for signing. So instead we have to overload
'request' with a closure that adds in the
Authorization header and then calls the original version
of 'request()'.
"""
request_orig = http.request
signer = oauth.SignatureMethod_HMAC_SHA1()
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the appropriate
Authorization header."""
response_code = 302
http.follow_redirects = False
while response_code in [301, 302]:
req = oauth.Request.from_consumer_and_token(
self.consumer, self.token, http_method=method, http_url=uri)
req.sign_request(signer, self.consumer, self.token)
if headers is None:
headers = {}
headers.update(req.to_header())
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
response_code = resp.status
if response_code in [301, 302]:
uri = resp['location']
# Update the stored credential if it becomes invalid.
if response_code == 401:
logging.info('Access token no longer valid: %s' % content)
self._invalid = True
if self.store is not None:
self.store(self)
raise CredentialsInvalidError("Credentials are no longer valid.")
return resp, content
http.request = new_request
return http
class FlowThreeLegged(Flow):
"""Does the Three Legged Dance for OAuth 1.0a.
"""
def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
**kwargs):
"""
discovery - Section of the API discovery document that describes
the OAuth endpoints.
consumer_key - OAuth consumer key
consumer_secret - OAuth consumer secret
user_agent - The HTTP User-Agent that identifies the application.
**kwargs - The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.discovery = discovery
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.user_agent = user_agent
self.params = kwargs
self.request_token = {}
required = {}
for uriinfo in discovery.itervalues():
for name, value in uriinfo['parameters'].iteritems():
if value['required'] and not name.startswith('oauth_'):
required[name] = 1
for key in required.iterkeys():
if key not in self.params:
raise MissingParameter('Required parameter %s not supplied' % key)
def step1_get_authorize_url(self, oauth_callback='oob'):
"""Returns a URI to redirect to the provider.
oauth_callback - Either the string 'oob' for a non-web-based application,
or a URI that handles the callback from the authorization
server.
If oauth_callback is 'oob' then pass in the
generated verification code to step2_exchange,
otherwise pass in the query parameters received
at the callback uri to step2_exchange.
"""
consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
client = oauth.Client(consumer)
headers = {
'user-agent': self.user_agent,
'content-type': 'application/x-www-form-urlencoded'
}
body = urllib.urlencode({'oauth_callback': oauth_callback})
uri = _oauth_uri('request', self.discovery, self.params)
resp, content = client.request(uri, 'POST', headers=headers,
body=body)
if resp['status'] != '200':
logging.error('Failed to retrieve temporary authorization: |
domovilam/pimucha | piHAcontrollers/ctrlfcts/tsduofct.py | Python | gpl-3.0 | 2,304 | 0.013455 | # ----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Huynh Vi Lam <domovilam@gmail.com>
#
# This file is part of pimucha.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# TellStick Duo additionnal functions
# with Python 2.7.x and 3.x
# ----------------------------------------------------------------------------
# Company Telldus Technologies
# ----------------------------
# - Website: http://www.telldus.com
# - Documentation and source code TellStick Duo is used as base for development of scripts
# ----------------------------------------------------------------------------
import logging,time
logger = logging.getLogger()
RequestCmd = {
'STAT':('V+',5,'Status : Version request "V+"'),
'VERS':('V+',5,'Version request "V+"'),
}
# 'STAT':([0x56,0x2B],5,'Status : Version request "V+"'),
# 'VERS':([0x56,0x2B],5,'Version request "V+"'),
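# requestcmd() below writes the ASCII command byte by byte, waits briefly,
# then reads back up to 100 bytes; replies starting with b"+W" are
# asynchronous RF events rather than the direct response, so they are
# returned whole instead of being truncated to the expected length.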
def requestcmd(dev,rcmd):
if rcmd in RequestCmd:
seq, lenresp, msg = RequestCmd[rcmd]
logger.debug("Request %s to controller...",msg)
bytes = [ord(i) for i in seq]
for byte in bytes:
dev.write(byte)
time.sleep(0.3)
res = dev.read(100)
if len(res) == 0:
return None
if res[0:2] == b"+W":
logger.debug("Event incoming from controller : %s",repr(res))
r = res
else:
r = res[0:lenresp]
logger.debug("Response from controller : %s",repr(r))
return r
return None
|
mlflow/mlflow | mlflow/projects/backend/loader.py | Python | apache-2.0 | 1,079 | 0.00278 | import entrypoints
import logging
from mlflow.projects.backend.local import LocalBackend
ENTRYPOINT_GROUP_NAME = "mlflow.project_backend"
__logger__ = logging.getLogger(__name__)
# Statically register backend defined in mlflow
MLFLOW_BACKENDS = {
"local": LocalBackend,
}
def load_backend(backend_name):
# Static backends
if backend_name in MLFLOW_BACKENDS:
return MLFLOW_BACKENDS[backend_name]()
# backends from plugin
try:
backend_builder = entrypoints.get_single(ENTRYPOINT_GROUP_NAME, backend_name).load()
return backend_builder()
except entrypoints.NoSuchEntryPoint:
# TODO Should be a error when all backends are migrated here
available_entrypoints = entrypoints.get_group_all(ENTRYPOINT_GROUP_NAME)
available_plugins = [entrypoint.name for entrypoint in available_entrypoints]
__logger__.warning(
"Backend '%s' is not available. Available plugins are %s",
backend_name,
available_plugins + list(MLFLOW_BACKENDS.keys()),
)
return None
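# Illustrative sketch (hypothetical names): a third-party backend is exposed
# through a setuptools entry point in the plugin's setup.py, e.g.
#
#   entry_points={
#       "mlflow.project_backend": [
#           "mybackend = my_plugin.backend:MyBackendBuilder",
#       ],
#   }
#
# after which load_backend("mybackend") resolves it via entrypoints.get_single().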
|
Andr3iC/juriscraper | opinions/united_states/state/nmctapp_slip.py | Python | bsd-2-clause | 435 | 0 | import nm_p
class Site(nm_p.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.url = 'http://www.nmcompcomm.us/nmcases/NMCASlip.aspx'
self.court_id = self.__module__
def _get_docket_numbers(self):
path = '//table[@id="GridView1"]/tr/td[3]//text()'
return list(self.html.xpath(path))
def _get_neutral_citations(self):
return None
|
jwhui/openthread | tests/scripts/thread-cert/pktverify/layer_fields.py | Python | bsd-3-clause | 26,646 | 0.0006 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import sys
import time
from typing import Any, Union
from pyshark.packet.fields import LayerFieldsContainer, LayerField
from pyshark.packet.packet import Packet as RawPacket
from pktverify.addrs import EthAddr, ExtAddr, Ipv6Addr
from pktverify.bytes import Bytes
from pktverify.consts import VALID_LAYER_NAMES
from pktverify.null_field import nullField
def _auto(v: Union[LayerFieldsContainer, LayerField]):
"""parse the layer field automatically according to its format"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1 or v.get_default_value() is not None, v.fields
dv = v.get_default_value()
rv = v.raw_value
if dv.startswith('0x'):
return int(dv, 16)
try:
if dv == rv:
return int(dv)
elif int(dv) == int(rv, 16):
return int(dv)
except (ValueError, TypeError):
pass
if rv is None:
try:
return int(dv)
except (ValueError, TypeError):
pass
if ':' in dv and '::' not in dv and dv.replace(':', '') == rv: # '88:00', '8800'
return int(rv, 16)
# timestamp: 'Jan 1, 1970 08:00:00.000000000 CST', '0000000000000000'
# convert to seconds from 1970, ignore the nanosecond for now since
# there are integer seconds applied in the test cases
try:
time_str = datetime.datetime.strptime(dv, "%b %d, %Y %H:%M:%S.%f000 %Z")
time_in_sec = time.mktime(time_str.utctimetuple())
return int(time_in_sec)
except (ValueError, TypeError):
pass
try:
int(rv, 16)
return int(dv)
except Exception:
pass
raise ValueError((v, v.get_default_value(), v.raw_value))
def _payload(v: Union[LayerFieldsContainer, LayerField]) -> bytearray:
"""parse the layer field as a bytearray"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
hex_value = v.raw_value
assert len(hex_value) % 2 == 0
s = bytearray()
for i in range(0, len(hex_value), 2):
s.append(int(hex_value[i:i + 2], 16))
return s
def _hex(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a hex string"""
# split v into octets and reverse the order
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return int(v.get_default_value(), 16)
def _raw_hex(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a raw hex string"""
# split v into octets and reverse the order
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
iv = v.hex_value
try:
int(v.get_default_value())
assert int(v.get_default_value()) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
try:
int(v.get_default_value(), 16)
assert int(v.get_default_value(), 16) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
return iv
def _raw_hex_rev(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a reversed raw hex string"""
# split v into octets and reverse the order
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
rv = v.raw_value
octets = [rv[i:i + 2] for i in range(0, len(rv), 2)]
iv = int(''.join(reversed(octets)), 16)
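# e.g. raw '0011' splits into ['00', '11'] and reverses to 0x1100, turning
# the little-endian wire order into the integer value.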
try:
int(v.get_default_value())
assert int(v.get_default_value()) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
try:
int(v.get_default_value(), 16)
assert int(v.get_default_value(), 16) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
return iv
def _dec(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a decimal"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return int(v.get_default_value())
def _float(v: Union[LayerFieldsContainer, LayerField]) -> float:
"""parse the layer field as a float"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return float(v.get_default_value())
def _str(v: Union[LayerFieldsContainer, LayerField]) -> str:
"""parse the layer field as a string"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return str(v.get_default_value())
def _bytes(v: Union[LayerFieldsContainer, LayerField]) -> Bytes:
"""parse the layer field as raw bytes"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return Bytes(v.raw_value)
def _ext_addr(v: Union[LayerFieldsContainer, LayerField]) -> ExtAddr:
"""parse the layer field as an extended address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return ExtAddr(v.get_default_value())
def _ipv6_addr(v: Union[LayerFieldsContainer, LayerField]) -> Ipv6Addr:
"""parse the layer field as an IPv6 address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return Ipv6Addr(v.get_default_value())
def _eth_addr(v: Union[LayerFieldsContainer, LayerField]) -> EthAddr:
"""parse the layer field as an Ethernet MAC address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1, v.fields
return EthAddr(v.get_default_value())
def _routerid_set(v: Union[LayerFieldsContainer, LayerField]) -> set:
"""parse the layer field as a set of router ids
Notes: the router ID mask in wireshark is a
hexadecimal string separated by ':'
"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
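# The least-significant bit of the 64-bit mask maps to router id 63 and the
# most-significant bit to router id 0, hence the "64 - count" below.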
try:
ridmask = str(v.get_default_value())
assert isinstance(ridmask, str), ridmask
ridmask_int = int(ridmask.replace(':', ''), base=16)
rid_set = set()
count = 0
while ridmask_int:
count += 1
if ridmask_int & 1:
rid_set.add(64 - count)
ridmask_int = ridmask_int >> 1
except ValueError:
pass
return rid_set
class _first(object):
"""parse the first layer field"""
def __init__(self, sub_parse):
self._sub_parse = sub_parse
def __call__(self, v: Union[LayerFieldsContainer, LayerField]):
return self._sub_parse(v.fields[0])
class _list(object):
"""parse all layer fields into a list"""
def __init__(self, sub_parse):
self._sub_parse = sub_parse
def __call__(self, v: Union[LayerFieldsContainer, LayerField]):
return [self._sub_parse(f) for f in v.fields |
yibitcoin/yibitcoin | qa/rpc-tests/txn_doublespend.py | Python | apache-2.0 | 4,971 | 0.003018 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from decimal import Decimal
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
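# Test outline: split the network, spend the same coins on both halves (once
# via wallet sends, once via the pre-signed raw "doublespend" transaction),
# then rejoin the network and check that the mined double-spend wins.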
def run_test(self):
# All nodes should start with 7,500,000 DOGE:
starting_balance = 7500000
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].move("", "foo", 7499970)
self.nodes[0].move("", "bar", 30)
assert_equal(self.nodes[0].getbalance(""), 0)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 7499960 DOGE to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], 7499960)
change_address = self.nodes[0].getnewaddress("foo")
outputs = {}
outputs[change_address] = 40
outputs[node1_address] = 7499960
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two transactions from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("foo", node1_address, 7499960, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[ | 0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500,000 DOGE for another
# matured block, minus 7499960, minus 20, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += 500000
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo"), 7499970+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
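        # (gettransaction reports a negative confirmation count for a
        # transaction that conflicts with one already in the best chain)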
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus 1,000,000 DOGE for
# two more matured blocks, minus 7499960 for the double-spend:
expected = starting_balance + 1000000 - 7499960
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# foo account should be debited, but bar account should not:
assert_equal(self.nodes[0].getbalance("foo"), 7499970-7499960)
assert_equal(self.nodes[0].getbalance("bar"), 30)
# Node1's "from" account balance should be just the mutated send:
assert_equal(self.nodes[1].getbalance("from0"), 7499960)
if __name__ == '__main__':
TxnMallTest().main()
|
tkzeng/molecular-design-toolkit | moldesign/_tests/test_mm.py | Python | apache-2.0 | 4,019 | 0.002737 | import random
import pytest
import numpy as np
import moldesign as mdt
from moldesign import units as u
from . import helpers
registered_types = {}
def typedfixture(*types, **kwargs):
"""This is a decorator that lets us associate fixtures with one or more arbitrary types.
We'll later use this type to determine what tests to run on the result"""
def fixture_wrapper(func):
for t in types:
registered_types.setdefault(t, []).append(func.__name__)
return pytest.fixture(**kwargs)(func)
return fixture_wrapper
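# e.g. @typedfixture('hasmodel') appends the fixture's name to
# registered_types['hasmodel'], which parametrizes the tests further down.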
@pytest.fixture
def small_molecule():
mol = mdt.from_smiles('CNCOS(=O)C')
mol.positions += 0.001*np.random.random(mol.positions.shape)*u.angstrom # move out of minimum
return mol
@typedfixture('hasmodel')
def parameterize_zeros(small_molecule):
params = mdt.parameterize(small_molecule, charges='zero')
mol = mdt.assign_forcefield(small_molecule, parameters=params)
mol.set_energy_model(mdt.models.ForceField)
return mol
@typedfixture('hasmodel')
def parameterize_am1bcc(small_molecule):
params = mdt.parameterize(small_molecule, charges='am1-bcc', ffname='gaff')
mol = mdt.assign_forcefield(small_molecule, parameters=params)
mol.set_energy_model(mdt.models.ForceField)
return mol
@typedfixture('hasmodel')
def openbabel_mmff94(small_molecule):
small_molecule.set_energy_model(mdt.models.OpenBabelPotential, forcefield='mmff94')
return small_molecule
@typedfixture('hasmodel')
def openbabel_mmff94s(small_molecule):
small_molecule.set_energy_model(mdt.models.OpenBabelPotential, forcefield='mmff94s')
return small_molecule
# This test (along with the uff energy model) is disabled because it does not appear to return a
# gradient that's consistent with the energy surface
#@typedfixture('hasmodel')
#def openbabel_uff(small_molecule):
# small_molecule.set_energy_model(mdt.models.OpenBabelPotential, forcefield='uff')
# return small_molecule
@typedfixture('hasmodel')
def openbabel_ghemical(small_molecule):
small_molecule.set_energy_model(mdt.models.OpenBabelPotential, forcefield='ghemical')
return small_molecule
@typedfixture('hasmodel')
def protein_default_amber_forcefield():
mol = mdt.from_pdb('1YU8')
newmol = mdt.assign_forcefield(mol)
newmol.set_energy_model(mdt.models.ForceField)
return newmol
@typedfixture('hasmodel')
def gaff_model_gasteiger(small_molecule):
small_molecule.set_energy_model(mdt.models.GAFF, charges='gasteiger')
return small_molecule
@pytest.mark.parametrize('objkey', registered_types['hasmodel'])
def test_forces_and_energy_were_calculated(objkey, request):
mol = request.getfuncargvalue(objkey)
energy = mol.calculate_potential_energy()
forces = mol.calculate_forces()
assert forces.shape == mol.positions.shape
@pytest.mark.skipif(mdt.interfaces.openmm.force_remote,
reason="Numerical derivatives need to be parallelized, "
"otherwise this takes too long")
@pytest.mark.parametrize('objkey', registered_types['hasmodel'])
def test_analytical_vs_numerical_forces(objkey, request):
mol = request.getfuncargvalue(objkey)
if mol.num_atoms > 20:
atoms = random.sample(mol.atoms, 20)
else:
atoms = mol.atoms
atom_indices = [a | tom.index for atom in atoms]
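    # the analytic gradient is the negative of the forces; compare it against
    # a numerical (finite-difference) estimate of the same entries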
anagrad = -mo | l.calculate_forces()[atom_indices]
numgrad = helpers.num_grad(mol,
mol.calculate_potential_energy,
atoms=atoms,
step=0.005*u.angstrom)
assert (anagrad-numgrad).norm()/(3.0*len(atoms)) <= 5.0e-4 * u.eV / u.angstrom
@pytest.mark.parametrize('objkey', registered_types['hasmodel'])
def test_minimization_reduces_energy(objkey, request):
mol = request.getfuncargvalue(objkey)
e1 = mol.calculate_potential_energy()
mol = request.getfuncargvalue(objkey)
traj = mol.minimize()
assert mol.calculate_potential_energy() < e1
|
spencervillars/AutoSchema | tests/test_tabulate.py | Python | mit | 360 | 0 | from myc | li.packages.tabulate import tabulate
from textwrap import dedent
def test_dont_strip_leading_whitespace():
data = [[' abc']]
headers = ['xyz']
tbl, _ = tabulate(data, headers, tablefmt='psql')
assert tbl == dedent('''
+---------+
| xyz |
|---------|
| | abc |
+---------+ ''').strip()
|
Qalthos/remysmoke | remysmoke/websetup/bootstrap.py | Python | agpl-3.0 | 246 | 0.004065 | # -*- coding: utf-8 -*-
"""Setup the remysmoke application"""
import logging
from tg import config
from remysmoke i | mport mod | el
import transaction
def bootstrap(command, conf, vars):
"""Place any commands to setup remysmoke here"""
pass
|
sinnwerkstatt/django-indisposable-email-field | indisposable_email_field/data.py | Python | apache-2.0 | 9,940 | 0.008853 |
domain_names = ['0clickemail.com', 'noclickemail.com', '10minutemail.com', 'bofthew.com', 'drdrb.com', 'drdrb.net',
'jnxjn.com', 'klzlk.com', 'nepwk.com', 'nwldx.com', 'owlpic.com', 'pjjkp.com', 'prtnx.com', 'rmqkr.net',
'rppkn.com', 'rtrtr.com', 'trbvm.com', 'tyldd.com', 'uggsrock.com', '10minutemail.net', 'akerd.com',
'soisz.com', 'zoaxe.com', '12houremail.com', '12minutemail.com', '1pad.de', '20minutemail.com',
'30minutenmail.eu', '5ymail.com', '60-minuten-mail.de', 'akapost.com', 'anon-mail.de', 'anonbox.net',
'anonmails.de', 'anonymbox.com', 'anonymous-email.net', 'anonymousfeedback.net', 'antispam.de',
'antispam24.de', 'antispammail.de', 'b2cmail.de', 'breakthru.com', 'bspamfree.org', 'bugmenot.com',
'bumpymail.com', 'byom.de', 'trashmail.org', 'cam4you.cc', 'centermail.com', 'centermail.net',
'deadaddress.com', 'despammed.com', 'dispostable.com', 'dodgeit.com', 'dodgit.com', 'dontsendmespam.de',
'dotman.de', 'dudmail.com', 'dump-email.info', 'dumpmail.de', 'e4ward.com', 'easytrashmail.com',
'edv.to', 'einfach.to', 'eintagsmail.de', 'emailgo.de', 'emailias.com', 'emailsensei.com',
| 'emailtemporanea.com', 'emailtemporanea.net', 'empiremail.de', 'eyepaste.com', 'fakeinbox.com',
'fakemail.fr', 'adresseemailtemporaire.com', 'armyspy.com', 'cuvox.de', 'dayrep.com', 'einrot.com',
'fakemailgenerator.com', 'fleckens.hu', 'gustr.com', 'jourrapide.com', 'rhyta.com', 'superrito.com',
'teleworm.us', 'wegwerfemailadresse.com', 'filzmail.com', 'f | litafir.de', 'frapmail.com',
'garbagemail.org', 'garliclife.com', '7tags.com', 'broadbandninja.com', 'cellurl.com', 'dealja.com',
'getairmail.com', 'moburl.com', 'tagyourself.com', 'vidchart.com', 'getmails.eu', 'consumerriot.com',
'getonemail.com', 'gishpuppy.com', 'nurfuerspam.de', 'guerillamail.org', 'guerrillamail.net', 'grr.la',
'guerrillamail.biz', 'guerrillamail.com', 'guerrillamail.de', 'guerrillamail.info', 'guerrillamail.org',
'sharklasers.com', 'spam4.me', 'guerrillamailblock.com', 'haltospam.com', 'harakirimail.com',
'hidemail.de', 'hidemyass.com', 'hmamail.com', 'bootybay.de', 'gehensiemirnichtaufdensack.de',
'hat-geld.de', 'ieh-mail.de', 'plexolan.de', 'inbox.si', 'incognitomail.com', 'incognitomail.net',
'incognitomail.org', 'instant-mail.de', 'sinnlos-mail.de', 'wegwerf-email-adressen.de',
'wegwerf-emails.de', 'ip6.li', 'irish2me.com', 'jetable.com', 'jetable.net', 'jetable.org', 'junk.to',
'kasmail.com', 'keepmymail.com', 'koszmail.pl', 'lhsdv.com', 'lifebyfood.com', 'lr78.com',
'luckymail.org', 'card.zp.ua', 'express.net.ua', 'infocom.zp.ua', 'mail.zp.ua', 'mycard.net.ua',
'mail1a.de', 'delikkt.de', 'm21.cc', 'mail21.cc', 'mysamp.de', 'mail4trash.com', 'mailcatch.com',
'mailbiz.biz', 'mailde.de', 'mailde.info', 'mailms.com', 'mailorg.org', 'mailtv.net', 'mailtv.tv',
'ministry-of-silly-walks.de', 'maildrop.cc', 'maileater.com', 'maileimer.de', 'mailexpire.com',
'mailfish.de', 'mailforspam.com', 'binkmail.com', 'bobmail.info', 'chammy.info', 'chogmail.com',
'devnullmail.com', 'letthemeatspam.com', 'mailin8r.com', 'mailinater.com', 'mailinator.com',
'mailinator.net', 'mailinator2.com', 'mailismagic.com', 'mailtothis.com', 'monumentmail.com',
'notmailinator.com', 'putthisInyourspamdatabase.com', 'reallymymail.com', 'safetymail.info',
'slopsbox.com', 'sogetthis.com', 'spamgoes.in', 'spamherelots.com', 'SpamHerePlease.com',
'supergreatmail.com', 'suremail.info', 'thisisnotmyrealemail.com', 'tradermail.info',
'veryrealemail.com', 'zippymail.info', 'mailita.tk', 'mailme24.com', 'mailnesia.com', 'mailnull.com',
'mailshell.com', 'mailtome.de', 'mailtrash.net', 'makemetheking.com', 'mbx.cc', 'meltmail.com',
'messagebeamer.de', 'migmail.pl', 'mintemail.com', 'muell.email', 'my10minutemail.com',
'mytempmail.com', 'mailmetrash.com', 'mt2009.com', 'mt2014.com', 'mytrashmail.com', 'thankyou2010.com',
'trash2009.com', 'trashymail.com ', 'nervmich.net', 'nervtmich.net', 'wegwerfadresse.de',
'netzidiot.de', 'no-spam.ws', 'nospam4.us', 'nospamfor.us', 'nospammail.net', 'nowmymail.com',
'nsaking.de', 'obobbo.com', 'ohaaa.de', 'blackmarket.to', 'omail.pro', 'thc.st', 'vpn.st',
'oneoffemail.com', 'oneoffmail.com', 'onewaymail.com', 'onlatedotcom.info', 'otherinbox.com',
'pookmail.com', 'privacy.net', 'privatdemail.net', 'fansworldwide.de', 'privy-mail.de', 'privymail.de',
'trashmailer.com', 'put2.net', 'quickinbox.com', 'realtyalerts.ca', 'mailseal.de', 'receiveee.com',
'safersignup.de', 'safetypost.de', 'sapya.com', 'schafmail.de', 'schmeissweg.tk', 'schrott-email.de',
'secretemail.de', 'lolfreak.net', 'secure-mail.biz', 'secure-mail.cc', 'z1p.biz', 'send-email.org',
'SendSpamHere.com', 'senseless-entertainment.com', 'is.af', 'server.ms', 'us.af', 'shieldemail.com',
'sneakemail.com', 'snkmail.com', 'sofort-mail.de', 'sofortmail.de', 'soodonims.com', 'spam.la',
'spam.su', 'spamail.de', 'spamavert.com', 'spambob.com', '0815.ru', '3d-painting.com', '6ip.us',
'agedmail.com', 'ama-trade.de', 'ama-trans.de', 'ano-mail.net', 'bio-muesli.info', 'bio-muesli.net',
'brennendesreich.de', 'buffemail.com', 'bund.us', 'cust.in', 'dbunker.com', 'discardmail.com',
'discardmail.de', 'dropcake.de', 'duskmail.com', 'e-postkasten.com', 'e-postkasten.de',
'e-postkasten.eu', 'e-postkasten.info', 'emaillime.com', 'ero-tube.org', 'film-blog.biz', 'fly-ts.de',
'flyspam.com', 'fr33mail.info', 'geschent.biz', 'great-host.in', 'hochsitze.com', 'hulapla.de',
'imails.info', 'kulturbetrieb.info', 'lags.us', 'm4ilweb.info', 'malahov.de', 'misterpinball.de',
'mypartyclip.de', 'nomail2me.com', 'nospamthanks.info', 'politikerclub.de', 'recode.me', 's0ny.net',
'sandelf.de', 'sky-ts.de', 'spambog.com', 'spambog.de', 'spambog.ru', 'superstachel.de', 'teewars.org',
'thanksnospam.info', 'watch-harry-potter.com', 'watchfull.net', 'webm4il.info', 'spambox.us',
'spamcero.com', 'spamcorptastic.com', 'spamex.com', 'spamfree.eu', 'spamfree24.com', 'spamfree24.de',
'spamfree24.info', 'spamfree24.org', 'antichef.net', 'spamcannon.net', 'spamgourmet.com',
'spamgourmet.net', 'spamgourmet.org', 'spamhole.com', 'spaminator.de', 'spaml.de', 'spammotel.com',
'spamobox.com', 'spamspot.com', 'SpamThisPlease.com', 'spamtrail.com', 'spamtroll.net', 'cheatmail.de',
'dodgemail.de', 'fivemail.de', 'giantmail.de', 'nevermail.de', 'smashmail.de', 'sneakmail.de',
'spoofmail.de', 'stuffmail.de', 'tokenmail.de', 'trialmail.de', 'squizzy.de', 'sry.li',
'stinkefinger.net', 'sofimail.com', 'stop-my-spam.com', 'super-auswahl.de', 'teleworm.com',
'checknew.pw', 'gomail.in', 'inboxed.im', 'inboxed.pw', 'linuxmail.so', 'nomail.pw', 'powered.name',
'secmail.pw', 'shut.name', 'shut.ws', 'temp-mail.org', 'tokem.co', 'vipmail.name', 'vipmail.pw',
'writeme.us', 'yanet.me', 'llogin.ru', 'odnorazovoe.ru', 'temp-mail.ru', 'tempail.com',
'tempemail.co.za', 'tempemail.net', 'beefmilk.com', 'dingbone.com', 'fudgerub.com', 'lookugly.com',
'smellfear.com', 'tempinbox.com', 'tempmail.eu', 'tempmailer.com', 'tempmailer.de', 'tempomail.fr',
'temporarily.de', 'temporaryemail.net', 'temporaryinbox.com', 'temporarymailaddress.com',
'thismail.net', 'tittbit |
scavarda/mysql-dbcompare | mysql-utilities-1.6.0/scripts/mysqlrplsync.py | Python | apache-2.0 | 11,414 | 0.000789 | #!/usr/bin/env python
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the replication synchronization checker utility. It is used
to check the data consistency between master and slaves (and synchronize the
data if requested by the user).
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os
import sys
from mysql.utilities.command.rpl_sync_check import check_data_consistency
from mysql.utilities.common.messages import (
ERROR_MASTER_IN_SLAVES, PARSE_ERR_DISCO_REQ_MASTER,
PARSE_ERR_OPT_REQ_NON_NEGATIVE_VALUE, PARSE_ERR_OPT_REQ_GREATER_VALUE,
PARSE_ERR_OPT_REQ_VALUE, PARSE_ERR_OPTS_EXCLD,
PARSE_ERR_SLAVE_DISCO_REQ
)
from mysql.utilities.common.options import (add_discover_slaves_option,
add_master_option,
add_slaves_option,
add_ssl_options, add_verbosity,
check_server_lists,
db_objects_list_to_dictionary,
setup_common_options,
check_password_security)
from mysql.utilities.common.server import check_hostname_alias, Server
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.topology import parse_topology_connections
from mysql.utilities.exception import UtilError, UtilRplError
# Check for connector/python
if not check_connector_python():
sys.exit(1)
# Constants
NAME = "MySQL Utilities - mysqlrplsync"
DESCRIPTION = "mysqlrplsync - replication synchronization checker utility"
USAGE = ("%prog --master=user:pass@host:port --slaves=user:pass@host:port \\\n"
" [<db_name>[.<tbl_name>]]")
EXTENDED_HELP = """
Introduction
------------
The mysqlrplsync utility is designed to check if replication servers with
GTIDs enabled are synchronized. In other words, it checks the data consistency
between a master and a slave or between two slaves.
The utility permits you to run the check while replication is active. The
synchronization algorithm is applied using GTID information to identify those
transactions that differ (missing, not read, etc.) between the servers. During
the process, the utility waits for the slave to catch up to the master to
ensure all GTIDs have been read prior to performing the data consistency
check.
Note: if replication is not running (e.g., all slaves are stopped), the
utility can still perform the check, but the step to wait for the slave to
catch up to the master will be skipped. If you want to run the utility on a
stopped replication topology, you should ensure the slaves are up to date
first.
By default, all data is | included in the comparison. To check specific
databases or tables, list each element as a separated argument for the
utility using full qualified names as shown in the following examples.
# Check the data consistency of a replication topology, explicitly
# specifying the master and slaves.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--slav | es=rpl:pass@host2:3306,rpl:pass@host3:3306
# Check the data consistency of a replication topology, specifying the
# master and using the slaves discovery feature.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--discover-slaves-login=rpl:pass
# Check the data consistency only between specific slaves (no check
# performed on the master).
$ mysqlrplsync --slaves=rpl:pass@host2:3306,rpl:pass@host3:3306
# Check the data consistency of a specific database (db1) and table
# (db2.t1), explicitly specifying master and slaves.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--slaves=rpl:pass@host2:3306,rpl:pass@host3:3306 \\
db1 db2.t1
# Check the data consistency of all data excluding a specific database
# (db2) and table (db1.t2), specifying the master and using slave
# discovery.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--discover-slaves-login=rpl:pass --exclude=db2,db1.t2
Helpful Hints
-------------
- The default timeout for performing the table checksum is 5 seconds.
This value can be changed with the --checksum-timeout option.
- The default timeout for waiting for slaves to catch up is 300 seconds.
This value can be changed with the --rpl-timeout option.
- The default interval to periodically verify if a slave has read all of
the GTIDs from the master is 3 seconds. This value can be changed
with the --interval option.
"""
if __name__ == '__main__':
# Setup the command parser (with common options).
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, server=False,
extended_help=EXTENDED_HELP)
# Add the --discover-slaves-login option.
add_discover_slaves_option(parser)
# Add the --master option.
add_master_option(parser)
# Add the --slaves option.
add_slaves_option(parser)
# Add the --ssl options
add_ssl_options(parser)
# Add verbosity option (no --quite option).
add_verbosity(parser, False)
# Add timeout options.
parser.add_option("--rpl-timeout", action="store", dest="rpl_timeout",
type="int", default=300,
help="maximum timeout in seconds to wait for "
"synchronization (slave waiting to catch up to "
"master). Default = 300.")
parser.add_option("--checksum-timeout", action="store",
dest="checksum_timeout", type="int", default=5,
help="maximum timeout in seconds to wait for CHECKSUM "
"query to complete. Default = 5.")
# Add polling interval option.
parser.add_option("--interval", "-i", action="store", dest="interval",
type="int", default="3", help="interval in seconds for "
"polling slaves for sync status. Default = 3.")
# Add option to exclude databases/tables check.
parser.add_option("--exclude", action="store", dest="exclude",
type="string", default=None,
help="databases or tables to exclude. Example: "
"<db_name>[.<tbl_name>]. List multiple names in a "
"comma-separated list.")
# Parse the options and arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
# At least one of the options --discover-slaves-login or --slaves is
# required.
if not opt.discover and not opt.slaves:
parser.error(PARSE_ERR_SLAVE_DISCO_REQ)
# The --discover-slaves-login and --slaves options cannot be used
# simultaneously (only one).
if opt.discover and opt.slaves:
parser.error(PARSE_ERR_OPTS_EXCLD.format(
opt1='--discover-slaves-login', opt2='--slaves'
))
if opt.discover and not opt.master:
parser.error(PARSE_ERR_DISCO_REQ_MASTER)
    # Check timeout values; they must not be negative.
if opt.rpl_timeout < 0:
parser.error(
PARSE_ERR_OPT_REQ_NON_NEGATIVE_VALUE.format(opt='--rpl-timeout')
)
if opt.checksum_time |
maxvonhippel/q2-diversity | q2_diversity/_alpha/_visualizer.py | Python | bsd-3-clause | 8,213 | 0.000244 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
import os
import pkg_resources
import shutil
from urllib.parse import quote
import scipy
import numpy as np
import pandas as pd
import qiime2
from statsmodels.sandbox.stats.multicomp import multipletests
import q2templates
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_alpha')
def alpha_group_significance(output_dir: str, alpha_diversity: pd.Series,
metadata: qiime2.Metadata) -> None:
metadata_df = metadata.to_dataframe()
metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
pre_filtered_cols = set(metadata_df.columns)
metadata_df = metadata_df.select_dtypes(exclude=[np.number])
post_filtered_cols = set(metadata_df.columns)
filtered_numeric_categories = pre_filtered_cols - post_filtered_cols
filtered_group_comparisons = []
categories = metadata_df.columns
metric_name = alpha_diversity.name
if len(categories) == 0:
raise ValueError('Only numeric data is present in metadata file.')
filenames = []
filtered_categories = []
for category in categories:
metadata_category = metadata.get_category(category).to_series()
metadata_category = metadata_category.loc[alpha_diversity.index]
metadata_category = metadata_category.replace(r'', np.nan).dropna()
initial_data_length = alpha_diversity.shape[0]
data = pd.concat([alpha_diversity, metadata_category], axis=1,
join='inner')
filtered_data_length = data.shape[0]
names = []
groups = []
for name, group in data.groupby(metadata_category.name):
names.append('%s (n=%d)' % (name, len(group)))
groups.append(list(group[alpha_diversity.name]))
if (len(groups) > 1 and len(groups) != len(data.index)):
escaped_category = quote(category)
filename = 'category-%s.jsonp' % escaped_category
filenames.append(filename)
# perform Kruskal-Wallis across all groups
kw_H_all, kw_p_all = scipy.stats.mstats.kruskalwallis(*groups)
# perform pairwise Kruskal-Wallis across all pairs of groups and
# correct for multiple comparisons
kw_H_pairwise = []
for i in range(len(names)):
for j in range(i):
try:
H, p = scipy.stats.mstats.kruskalwallis(groups[i],
groups[j])
kw_H_pairwise.append([names[j], names[i], H, p])
except ValueError:
filtered_group_comparisons.append(
['%s:%s' % (category, names[i]),
'%s:%s' % (category, names[j])])
kw_H_pairwise = pd.DataFrame(
kw_H_pairwise, columns=['Group 1', 'Group 2', 'H', 'p-value'])
kw_H_pairwise.set_index(['Group 1', 'Group 2'], inplace=True)
kw_H_pairwise['q-value'] = multipletests(
kw_H_pairwise['p-value'], method='fdr_bh')[1]
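            # (multipletests returns (reject, pvals_corrected, ...); index [1]
            # is the Benjamini-Hochberg adjusted p-values)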
kw_H_pairwise.sort_index(inplace=True)
pairwise_fn = 'kruskal-wallis-pairwise-%s.csv' % escaped_category
pairwise_path = os.path.join(output_dir, pairwise_fn)
| kw_H_pairwise.to_csv(pairwise_path)
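            # emit the JSONP payload the visualization loads: raw group data,
            # filtering counts, the Kruskal-Wallis result, and the pairwise table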
with open(os.path.join(output_dir, filename), 'w') as fh:
df = pd.Series(groups, index=names)
fh.write("load_data('%s'," % category)
df.to_json(fh, orient='split')
fh.write(",")
json.dump({'initial': initial_data_length,
| 'filtered': filtered_data_length}, fh)
fh.write(",")
json.dump({'H': kw_H_all, 'p': kw_p_all}, fh)
fh.write(",'")
table = kw_H_pairwise.to_html(classes="table table-striped "
"table-hover")
table = table.replace('border="1"', 'border="0"')
fh.write(table.replace('\n', ''))
fh.write("','%s', '%s');" % (quote(pairwise_fn), metric_name))
else:
filtered_categories.append(category)
index = os.path.join(
TEMPLATES, 'alpha_group_significance_assets', 'index.html')
q2templates.render(index, output_dir, context={
'categories': [quote(fn) for fn in filenames],
'filtered_numeric_categories': ', '.join(filtered_numeric_categories),
'filtered_categories': ', '.join(filtered_categories),
'filtered_group_comparisons':
'; '.join([' vs '.join(e) for e in filtered_group_comparisons])})
shutil.copytree(
os.path.join(TEMPLATES, 'alpha_group_significance_assets', 'dst'),
os.path.join(output_dir, 'dist'))
_alpha_correlation_fns = {'spearman': scipy.stats.spearmanr,
'pearson': scipy.stats.pearsonr}
def alpha_correlation(output_dir: str,
alpha_diversity: pd.Series,
metadata: qiime2.Metadata,
method: str='spearman') -> None:
try:
alpha_correlation_fn = _alpha_correlation_fns[method]
except KeyError:
raise ValueError('Unknown alpha correlation method %s. The available '
'options are %s.' %
(method, ', '.join(_alpha_correlation_fns.keys())))
metadata_df = metadata.to_dataframe()
metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
pre_filtered_cols = set(metadata_df.columns)
metadata_df = metadata_df.select_dtypes(include=[np.number])
post_filtered_cols = set(metadata_df.columns)
filtered_categories = pre_filtered_cols - post_filtered_cols
categories = metadata_df.columns
if len(categories) == 0:
raise ValueError('Only non-numeric data is present in metadata file.')
filenames = []
for category in categories:
metadata_category = metadata_df[category]
metadata_category = metadata_category.loc[alpha_diversity.index]
metadata_category = metadata_category.dropna()
# create a dataframe containing the data to be correlated, and drop
# any samples that have no data in either column
df = pd.concat([metadata_category, alpha_diversity], axis=1,
join='inner')
# compute correlation
correlation_result = alpha_correlation_fn(df[metadata_category.name],
df[alpha_diversity.name])
warning = None
if alpha_diversity.shape[0] != df.shape[0]:
warning = {'initial': alpha_diversity.shape[0],
'method': method.title(),
'filtered': df.shape[0]}
escaped_category = quote(category)
filename = 'category-%s.jsonp' % escaped_category
filenames.append(filename)
with open(os.path.join(output_dir, filename), 'w') as fh:
fh.write("load_data('%s'," % category)
df.to_json(fh, orient='split')
fh.write(",")
json.dump(warning, fh)
fh.write(",")
json.dump({
'method': method.title(),
'testStat': '%1.4f' % correlation_result[0],
'pVal': '%1.4f' % correlation_result[1],
'sampleSize': df.shape[0]}, fh)
fh.write(");")
index = os.path.join(TEMPLATES, 'alpha_correlation_assets', 'index.html')
q2templates.render(index, output_dir, context={
'categories': [quote(fn) for fn in filenames],
'filtered_categories': ', '.join(filtered_categories)})
shutil.copytree(os.path.join(TEMPLATES, 'alpha_correlation_assets', 'dst'),
o |
south-coast-science/scs_core | tests/sys/tail_test.py | Python | mit | 547 | 0 | #!/usr/bin/env python3
"""
Created on 20 Jan 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import os
from scs_core.sys.tail import Tail
# --------------------------------------------------------------------------------------------------------------------
path = os.path.expanduser('~/SCS/scs_core/tests/sys/ta | il_test.json')
tail = Tail.const | ruct(path)
print(tail)
try:
tail.open()
for message in tail.readlines():
print("got: %s" % message)
except RuntimeError:
pass
finally:
tail.close()
|
jackrzhang/zulip | zerver/webhooks/airbrake/view.py | Python | apache-2.0 | 1,393 | 0.005743 | # Webhooks for external integrations.
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.web | hooks.common import check_send_webhook_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.models import UserProfile
AIRBRAKE_TOPIC_TEMPLATE = '{project_name}'
AIRBRAKE_MESSAGE_TEMPLATE = '[{error_class}]({error_url}): "{error_message}" occurred.'
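# Illustrative rendering with hypothetical values:
# [RuntimeError](https://airbrake.io/errors/123): "boom" occurred.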
@api_key_only_webhook_view('Airbrake')
@has_request_variables
def api_airbrake_webhook(request: HttpRequest, user_profile: UserProfile,
| payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
subject = get_subject(payload)
body = get_body(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject(payload: Dict[str, Any]) -> str:
return AIRBRAKE_TOPIC_TEMPLATE.format(project_name=payload['error']['project']['name'])
def get_body(payload: Dict[str, Any]) -> str:
data = {
'error_url': payload['airbrake_error_url'],
'error_class': payload['error']['error_class'],
'error_message': payload['error']['error_message'],
}
return AIRBRAKE_MESSAGE_TEMPLATE.format(**data)
|
CroceRossaItaliana/jorvik | anagrafica/validators.py | Python | gpl-3.0 | 3,974 | 0.003531 | import stdnum
import re
from datetime import datetime, date
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from stdnum.it import codicefiscale
def _valida_codice_fiscale(codice_fiscale):
"""
    Extended validator that checks whether the fiscal code is valid.
    A temporary fiscal code (11 numeric digits) is considered valid.
    :param codice_fiscale: The fiscal code.
    :return: None. Raises an exception if validation fails.
"""
try:
codicefiscale.validate(codice_fiscale)
except:
if re.search("^[0-9]{11}$", codice_fiscale) is None:
raise
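# e.g. _valida_codice_fiscale('12345678901') passes: an 11-digit numeric code
# (illustrative value) is accepted as a temporary fiscal code.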
def valida_codice_fiscale(codice_fiscale):
try:
_valida_codice_fiscale(codice_fiscale)
except:
raise ValidationError("Il codice fiscale non è valido.")
def ottieni_genere_da_codice_fiscale(codice_fiscale, default=None):
try:
return codicefiscale.get_gender(codice_fiscale)
except:
return default
def crea_validatore_dimensione_file(mb=10):
def _validatore(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = mb
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Seleziona un file più piccolo di %sMB" % str(megabyte_limit))
return _validatore
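# usage sketch (hypothetical model field):
#   documento = models.FileField(validators=[crea_validatore_dimensione_file(mb=2)])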
def valida_partita_iva(partita_iva):
    # "import stdnum" alone does not load the stdnum.it.iva submodule, so it
    # must be imported explicitly before use
    from stdnum.it import iva
    try:
        return iva.validate(partita_iva)
    except:
        raise ValidationError("Partita IVA non corretta.")
def valida_iban(iban_no):
from stdnum import iban
try:
return iban.validate(iban_no)
except:
ra | ise ValidationError("IBAN non valido.")
def valida_dimensione_file_5mb(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 5
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Seleziona un file più piccolo di %sMB" % str(megabyte_limit))
def valida_dimensione_file_10mb(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 10
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Seleziona un file pi | ù piccolo di %sMB" % str(megabyte_limit))
def valida_dimensione_file_8mb(fieldfile_obj):
filesize = fieldfile_obj.file.size
megabyte_limit = 8
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Seleziona un file più piccolo di %sMB" % str(megabyte_limit))
def valida_almeno_14_anni(data):
anni = 14
al_giorno = timezone.now().date()
if (al_giorno.year - data.year - ((al_giorno.month, al_giorno.day) < (data.month, data.day))) < anni:
raise ValidationError("Sono necessari almeno %d anni di età" % (anni,))
def valida_email_personale(email):
coppie = (
('cl.', '@cri.it'),
('cp.', '@cri.it'),
('cr.', '@cri.it'),
)
for coppia in coppie:
if email and email.lower().startswith(coppia[0]) and email.lower().endswith(coppia[1]):
raise ValidationError("Non è possibile utilizzare una casella istituzionale come "
"indirizzo e-mail personale.")
def valida_data_nel_passato(data):
if isinstance(data, datetime):
if data > datetime.now():
raise ValidationError("Data ed ora non possono essere nel futuro.")
elif isinstance(data, date):
if data > date.today():
raise ValidationError("La data non può essere nel futuro.")
else:
raise TypeError("Fornito tipo di data non valido.")
@deconstructible
class ValidateFileSize(object):
def __init__(self, filesize=2):
self.filesize = filesize
def __call__(self, *args, **kwargs):
file = args[0]
filesize = file.file.size
megabyte_limit = self.filesize
if filesize > megabyte_limit * 1024 * 1024:
raise ValidationError(
"Seleziona un file più piccolo di %sMB" % str(megabyte_limit))
|
mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/object_detection/pytorch/maskrcnn_benchmark/utils/logging.py | Python | apache-2.0 | 727 | 0.001376 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import sys
def setup_logger(name, save_dir, local_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-mas | ter process
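    # (they simply return None, so only rank 0 gets a usable logger)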
if local_rank > 0:
return
| ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, "log.txt"))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
|
emmanuelle/scikits.image | skimage/morphology/tests/test_convex_hull.py | Python | bsd-3-clause | 2,035 | 0.000491 | import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing.decorators import skipif
from skimage.morphology import convex_hull_image
from skimage.morphology._convex_hull import possible_hull
try:
import scipy.spatial
scipy_spatial = True
except ImportError:
scipy_spatial = False
@skipif(not scipy_spatial)
def test_basic():
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
expected = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, | 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
assert_array_equal(convex_hull_image(image), expected)
@skipif(not scipy_spatial)
def test_possible_hull():
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
| [0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
expected = np.array([[1, 4],
[2, 3],
[3, 2],
[4, 1],
[4, 1],
[3, 2],
[2, 3],
[1, 4],
[2, 5],
[3, 6],
[4, 7],
[2, 5],
[3, 6],
[4, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6]])
ph = possible_hull(image)
assert_array_equal(ph, expected)
if __name__ == "__main__":
np.testing.run_module_suite()
|
divio/django-cms-demo | src/wsgi.py | Python | bsd-3-clause | 382 | 0 | """
WSGI config for this project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on t | his file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ | .setdefault("DJANGO_SETTINGS_MODULE", "src.settings")
application = get_wsgi_application()
|
zooniverse/aggregation | experimental/algorithms/clustering_dict.py | Python | apache-2.0 | 244 | 0.028689 | __author__ = 'greg'
import agglomerative
clustering_dict = {}
# maps each shape to a clustering algorithm and any keyword par | ams
clustering_dict["seasons"] = {"point":(agglomerative.Agglomerative,{}),"ellipse":(agglomerative.Agglomerative,{})} | |
JQ-K/OMOOC2py | _src/om2py4w/4wex0/server/jserver.py | Python | mit | 531 | 0.028249 | #/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import get, post, request, run, jinja2_view
#import sae.kvdb
from datetime import datetime
import jnote
@g | et('/note')
@jinja2_view('jnote.html')
def ShowPage():
notes = jnote.GetNotes()
return {'notes':notes}
@post('/note')
@jinja2_view('jnote.html')
def CreateNote():
note = unicode(request.forms.get('newnote'),'utf-8')
if note:
jnote.NewNote(note)
notes = jnote.GetNotes()
return {'notes': notes}
run(host='127.0.0.1', port=8080, debug=True, reloa | der=True)
|
nv8h/PyRattus | base/modules/rat/application/abstract.py | Python | mit | 1,309 | 0.022918 | import sys
#from OpenGL.GLUT import *
#from OpenGL.GLU import *
#from OpenGL.GL import *
class abstract:
| params = {}
windowId = None
terminated = False
def initParams(self):
return self
def __init__(self):
self.initParams().init()
return
def init(self):
return
def mouse(self, button, state, x, y):
return
def mouseMotion(self, x, y):
return
def | keyboard(self, asciiCode, x, y):
return
def keyboardSpecial(self, key, x, y):
return
def idle(self):
return
def timer(self, value):
return
def render(self):
return
def reshape(self, width, height):
return
def run(self):
return self
def destroy(self):
del self
return
def select(self):
return self.activate()
def activate(self):
return self
def redisplay(self):
return self
def hide(self):
return self
def show(self):
return self
def title(self, title):
return self
def setPosition(self, x, y):
return self
def setResolution(self, width, height):
return self
|
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/account_transfer_advance/sale.py | Python | agpl-3.0 | 1,249 | 0.004804 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
cl | ass sale_order(osv.osv):
| _inherit = 'sale.order'
_columns = {
'transfer_ids': fields.one2many('account.transfer','sale_id',string='Payment Advanced Transfered',readonly=True),
}
|
LittleRichard/luxalert | luxweb/luxweb/spiders/ParklaneSpider.py | Python | gpl-3.0 | 4,429 | 0.002032 | import datetime
import logging
import re
import scrapy
from nest.storage.luxalert.entity.Apartment import Apartment
from nest.storage.luxalert.entity.ApartmentSnapshot import ApartmentSnapshot
from luxweb.luxweb import HMTL_SPIDER_DATA_TUPLE_KEY
from luxweb.luxweb.ScrapeErrorHandler import ScrapeErrorHandler
from luxweb.luxweb.spiders.AbstractHTMLSpider import AbstractHTMLSpider
class ParklaneSpider(AbstractHTMLSpider):
PARKLANE_NAME = 'Park Lane Seaport'
BUILDING_NAMES = (
PARKLANE_NAME,
)
BEDROOM_REGEX = r"(\d+) (Bedroom)"
BATHROOM_REGEX = r"(\d+) (Bathroom)"
FLOOR_REGEX = r"(\d{2})(\d+)"
# name of the spider, a scrapy-required thing
name = "parklane"
@classmethod
def get_building_names(cls):
return ParklaneSpider.BUILDING_NAMES
def start_requests(self):
# urls to scrape
urls = [
'http://www.parklaneseaport.com/availableunits.aspx'
]
for url in urls:
y | ield scrapy.Request(url=url, callback=self.parse)
@ScrapeErrorHandler.wrap_to_raise
def parse(self, response):
buildings_by_name = self.get_building | s_by_name(ParklaneSpider.BUILDING_NAMES)
table_index = 0
titles = []
# get apartment titles
for title in response.xpath('//h3'):
titles.append(title.xpath('text()').extract_first())
# loops through all tables
for plan in response.xpath(
'//table[@class="availableUnits table table-bordered table-striped table-responsive"]'):
for row in plan.xpath('tbody/tr[@class="AvailUnitRow"]'):
# unit: extract first element in h3 list
unit = str(row.xpath('td/text()').extract_first())
# floor:
floor = unit.split("-")
floor = floor[1]
match = re.search(ParklaneSpider.FLOOR_REGEX, floor)
floor = match.group(1)
if floor[0] == "0":
floor = floor[1:]
floor = str(floor)
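                # e.g. a unit like "PL-0714" (hypothetical format) splits to
                # "0714"; the regex keeps "07" and the leading zero is stripped -> "7"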
# bedrooms:
match = re.search(ParklaneSpider.BEDROOM_REGEX, titles[table_index])
if match is None:
continue # sometimes they add text before the list of units, skip these
bedrooms = match.group(1)
bedrooms = int(bedrooms)
# bathrooms:
match = re.search(ParklaneSpider.BATHROOM_REGEX, titles[table_index])
bathrooms = match.group(1)
bathrooms = float(bathrooms)
# sq_ft:
sq_ft = row.xpath('td[2]/text()').extract_first()
sq_ft = int(sq_ft)
# price: strip $, strip comma
price = row.xpath('td[3]/text()').extract_first()
if price == ParklaneSpider.__CALL_FOR_PRICE_TEXT:
price = None
else:
price = price.replace("$", "")
price = price.replace(",", "")
price = price.split("-")
price = price[0]
price = float(price)
# availability: change "NOW" text
availability = row.xpath('td[5]/span/text()').extract_first()
if availability == "Available":
availability = datetime.datetime.now().date()
else:
availability = datetime.datetime.strptime(availability, '%m/%d/%Y').date()
# floor_plan
floor_plan = plan.xpath('div[@class="plan_image desktop_and_tab"]/img/@src').extract_first()
building = buildings_by_name[ParklaneSpider.PARKLANE_NAME]
apartment = Apartment(
building,
floor,
sq_ft,
bathrooms,
bedrooms,
unit,
)
apartment_snap = ApartmentSnapshot(
apartment,
datetime.datetime.utcnow(),
price,
availability,
floor_plan,
)
yield {HMTL_SPIDER_DATA_TUPLE_KEY: (apartment, apartment_snap)}
table_index += 1
__CALL_FOR_PRICE_TEXT = u'Call'
__LOGGER = logging.getLogger(__name__) |
airelil/pywinauto | pywinauto/handleprops.py | Python | bsd-3-clause | 14,685 | 0.002315 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions to retrieve properties from a window handle
These are implemented in a procedural way so as to to be
useful to other modules with the least conceptual overhead
"""
import warnings
import win32process
import win32api
import win32con
import win32gui
from ctypes import wintypes
from ctypes import WINFUNCTYPE
from ctypes import c_int
from ctypes import byref
from ctypes import sizeof
from ctypes import create_unicode_buffer
from . import win32functions
from . import win32defines
from . import win32structures
from .actionlogger import ActionLogger
#=========================================================================
def text(handle):
"""Return the text of the window"""
class_name = classname(handle)
if class_name == 'IME':
return 'Default IME'
if class_name == 'MSCTFIME UI':
return 'M'
if class_name is None:
return ''
#length = win32functions.SendMessage(handle, win32defines.WM_GETTEXTLENGTH, 0, 0)
# XXX: there are some very rare cases when WM_GETTEXTLENGTH hangs!
# WM_GETTEXTLENGTH may hang even for notepad.exe main window!
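    # SendMessageTimeout with SMTO_ABORTIFHUNG returns after 500 ms instead of
    # blocking forever on a hung window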
c_length = win32structures.DWORD_PTR(0)
result = win32functions.SendMessageTimeout(
handle,
win32defines.WM_GETTEXTLENGTH,
0,
0,
win32defines.SMTO_ABORTIFHUNG,
500,
byref(c_length)
)
if result == 0:
ActionLogger().log('WARNING! Cannot retrieve text length for handle = ' + str(handle))
return ''
else:
length = c_length.value
textval = ''
# In some rare cases, the length returned by WM_GETTEXTLENGTH is <0.
# Guard against this by checking it is >0 (==0 is not of interest):
if length > 0:
length += 1
buffer_ = create_unicode_buffer(length)
ret = win32functions.SendMessage(
handle, win32defines.WM_GETTEXT, length, byref(buffer_))
if ret:
textval = buffer_.value
return textval
#=========================================================================
def classname(handle):
"""Return the class name of the window"""
if handle is None:
return None
class_name = create_unicode_buffer(u"", 257)
win32functions.GetClassName(handle, class_name, 256)
return class_name.value
#=========================================================================
def parent(handle):
"""Return the handle of the parent of the window"""
return win32functions.GetParent(handle)
#=========================================================================
def style(handle):
"""Return the style of the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_STYLE)
#=========================================================================
def exstyle(handle):
"""Return the extended style of the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_EXSTYLE)
#=========================================================================
def controlid(handle):
"""Return the ID of the control"""
return win32functions.GetWindowLong(handle, win32defines.GWL_ID)
#=========================================================================
def userdata(handle):
"""Return the value of any user data associated with the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_USERDATA)
#=========================================================================
def contexthelpid(handle):
"""Return the context help id of the window"""
return win32functions.GetWindowContextHelpId(handle)
#=========================================================================
def iswindow(handle):
"""Return True if the handle is a window"""
return False if handle is None else bool(win32functions.IsWindow(handle))
#=========================================== | ==============================
def isvisible(handle):
"""Return True if the window is visible"""
return False if handle is None else bool(win32functions.IsWindowVisible(handle))
#=========================================================================
def isunicode(handle):
"""Return True if the window is a Unicode window"""
return False if handle is None else bool(win32functions.IsWindowUnicode(handle))
#======================= | ==================================================
def isenabled(handle):
"""Return True if the window is enabled"""
return False if handle is None else bool(win32functions.IsWindowEnabled(handle))
#=========================================================================
def is64bitprocess(process_id):
"""Return True if the specified process is a 64-bit process on x64
Return False if it is only a 32-bit process running under Wow64.
Always return False for x86.
"""
from .sysinfo import is_x64_OS
is32 = True
if is_x64_OS():
phndl = win32api.OpenProcess(win32con.MAXIMUM_ALLOWED, 0, process_id)
if phndl:
is32 = win32process.IsWow64Process(phndl)
#print("is64bitprocess, is32: %d, procid: %d" % (is32, process_id))
return (not is32)
#=========================================================================
def is64bitbinary(filename):
"""Check if the file is 64-bit binary"""
import win32file
try:
binary_type = win32file.GetBinaryType(filename)
return binary_type != win32file.SCS_32BIT_BINARY
except Exception as exc:
warnings.warn('Cannot get binary type for file "{}". Error: {}'
.format(filename, exc), RuntimeWarning, stacklevel=2)
return None
#=========================================================================
def clientrect(handle):
"""Return the client rectangle of the control"""
client_rect = win32structures.RECT()
win32functions.GetClientRect(handle, byref(client_rect))
return client_rect
#=========================================================================
def rectangle(handle):
"""Return the rectangle of the window"""
rect = win32structures.RECT()
win32functions.GetWindowRect(handle, byref(rect))
return rect
#=========================================================================
def fon |
jessicalucci/NovaOrc | nova/tests/api/openstack/test_xmlutil.py | Python | apache-2.0 | 28,151 | 0.000107 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from xml.dom import minidom
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.TestCase):
o | bj_for_test = {
'test': {
'name': 'test',
'values': [1, 2, 3],
'attrs': {
'foo': 1,
'bar': 2,
'baz': 3,
},
},
}
def test_empty_selector(self):
sel = xmlutil.Selector()
self.assertEqual(len(sel.chain), 0)
self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
def test_dict_selector(self):
| sel = xmlutil.Selector('test')
self.assertEqual(len(sel.chain), 1)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel(self.obj_for_test),
self.obj_for_test['test'])
def test_datum_selector(self):
sel = xmlutil.Selector('test', 'name')
self.assertEqual(len(sel.chain), 2)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'name')
self.assertEqual(sel(self.obj_for_test), 'test')
def test_list_selector(self):
sel = xmlutil.Selector('test', 'values', 0)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'values')
self.assertEqual(sel.chain[2], 0)
self.assertEqual(sel(self.obj_for_test), 1)
def test_items_selector(self):
sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[2], xmlutil.get_items)
for key, val in sel(self.obj_for_test):
self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
def test_missing_key_selector(self):
sel = xmlutil.Selector('test2', 'attrs')
self.assertEqual(sel(self.obj_for_test), None)
self.assertRaises(KeyError, sel, self.obj_for_test, True)
def test_constant_selector(self):
sel = xmlutil.ConstantSelector('Foobar')
self.assertEqual(sel.value, 'Foobar')
self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
def test_element_initial_attributes(self):
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
c=4, d=5, e=6)
# Verify all the attributes are as expected
expected = dict(a=1, b=2, c=4, d=5, e=6)
for k, v in expected.items():
self.assertEqual(elem.attrib[k].chain[0], v)
def test_element_get_attributes(self):
expected = dict(a=1, b=2, c=3)
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Verify that get() retrieves the attributes
for k, v in expected.items():
self.assertEqual(elem.get(k).chain[0], v)
def test_element_set_attributes(self):
attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
# Create a bare template element with no attributes
elem = xmlutil.TemplateElement('test')
# Set the attribute values
for k, v in attrs.items():
elem.set(k, v)
# Now verify what got set
self.assertEqual(len(elem.attrib['a'].chain), 1)
self.assertEqual(elem.attrib['a'].chain[0], 'a')
self.assertEqual(len(elem.attrib['b'].chain), 1)
self.assertEqual(elem.attrib['b'].chain[0], 'foo')
self.assertEqual(elem.attrib['c'], attrs['c'])
def test_element_attribute_keys(self):
attrs = dict(a=1, b=2, c=3, d=4)
expected = set(attrs.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=attrs)
# Now verify keys
self.assertEqual(set(elem.keys()), expected)
def test_element_attribute_items(self):
expected = dict(a=xmlutil.Selector(1),
b=xmlutil.Selector(2),
c=xmlutil.Selector(3))
keys = set(expected.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Now verify items
for k, v in elem.items():
self.assertEqual(expected[k], v)
keys.remove(k)
# Did we visit all keys?
self.assertEqual(len(keys), 0)
def test_element_selector_none(self):
# Create a template element with no selector
elem = xmlutil.TemplateElement('test')
self.assertEqual(len(elem.selector.chain), 0)
def test_element_selector_string(self):
# Create a template element with a string selector
elem = xmlutil.TemplateElement('test', selector='test')
self.assertEqual(len(elem.selector.chain), 1)
self.assertEqual(elem.selector.chain[0], 'test')
def test_element_selector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit selector
elem = xmlutil.TemplateElement('test', selector=sel)
self.assertEqual(elem.selector, sel)
def test_element_subselector_none(self):
# Create a template element with no subselector
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.subselector, None)
def test_element_subselector_string(self):
# Create a template element with a string subselector
elem = xmlutil.TemplateElement('test', subselector='test')
self.assertEqual(len(elem.subselector.chain), 1)
self.assertEqual(elem.subselector.chain[0], 'test')
def test_element_subselector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit subselector
elem = xmlutil.TemplateElement('test', subselector=sel)
self.assertEqual(elem.subselector, sel)
def test_element_append_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a child element
child = xmlutil.TemplateElement('child')
# Append the child to the parent
elem.append(child)
# Verify that the child was added
self.assertEqual(len(elem), 1)
self.assertEqual(elem[0], child)
self.assertEqual('child' in elem, True)
self.assertEqual(elem['child'], child)
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child')
self.assertRaises(KeyError, elem.append, child2)
def test_element_extend_children(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Verify that the children were added
self.assertEqual(len(elem), 3)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
|
skarphed/skarphed | core/lib/operation.py | Python | agpl-3.0 | 16,724 | 0.00891 | #!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import os
from daemon import Daemon
from time import sleep
from StringIO import StringIO
from traceback import print_exc
from skarphedcore.configuration import Configuration
from skarphedcore.database import Database
from skarphedcore.core import Core
from skarphedcore.module import Module
from common.errors import OperationException
class Operation(object):
"""
    Contains everything necessary to handle Operations
"""
STATUS_PENDING = 0
STATUS_ACTIVE = 1
STATUS_FAILED = 2
VALID_STORAGE_TYPES = ('int','bool','str','unicode')
def __init__(self, parent_id = None):
"""
"""
self._id = None
self._parent = parent_id
self._values = {}
@classmethod
def drop_operation(cls,operation_id):
"""
        D | rops an Operation, identified by its Operation Id, and
        its children recursively.
        Drop deletes the Operations from the database.
"""
db = Database()
stmnt | = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;"
cur = db.query(stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.drop_operation(row["OPE_ID"])
stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;"
db.query(stmnt,(operation_id,),commit=True)
@classmethod
def retry_operation(cls,operation_id):
"""
        Resets the state of an operation and its children recursively to 0 (PENDING)
The operation is identified by a given operationId
"""
db = Database()
stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;"
cur = db.query(stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.retry_operation(row["OPE_ID"])
stmnt = "UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;"
db.query(stmnt,(operation_id,),commit=True)
@classmethod
def cancel_operation(cls,operation_id):
"""
        Cancels an Operation, identified by its Operation Id, and
        its children recursively.
        Cancel deletes the Operation from the database.
"""
db = Database()
stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;"
cur = db.query(stmnt,(operation_id,))
for row in cur.fetchallmap():
cls.cancel_operation(row["OPE_ID"])
stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;"
db.query(stmnt,(operation_id,),commit=True)
@classmethod
def restore_operation(cls, operation_record):
"""
        Restore an Operation object stored in the database from a dataset consisting of
        the operation's ID and the operation's TYPE:
        For example: {"OPE_ID": 100, "OPE_TYPE": "TestOperation"}
        Restores the Operation object's _values attribute from the data saved
        in the DB table OPERATIONDATA.
"""
classname = operation_record["OPE_TYPE"]
module = "" #TODO Implement modulename from database if Operation belongs to Module
is_operation_of_module = False
exec """
try:
type(%(class)s)
except NameError,e:
is_operation_of_module = True"""%{'class':classname}
if is_operation_of_module:
exec """
from %(module)s import %(class)s
operation = %(class)s()"""%{'class':classname,'module':module}
else:
exec """
operation = %(class)s()"""%{'class':classname}
operation.set_id(operation_record['OPE_ID'])
db = Database()
stmnt = "SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? ;"
cur = db.query(stmnt,(operation_record["OPE_ID"],))
for row in cur.fetchallmap():
val = row["OPD_VALUE"]
exec """val = %s(val)"""%row["OPD_TYPE"]
operation.set_value(row["OPD_KEY"], val)
return operation
@classmethod
def process_children(cls, operation):
"""
        Recursively executes the workloads of the Operation's child operations.
        It catches exceptions in the workloads, sets the OPE_STATUS
        to 2 (FAILED) if a catch occurs, then passes the exception on to the
        higher layer.
        If an Operation succeeds, its entry in the DB gets deleted.
"""
db = Database()
stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;"
stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;"
cur = db.query(stmnt,(operation.get_id(),))
for row in cur.fetchallmap():
child_operation = cls.restore_operation(row)
db.query(stmnt_lock,(child_operation.get_id(),),commit=True)
try:
cls.process_children(child_operation)
child_operation.do_workload()
except Exception,e:
stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
db.query(stmnt_err,(int(row["OPE_ID"]),),commit=True)
#TODO GENERATE ERROR IN LOG
raise e
stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_ID = ?;"
db.query(stmnt_delete,(child_operation.get_id(),),commit=True)
@classmethod
def process_next(cls):
"""
Sets the status of the next toplevel operation to 1 (ACTIVE)
        Fetches the next toplevel operation from the database and applies a
        filesystem lock (scv_operating.lck under the configured core.webpath).
"""
db = Database()
configuration = Configuration()
if os.path.exists(configuration.get_entry("core.webpath")+"/scv_operating.lck"):
return False
lockfile = open(configuration.get_entry("core.webpath")+"/scv_operating.lck","w")
lockfile.close()
stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 \
WHERE OPE_ID IN ( \
SELECT OPE_ID FROM OPERATIONS \
WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \
AND OPE_INVOKED = ( \
SELECT MIN(OPE_INVOKED) FROM OPERATIONS \
WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \
) ;"
stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;"
db.query(stmnt_lock,commit=True)
cur = db.query(stmnt)
res = cur.fetchallmap()
if len(res) > 0:
operation = cls.restore_operation(res[0])
try:
cls.process_children(operation)
operation.do_workload()
except Exception, e:
stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
db.query(stmnt_err,(operation.get_id(),),commit=True)
error = StringIO()
print_exc(None,error)
Core().log(error.getvalue())
ret = True
else:
ret = False
stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;"
db.query(stmnt_delete,commit=True)
db.commit()
try:
|
GoogleCloudPlatform/python-repo-tools | setup.py | Python | apache-2.0 | 1,458 | 0 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required | by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import setuptools
with io.open('README.rst', 'r') as fh:
long_description = fh.read()
requirements = [
'google-cloud-core',
'requests',
'retrying',
'setuptools >= 25.0.0',
'packaging',
'pylint >= 1.8.0',
]
setuptools.setup(
name='gcp-devrel-py-tools',
version='0.0.16',
description='Tools for Cloud Platform Python libraries and samples.',
long_description=long_description,
url='https://github.com/GoogleCloudPlatform/python-repo-tools',
author='Thea Flowers',
author_email='theaflowers@google.com',
license='Apache Software License',
classifiers=[
'Operating System :: POSIX',
],
packages=setuptools.find_packages(),
install_requires=requirements,
entry_points={
'console_scripts': [
'gcp-devrel-py-tools=gcp_devrel.tools:main',
],
},
)
|
Shopify/shopify_python | shopify_python/packaging.py | Python | mit | 2,108 | 0.003795 | import os.path
import subprocess
import pkg_resources
import setuptools # pylint: disable=unused-import
def get_package_revision(package_name):
# type: (str) -> str
"""Determine the Git commit hash for the Shopify package.
If the package is installed in "develop" mode the SHA is retrieved using Git. Otherwise it will be retrieved from
the package's Egg metadata. Returns an empty string if the package is not installed or does not contain revision
information.
"""
egg_info = pkg_resources.working_set.find(pkg_resources.Requirement.parse(package_name))
if egg_info is None:
return ''
if os.path.exists(os.path.join(egg_info.location, '.git')):
return str(subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=egg_info.location).decode()).strip()
if | egg_info.has_metadata('git_sha.txt'):
return egg_info.get_metadata('git_sha.txt')
return ''
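# Usage sketch (hypothetical package name, for illustration only):
#
#     sha = get_package_revision('shopify_python')
#     # -> '' if the package is not installed or carries no revision info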
def write_package_revision(cmd, _, filename):
# type: (setuptools.Command, str, str) -> None
"""Write the Git commit hash for the package th | at is currently being built.
If the build is not occurring from a Git checkout the current revision must be stored in a text file named
"REVISION".
This function should not be called except via setuptools, by specifying an 'egg_info.writers' entrypoint as follows:
setuptools.setup(
name='test_packaging',
...
install_requires=[
'shopify_python'
],
...
entry_points={
'egg_info.writers': [
'git_sha.txt = shopify_python.packaging:write_package_revision',
],
}
...
)
"""
git_sha = None
if os.path.exists('.git'):
git_sha = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
elif os.path.exists('REVISION'):
with open('REVISION') as revision_file:
git_sha = revision_file.read().strip()
if git_sha is not None:
cmd.write_or_delete_file("Git SHA", filename, git_sha)
|
plotly/plotly.py | packages/python/plotly/plotly/validators/barpolar/_opacity.py | Python | mit | 474 | 0.00211 | import _plotly_utils.basevalidators
class OpacityValidator(_plot | ly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="barpolar", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", " | style"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
**kwargs
)
|
xbash/LabUNAB | 03_condiciones/Ejercicio8_saltos.py | Python | gpl-3.0 | 1,849 | 0.018388 | """
In diving competitions, each dive is judged by a panel of seven judges. Each judge gives a
score on a scale from 1 to 10, in increments of 0.5. The highest and the lowest scores are
discarded. The sum of the five remaining scores is multiplied by 0.6, and the result is
multiplied by the degree of difficulty of the dive. The value obtained is the total score of the dive.
Develop a flowchart that reads the degree of difficulty of the dive and the 7 judges' scores,
and then displays the total score of the dive on screen.
An example is shown below:
Degree of difficulty: 3.0
Judge 1: 5.0
Judge 2: 5.5
Judge 3: 4.0
Judge 4: 5.0
Judge 5: 4.5
Judge 6: 5.5
Judge 7: 5.0
The total score is 45.0
"""
# Variables
ptje=0.0
menor=0.0
mayor=0.0
ptjetotal=0.0
gradodif=0
i=1
print "\t\tCalificacion de Saltos Clavados"
print "Ingrese Grado de Dificultad del Salto"
gradodif=input(" Grado :")
print "Ingrese 7 calificaciones"
ptje=float(raw_input("Calificacion :"))
menor=ptje
mayor=ptje
ptjetotal =ptjetotal+ptje
for i in range( | 2,8,1):
    ptje=float(raw_input("Score :"))
if menor>ptje:
menor=ptje
if mayor<ptje:
mayor=ptje
ptjetotal =ptjetotal+ptje
# outside the loop
ptjetotal=ptjeto | tal-(mayor+menor)
ptjetotal=ptjetotal*0.6
ptjetotal=ptjetotal*gradodif
print"\n\n\t\t Puntaje Total del Salto :", ptjetotal
"""
Sample run:
Diving Score Calculation
Enter the degree of difficulty of the dive
 Difficulty :3
Enter 7 scores
Score :5
Score :5.5
Score :4
Score :5
Score :4.5
Score :5.5
Score :5
 Total Dive Score : 45.0
"""
|
vikalpindia/vikalp | vikalp/compressor/filters.py | Python | apache-2.0 | 695 | 0.011511 | from compressor.conf import settings
from compressor.filters import CompilerFilter
class YUglifyFilter(CompilerFilter):
command = "{binary} { | args}"
def __init__(self, *args, **kwargs):
super(YUglifyFilter, self).__init__(*args, **kwargs)
self.command += ' --type=%s' | % self.type
class YUglifyCSSFilter(YUglifyFilter):
type = 'css'
options = (
("binary", settings.COMPRESS_YUGLIFY_BINARY),
("args", settings.COMPRESS_YUGLIFY_CSS_ARGUMENTS),
)
class YUglifyJSFilter(YUglifyFilter):
type = 'js'
options = (
("binary", settings.COMPRESS_YUGLIFY_BINARY),
("args", settings.COMPRESS_YUGLIFY_JS_ARGUMENTS),
) |
mxm/incubator-beam | sdks/python/apache_beam/examples/wordcount_debugging.py | Python | apache-2.0 | 6,652 | 0.006464 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that verifies the counts and includes best practices.
On top of the basic concepts in the wordcount example, this workflow introduces
logging to Cloud Logging and the use of assertions in a Dataflow pipeline.
To execute this pipeline locally, specify a local output file or output prefix
on GCS::
--output [YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
To execute this pipeline using the Google Cloud Dataflow service, specify
pipeline configuration::
--project YOUR_PROJECT_ID
--staging_location gs://YOUR_STAGING_DIRECTORY
--temp_location gs://YOUR_TEMP_DIRECTORY
--job_name YOUR_JOB_NAME
--runner DataflowRunner
and an output prefix on GCS::
--output gs://YOUR_OUTPUT_PREFIX
"""
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
super(FilterTextFn, self).__init__()
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Those
# values will be available in the monitoring system of the runner used
# to run the pipeline. These metrics below track the number of
# matched and unmatched words.
self.matched_words = Metrics.counter(self.__class__, 'matched_words')
    self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different log
# levels can be used to control the verbosity of logging providing an
# effective mechanism to filter less important information.
# Note currently only "INFO" and higher level logs are emitted to the
# Cloud Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
      self.unmatched_words.inc()
class CountWords(beam.PTransform):
"""A transform to count the occurrences of each word.
A PTransform that converts a PCollection containing lines of text into a
PCollection of (word, count) tuples.
"""
def expand(self, pcoll):
def count_ones(word_ones):
(word, ones) = word_ones
return (word, sum(ones))
return (pcoll
| 'split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(count_ones))
def run(argv=None):
"""Runs the debugging wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
des | t='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
| help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection, count the occurrences of
# each word and filter by a list of words.
filtered_words = (
p | 'read' >> ReadFromText(known_args.input)
| CountWords()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# assert_that is a convenient PTransform that checks a PCollection has an
# expected value. Asserts are best used in unit tests with small data sets
# but is demonstrated here as a teaching tool.
#
# Note assert_that does not provide any output and that successful
# completion of the Pipeline implies that the expectations were met. Learn
# more at https://cloud.google.com/dataflow/pipelines/testing-your-pipeline
# on how to best test your pipeline.
assert_that(
filtered_words, equal_to([('Flourish', 3), ('stomach', 1)]))
# Format the counts into a PCollection of strings and write the output using
# a "Write" transform that has side effects.
# pylint: disable=unused-variable
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
output = (filtered_words
| 'format' >> beam.Map(format_result)
| 'write' >> WriteToText(known_args.output))
if __name__ == '__main__':
# Cloud Logging would contain only logging.INFO and higher level logs logged
# by the root logger. All log statements emitted by the root logger will be
# visible in the Cloud Logging UI. Learn more at
# https://cloud.google.com/logging about the Cloud Logging UI.
#
# You can set the default logging level to a different level when running
# locally.
logging.getLogger().setLevel(logging.INFO)
run()
|
emiliom/pyoos | pyoos/parsers/ioos/describe_sensor.py | Python | lgpl-3.0 | 1,657 | 0.003018 | from pyoos.utils.etree import etree
| from owslib.namespaces import Namespaces
ns = Namespaces()
SML_NS = ns.get_versioned_namespace('sml', '1.0.1')
SWE_NS = [ns.get_versioned_namespace('swe', '1.0.1')]
class IoosDescribeSensor(object):
def __new__(cls, element):
if isinstance(element, str):
root = etree.fromstring(element)
else:
root = element
sml_str = ".//{{{0}}}ide | ntifier/{{{0}}}Term[@definition='http://mmisw.org/ont/ioos/definition/%s']".format(SML_NS)
if hasattr(root, 'getroot'):
root = root.getroot()
# circular dependencies are bad. consider a reorganization
        # find the proper type for the DescribeSensor
from pyoos.parsers.ioos.one.describe_sensor import (NetworkDS,
StationDS, SensorDS)
for ds_type, constructor in [('networkID', NetworkDS), ('stationID', StationDS), ('sensorID', SensorDS)]:
if root.find(sml_str % ds_type) is not None:
return super(IoosDescribeSensor, cls).__new__(constructor)
# NOAA CO-OPS
sml_str = ".//{{{0}}}identifier/{{{0}}}Term[@definition='urn:ioos:def:identifier:NOAA::networkID']".format(SML_NS)
if root.find(sml_str) is not None:
return super(IoosDescribeSensor, cls).__new__(NetworkDS)
# if we don't find the proper request from the IOOS definitions,
# try to adapt a generic DescribeSensor request to the dataset
from pyoos.parsers.ioos.one.describe_sensor import GenericSensor
return super(IoosDescribeSensor, cls).__new__(GenericSensor)
|
therightmandev/WebShop | server/config.py | Python | mit | 257 | 0.003891 | import sys, os
EMAIL = os.environ.get('FLASK_EMAIL', '')
EMAIL_PASSWORD = os.environ.get('FLASK_EMAIL_PASSWORD', '' | )
DB_P | ATH = 'sqlite:///test.db'
if EMAIL == '' or EMAIL_PASSWORD == '':
sys.exit('Please write email and password in server/config.py')
|
jwhitlock/kuma | kuma/users/migrations/0003_user_discourse_url.py | Python | mpl-2.0 | 650 | 0.001538 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-13 15:29
from __future__ import unicode_literals
import djang | o.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_pytz_2018_5_and_username'),
]
operations = [
migrations.AddField(
model_name='user',
name='discourse_url',
field=models.TextField(blank=True, validators=[django.core.validators.RegexValidator(b'^https://discourse\\.mozilla\\.org/u/', 'Enter a valid Discourse URL.', b'inv | alid')], verbose_name='Discourse'),
),
]
|
rockychen-dpaw/borgcollector | borg_utils/borg_config.py | Python | bsd-3-clause | 890 | 0.020225 | import os
from django.conf import settings
class BorgConfiguration():
@staticmethod
def initialize():
setattr(BorgConfiguration,"DEBUG",getat | tr(settings,"DEBUG",False))
config = getattr(settings,"HARVEST_CONFIG")
if not config:
config = {}
for name, value in config.iteritems():
setattr(BorgConfiguration, name, value)
setattr(BorgConfiguration,"TEST_INPUT_SCHEMA",BorgConfiguration.test_schema(BorgCo | nfiguration.INPUT_SCHEMA))
setattr(BorgConfiguration,"TEST_NORMAL_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.NORMAL_SCHEMA))
setattr(BorgConfiguration,"TEST_TRANSFORM_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.TRANSFORM_SCHEMA))
@staticmethod
def test_schema(schema):
return "test_" + schema
BorgConfiguration.initialize()
#import ipdb;ipdb.set_trace()
|
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/HMM/DynamicProgramming.py | Python | apache-2.0 | 12,658 | 0.001264 | """Dynamic Programming algorithms for general usage.
This module contains classes which implement Dynamic Programming
algorithms that can be used generally.
"""
class AbstractDPAlgorithms:
"""An abstract class to calculate forward and backward probabiliies.
This class should not be instantiated directly, but should be used
through a derived class which implements proper scaling of variables.
    This class is just meant to encapsulate the basic forward and backward
algorithms, and allow derived classes to deal with the problems of
multiplying probabilities.
Derived class of this must implement:
o _forward_recursion -- Calculate the forward values in the recursion
using some kind of technique for preventing underflow errors.
o _backward_recursion -- Calculate the backward values in the recursion
step using some technique to prevent underflow errors.
"""
def __init__(self, markov_model, sequence):
"""Initialize to calculate foward and backward probabilities.
Arguments:
o markov_model -- The current Markov model we are working with.
o sequence -- A training sequence containing a set of emissions.
"""
self._mm = markov_model
self._seq = sequence
    def _forward_recursion(self, cur_state, sequence_pos, forward_vars):
"""Calculate the forward recursion value.
"""
raise NotImplementedError("Subclasses must implement")
def forward_algorithm(self):
"""Calculate sequence probability using the forward algorithm.
        This implements the forward algorithm, as described on p57-58 of
Durbin et al.
Returns:
        o A dictionary containing the forward variables. This has keys of the
form (state letter, position in the training sequence), and values
containing the calculated forward variable.
o The calculated probability of the sequence.
"""
# all of the different letters that the state path can be in
state_letters = self._seq.states.alphabet.letters
# -- initialize the algorithm
#
# NOTE: My index numbers are one less than what is given in Durbin
# et al, since we are indexing the sequence going from 0 to
# (Length - 1) not 1 to Length, like in Durbin et al.
#
forward_var = {}
# f_{0}(0) = 1
forward_var[(state_letters[0], -1)] = 1
# f_{k}(0) = 0, for k > 0
for k in range(1, len(state_letters)):
forward_var[(state_letters[k], -1)] = 0
# -- now do the recursion step
# loop over the training sequence
# Recursion step: (i = 1 .. L)
for i in range(len(self._seq.emissions)):
# now loop over the letters in the state path
for main_state in state_letters:
# calculate the forward value using the appropriate
# method to prevent underflow errors
forward_value = self._forward_recursion(main_state, i,
forward_var)
if forward_value is not None:
forward_var[(main_state, i)] = forward_value
# -- termination step - calculate the probability of the sequence
first_state = state_letters[0]
seq_prob = 0
for state_item in state_letters:
# f_{k}(L)
forward_value = forward_var[(state_item,
len(self._seq.emissions) - 1)]
# a_{k0}
transition_value = self._mm.transition_prob[(state_item,
first_state)]
seq_prob += forward_value * transition_value
return forward_var, seq_prob
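    # In the notation of Durbin et al., the recursion computed above is
    #     f_k(i) = e_k(x_i) * sum_j f_j(i-1) * a_{jk}
    # with the subclass hook (_forward_recursion) deciding how to guard
    # against underflow (scaling or log transformation).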
    def _backward_recursion(self, cur_state, sequence_pos, backward_vars):
"""Calculate the backward recursion value.
"""
raise NotImplementedError("Subclasses must implement")
def backward_algorithm(self):
"""Calculate sequence probability using the backward algorithm.
This implements the backward algorithm, as described on p58-59 of
Durbin et al.
Returns:
o A dictionary containing the backwards variables. This has keys
of the form (state letter, position in the training sequence),
and values containing the calculated backward variable.
"""
# all of the different letters that the state path can be in
state_letters = self._seq.states.alphabet.letters
# -- initialize the algorithm
#
# NOTE: My index numbers are one less than what is given in Durbin
# et al, since we are indexing the sequence going from 0 to
# (Length - 1) not 1 to Length, like in Durbin et al.
#
backward_var = {}
first_letter = state_letters[0]
# b_{k}(L) = a_{k0} for all k
for state in state_letters:
backward_var[(state, len(self._seq.emissions) - 1)] = \
self._mm.transition_prob[(state, state_letters[0])]
# -- recursion
# first loop over the training sequence backwards
# Recursion step: (i = L - 1 ... 1)
all_indexes = range(len(self._seq.emissions) - 1)
all_indexes.reverse()
for i in all_indexes:
# now loop over the letters in the state path
for main_state in state_letters:
# calculate the backward value using the appropriate
# method to prevent underflow errors
backward_value = self._backward_recursion(main_state, i,
backward_var)
if backward_value is not None:
backward_var[(main_state, i)] = backward_value
# skip the termination step to avoid recalculations -- you should
# get sequence probabilities using the forward algorithm
return backward_var
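    # The corresponding backward recursion (Durbin et al., p58-59) is
    #     b_k(i) = sum_j a_{kj} * e_j(x_{i+1}) * b_j(i+1)
    # again with underflow handling delegated to the subclass hook.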
class ScaledDPAlgorithms(AbstractDPAlgorithms):
"""Implement forward and backward algorithms using a rescaling approach.
This scales the f and b variables, so that they remain within a
manageable numerical interval during calculations. This approach is
described in Durbin et al. on p 78.
    This approach is a little more straightforward than log transformation
but may still give underflow errors for some types of models. In these
cases, the LogDPAlgorithms class should be used.
"""
def __init__(self, markov_model, sequence):
"""Initialize the scaled approach to calculating probabilities.
Arguments:
o markov_model -- The current Markov model we are working with.
o sequence -- A TrainingSequence object that must have a
set of emissions to work with.
"""
AbstractDPAlgorithms.__init__(self, markov_model, sequence)
self._s_values = {}
def _calculate_s_value(self, seq_pos, previous_vars):
"""Calculate the next scaling variable for a sequence position.
This utilizes the approach of choosing s values such that the
sum of all of the scaled f values is equal to 1.
Arguments:
o seq_pos -- The current position we are at in the sequ | ence.
o previous_vars -- All of the forward or backward variables
calculated so far.
Returns:
o The calculated scaling variable for the sequence item.
"""
# all of the different letters the state can have
state_letters = self._seq.states.alphabet.letters
# loop over all of the possible states
s_value = 0
for main_state in state_letters:
emission = self._mm.emission_prob[(main_state,
| self._seq.emissions[seq_pos])]
# now sum over all of the previous vars and transitions
trans_and_var_sum = 0
for second_state in self._mm.transitions_from(main_state):
# the value of the previous f or b value
var_value = previou |
lastralab/Statistics | Specialization/Personal/Loops.py | Python | mit | 855 | 0.02924 |
''' While x is less than or equal to 10 it will print x and add 1 to that number,
then print it and so on until the condition is false, which is
when x reaches 11 '''
condition = input('Enter number: ')
x = int(condition)
print (' ')
print ('While loop:')
while x <= 10:
print (x)
x += 1
# will stop adding 1 when it reaches 11
while x > 10:
print('True')
print (x)
print('Number is higher than 10')
break # otherwise it will print True forever, like this:
# uncomment to | run and watch:
'''
while True:
print('infinite')
'''
print (' ')
print ('For Loop: ')
exampleList = [1,6,7,3,6,9,0]
print (' ')
print ('See co | de for reference')
print (' ')
for thing in exampleList:
print (thing)
print (' ')
print ('For x in range loop:')
print (' ')
for x in range (1,11): # range is not in list, this is separate
print (x)
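# range can also take a step argument; an extra example (assumed to fit
# the lesson above): counting down by 2.
for x in range(10, 0, -2):
    print (x) # prints 10, 8, 6, 4, 2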
|
snowleung/mywunder | mywunder/myconfig.py | Python | mit | 505 | 0.00396 | #coding:utf-8
import os
import logging
logging.basicCo | nfig(level=logging.DEBUG, format='%(asctime)s %(filename)s:%(lineno)s - %(funcName)20s() - %(name)s - %(levelname)s - %(message)s')
logging.warning('load module:%s', __name__)
user_path = os.path.expanduser("~/")
dir_path = os.path.join(user_path, '.mywunder/')
try:
os.mkdir(dir_path)
except OSError:
pass
config_txt = os.path.join(dir_path, 'config.txt')
db_path = os.path.jo | in(dir_path, "mywunder.db")
CLIENT_ID = 'ce310d4e732dc98c6a07'
|
zhongliliu/muse | muse/Similarity/Calc_Angle.py | Python | gpl-2.0 | 1,889 | 0.005294 | """
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Founda | tion
version 2 of the License.
This program is distributed in the hope that it will be useful, but WIT | HOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
import numpy as np
import math
def calc_angle(l1, l2):
v10 = l1
v12 = l2
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)*180/math.pi
return angle
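# e.g. calc_angle(np.array([1., 0., 0.]), np.array([0., 1., 0.])) -> 90.0
# (the angle between the two vectors, in degrees)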
def calc_3atoms_angle(pos1, pos2, pos3):
v10 = pos2 - pos1
v12 = pos3 - pos1
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)*180/math.pi
return angle
def dihedral_angle(cell_a, cell_b, cell_c):
a = cell_a
b = cell_b
c = cell_c
axb = np.cross(a, b)
axb /= np.linalg.norm(axb)
bxc = np.cross(b, c)
bxc /= np.linalg.norm(bxc)
angle = np.vdot(axb, bxc)
dangle = 180-np.arccos(angle)*180/math.pi
# print dangle
return dangle
if __name__ == '__main__':
cell = np.array([[2.7085009435849550, -2.7085009435849550, -0.0000000000000000],
[-2.7085009435849550, 0.0000000000000000, -2.7085009435849550],
[2.7085009435849550, 2.7085009435849550, -0.0000000000000000]])
cell_a = cell[0]
cell_b = cell[1]
cell_c = cell[2]
dihedral_angle(cell_a, cell_b, cell_c)
dihedral_angle(cell_b, cell_c, cell_a)
dihedral_angle(cell_c, cell_a, cell_b)
|
pyupio/dparse | tests/test_parse.py | Python | mit | 10,268 | 0.001169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""Tests for `dparse.parser`"""
from dparse.parser import parse, Parser
from dparse import filetypes
from packaging.specifiers import SpecifierSet
def test_requirements_with_invalid_requirement():
content = "in=vali===d{}{}{"
dep_file = parse(content, file_type=filetypes.requirements_txt)
assert len(dep_file.dependencies) == 0
def test_tox_ini_with_invalid_requirement():
content = "[testenv]" \
"passenv = CI TRAVIS TRAVIS_*" \
"setenv =" \
"PYTHONPATH = {toxinidir}" \
"deps =" \
"-r{toxinidir}/requirements_dev.txt" \
"pytest-cov" \
"codecov"
dep_file = parse(content, file_type=filetypes.tox_ini)
assert len(dep_file.dependencies) == 0
def test_conda_file_with_invalid_requirement():
content = "name: my_env\n" \
"dependencies:\n" \
" - gevent=1.2.1\n" \
" - pip:\n" \
" - in=vali===d{}{}{"
dep_file = parse(content, file_type=filetypes.conda_yml)
assert len(dep_file.dependencies) == 0
def test_conda_file_invalid_yml():
content = "wawth:dda : awd:\ndlll"
dep_file = parse(content, file_type=filetypes.conda_yml)
assert dep_file.dependencies == []
def test_conda_file_marked_line():
content = "name: my_env\n" \
"dependencies:\n" \
" - gevent=1.2.1\n" \
" - pip:\n" \
" - beautifulsoup4==1.2.3\n # naaah, marked"
dep_file = parse(content, file_type=filetypes | .conda_yml)
assert len(dep_file.dependencies) == 1
dep_file = parse(content, file_type=filetypes.conda_yml, marker=((), "naah, marked"))
assert len(dep_file.dependencies) == 0
def test_tox_ini_marked_line():
content = "[testenv:bandit]\n" \
"commands =\n" \
"\tbandit --ini setup.cfg -ii -l --recursive project_directory\n" \
"deps =\n" \
"\tbandit==1.4.0 # naaah, mark | ed\n" \
"\n" \
"[testenv:manifest]\n" \
"commands =\n" \
"\tcheck-manifest --verbose\n"
dep_file = parse(content, "tox.ini")
assert len(dep_file.dependencies) == 1
dep_file = parse(content, "tox.ini", marker=((), "naah, marked"))
assert len(dep_file.dependencies) == 0
def test_resolve_file():
line = "-r req.txt"
assert Parser.resolve_file("/", line) == "/req.txt"
line = "-r req.txt # mysterious comment"
assert Parser.resolve_file("/", line) == "/req.txt"
line = "-r req.txt"
assert Parser.resolve_file("", line) == "req.txt"
def test_index_server():
line = "--index-url https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "-i https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url https://some.foo"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url https://some.foo # some lousy comment"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "-i\t\t https://some.foo \t\t # some lousy comment"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--index-url"
assert Parser.parse_index_server(line) is None
line = "--index-url=https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "-i=https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url=https://some.foo/"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url=https://some.foo"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "--extra-index-url=https://some.foo # some lousy comment"
assert Parser.parse_index_server(line) == "https://some.foo/"
line = "-i\t\t =https://some.foo \t\t # some lousy comment"
assert Parser.parse_index_server(line) == "https://some.foo/"
def test_requirements_package_with_index_server():
content = """-i https://some.foo/\ndjango"""
dep_file = parse(content=content, file_type=filetypes.requirements_txt)
dep = dep_file.dependencies[0]
assert dep.name == "django"
assert dep.index_server == "https://some.foo/"
def test_requirements_parse_empty_line():
content = """
"""
dep_file = parse(content=content, file_type=filetypes.requirements_txt)
assert dep_file.dependencies == []
assert dep_file.resolved_files == []
def test_requirements_parse_unsupported_line_start():
content = "-f foo\n" \
"--find-links bla\n" \
"-i bla\n" \
"--index-url bla\n" \
"--extra-index-url bla\n" \
"--no-index bla\n" \
"--allow-external\n" \
"--allow-unverified\n" \
"-Z\n" \
"--always-unzip\n"
dep_file = parse(content=content, file_type=filetypes.requirements_txt)
assert dep_file.dependencies == []
assert dep_file.resolved_files == []
def test_file_resolver():
content = "-r production/requirements.txt\n" \
"--requirement test.txt\n"
dep_file = parse(content=content, path="/", file_type=filetypes.requirements_txt)
assert dep_file.resolved_files == [
"/production/requirements.txt",
"/test.txt"
]
dep_file = parse(content=content, file_type=filetypes.requirements_txt)
assert dep_file.resolved_files == []
def test_is_marked_file():
content = "# DON'T\nfoo"
dep_file = parse(content, file_type=filetypes.requirements_txt)
assert not dep_file.parser.is_marked_file
dep_file = parse(content, file_type=filetypes.requirements_txt, marker=(("DON'T",), ()))
assert dep_file.parser.is_marked_file
def test_is_marked_line():
content = "foo # don't"
dep_file = parse(content, file_type=filetypes.requirements_txt)
assert not dep_file.parser.is_marked_line(next(dep_file.parser.iter_lines()))
dep_file = parse(content, file_type=filetypes.requirements_txt, marker=((), ("don't",)))
assert dep_file.parser.is_marked_line(next(dep_file.parser.iter_lines()))
def test_pipfile():
content = """[[source]]
url = "http://some.pypi.mirror.server.org/simple"
verify_ssl = false
name = "pypi"
[packages]
django = "==2.0"
djangorestframework = "*"
django-allauth = "*"
[dev-packages]
toml = "*"
"""
dep_file = parse(content, file_type=filetypes.pipfile)
assert len(dep_file.dependencies) == 4
assert dep_file.dependencies[0].name == 'django'
assert dep_file.dependencies[0].specs == SpecifierSet('==2.0')
assert dep_file.dependencies[1].name == 'djangorestframework'
assert dep_file.dependencies[1].specs == SpecifierSet()
def test_pipfile_lock():
content = """{
"_meta": {
"hash": {
"sha256": "8b5635a4f7b069ae6661115b9eaa15466f7cd96794af5d131735a3638be101fb"
},
"host-environment-markers": {
"implementation_name": "cpython",
"implementation_version": "3.6.3",
"os_name": "posix",
"platform_machine": "x86_64",
"platform_python_implementation": "CPython",
"platform_release": "17.3.0",
"platform_system": "Darwin",
"platform_version": "Darwin Kernel Version 17.3.0: Thu Nov 9 18:09:22 PST 2017; root:xnu-4570.31.3~1/RELEASE_X86_64",
"python_full_version": "3.6.3",
"python_version": "3.6",
"sys_platform": "darwin"
},
"pipfile-spec": 6,
"requires": {},
"sources": [
{
"name": "pypi",
"url": "https://pypi.python.org/simple",
"verify_ssl": true
}
]
},
"default": {
"django": {
"hashes": [
"sha256:52475f607c92035d4ac8fee284f56213065a4a |
CompassionCH/compassion-modules | mobile_app_connector/migrations/12.0.1.0.1/post-migration.py | Python | agpl-3.0 | 660 | 0 | ##############################################################################
#
# Copyright (C) 2020 Co | mpassion CH (http://www.compassion.ch)
# @author: Théo Nikles <theo.nikles@gmail.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
def migrate(cr, version):
if not version:
return
cr.execute("""
INSERT INTO mobile_app_messages(partner_id)
SELECT id FROM res_partner;
UP | DATE res_partner p
SET app_messages = (
SELECT id FROM mobile_app_messages
WHERE partner_id = p.id
);
""")
|
BCCVL/org.bccvl.tasks | src/org/bccvl/tasks/datamover/__init__.py | Python | gpl-2.0 | 359 | 0 | from __future__ import absolute_import
| from .tasks import move, update_metadata, import_multi_specie | s_csv
from .ala import pull_occurrences_from_ala
from .gbif import pull_occurrences_from_gbif
from .aekos import pull_occurrences_from_aekos, pull_traits_from_aekos
from .zoatrack import pull_traits_from_zoatrack
from .obis import pull_occurrences_from_obis
|
slosar/GMSampler | game.py | Python | gpl-2.0 | 11,404 | 0.033585 | from scipy import *
import random
import scipy.linalg as la
import cPickle
import pylab
class Sample:
def __init__ (self,pars, like, glikes):
self.pars=pars
self.like=like
self.glikes=glikes
class Gaussian:
def __init__(self,mean,cov, fastpars=None):
self.cov=cov
self.mean=mean
self.chol=la.cholesky(cov)
self.lndet=log(self.chol.diagonal()).sum()*2.0
self.icov=la.inv(cov)
self.N=len(cov)
if (fastpars!=None):
Nf=len(fastpars)
meanf=None
covif=zeros((Nf,Nf))
covf=zeros((Nf,Nf))
N=len(self.cov)
Ns=N-Nf
Css=zeros((Ns,Ns))
Cfs=zeros((Nf,Ns))
slowpars=range(N)
for i in fastpars:
slowpars.pop(slowpars.index(i))
for i,ip in enumerate(fastpars):
for j,jp in enumerate(fastpars):
covif[i,j]=self.icov[ip,jp]
covf[i,j]=self.cov[ip,jp]
covf=la.inv(covif)
## yes cov here, icov above, see
for i,ip in enumerate(slowpars):
for j,jp in enumerate(slowpars):
Css[i,j]=self.cov[ip,jp]
for i,ip in enumerate(fastpars):
for j,jp in enumerate(slowpars):
Cfs[i,j]=self.cov[ip,jp]
self.SubMatMu=dot(Cfs,la.inv(Css))
tmp=la.cholesky(Css)
tmpi=la.inv(tmp)
## is this stabler?
self.SubMatMu=dot(dot(Cfs,tmpi),transpose(tmpi))
self.Fast=Gaussian(None,covf,None)
self.fastpars=fastpars
self.slow | pars=slowpars
| self.Ns=Ns
def sample(self):
da=array([random.gauss(0.,1.) for x in range(self.N)])
glike = -(da**2).sum()/2.0-self.lndet/2.0
sa=dot(da,self.chol)
if (self.mean!=None):
sa+=self.mean
return sa,glike
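    # (note: la.cholesky returns an upper-triangular U with cov = U^T U,
    #  so sa = da . U for da ~ N(0, I) has covariance cov; glike is the
    #  log-density of the draw under this Gaussian, up to the constant
    #  -N/2 * log(2*pi))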
def sample_fast(self, slowsamp):
## here we replace slowsamps relevant pars
sa,glike=self.Fast.sample()
outsamp=slowsamp*1.0
## now get the mean
mn=zeros(self.Ns)
for i,ip in enumerate(self.slowpars):
mn[i]=slowsamp[ip]-self.mean[ip]
mn=dot(self.SubMatMu,mn)
for i,ip in enumerate(self.fastpars):
outsamp[ip]=self.mean[ip]+mn[i]+sa[i]
## but let's just calculate like by bruteforce
glike=self.like(outsamp)
return outsamp, glike
def chi2(self,vec):
        if self.mean!=None:
delta=vec-self.mean
else:
delta=vec
return dot(dot(delta,self.icov),delta)
def like(self,vec):
return -self.chi2(vec)/2-self.lndet/2.0
class Game:
def __init__ (self, likefuncmany, par0, sigreg=0.0):
#random.seed(10)
self.like=likefuncmany ## returns log like
self.sigreg=array(sigreg)
self.sigreg2=self.sigreg**2
self.N=len(par0)
self.N1=1000 ## how many samples for each Gaussian
self.N1f=4 ## subsample fast how much
self.blow=2.0 ## factor by which to increase the enveloping Gauss
        self.wemin=0.00 ### drop samples whose weight falls below this
        self.mineffsamp=5000 ### minimum number of effective samples that we require
self.fixedcov=False
self.fixedcovuse=None
self.toexplore=array(par0)
self.maxiter=30
self.fastpars=None
self.priorlow=None
self.priorhigh=None
self.pickleBetween=False
def run(self):
if self.fastpars==None:
self.N1f=0
done=False
toexplore=self.toexplore
badlist=[]
self.Gausses=[]
self.SamList=[]
while not done:
sample_list, G=self.isample (toexplore)
self.Gausses.append(G)
self.SamList+=sample_list
toexplore=self.rebuild_samples(self.SamList, self.Gausses)
if self.pickleBetween:
if (len(self.Gausses)%100==1):
fname='/tmp/game'+str(len(self.Gausses))+'.pickle'
cPickle.dump(self,open(fname,'w'),-1)
if (len(self.Gausses)>=self.maxiter):
print "Max iter exceeded"
done=True
if (self.effsamp>self.mineffsamp):
done=True
def gausses_eval(self,sam):
if len(sam.glikes)!=len(self.Gausses):
stop("SHIT")
probi=(exp(array(sam.glikes))).sum()
return probi
def rebuild_samples(self, SamList,Gausses):
maxlike=-1e30
gmaxlike=-1e30
for sa in SamList:
if (sa.like>maxlike):
maxlike=sa.like
maxlikesa=sa
sa.glike=self.gausses_eval(sa)
if (sa.glike>gmaxlike):
gmaxlike=sa.glike
gmaxlike2=self.gausses_eval(maxlikesa)
#gmaxlike=gmaxlike2
wemax=0.0
flist=[]
wemax=0.0
parmaxw=None
effsamp=0
for sa in SamList:
rellike=exp(sa.like-maxlike)
glike=sa.glike/gmaxlike
we=rellike/glike
sa.we=we
if we>wemax:
wemax=we
parmaxw=sa.pars
if we>self.wemin:
flist.append(sa)
#The highest weight counts one, others less
wei=array([sa.we for sa in SamList])
wei/=wei.max()
effsamp=wei.sum()
self.sample_list=flist
print "#G=",len(Gausses), "maxlike=",maxlike,"wemax=",wemax,"effsamp=",effsamp
self.effsamp=effsamp
self.wemax=wemax
return parmaxw
def getcov(self, around):
N=self.N
if (self.fixedcov):
if (self.fixedcovuse!=None):
G=Gaussian(around,self.fixedcovuse,self.fastpars)
return G
else:
cov=zeros((N,N))
for i in range(N):
cov[i,i]=self.sigreg2[i]
#print cov
G=Gaussian(around,cov,self.fastpars)
return G
icov=zeros((N,N))
delta=self.sigreg/20.0
toget=[]
toget.append(around)
        ### This is a kind of ugly hack:
        ### we run exactly the same loop twice,
        ### first populating the points at which to evaluate the likelihood,
        ### then popping the results, relying on the two loops staying in sync.
fastpars=self.fastpars
if fastpars==None:
fastpars=[]
for i in range(N):
parspi=around*1.0
parsmi=around*1.0
parspi[i]+=delta[i]
parsmi[i]-=delta[i]
for j in range(i,N):
if (i==j):
toget.append(parspi)
toget.append(parsmi)
else:
#if (i not in fastpars) and (j not in fastpars):
parspp=parspi*1.0
parspm=parspi*1.0
parsmp=parsmi*1.0
parsmm=parsmi*1.0
parspp[j]+=delta[j]
parspm[j]-=delta[j]
parsmp[j]+=delta[j]
parsmm[j]-=delta[j]
toget.append(parspp)
toget.append(parsmm)
toget.append(parspm)
toget.append(parsmp)
print "Doing covariance matrix",len(toget), N
likes=self.like(toget)
like0=likes.pop(0)
for i in range(N):
for j in range(i,N):
if (i==j):
der=(likes.pop(0)+likes.pop(0)-2*like0)/(delta[i]**2)
else:
#if (i not in fastpars) and (j not in fastpars):
der=(likes.pop(0)+likes.pop(0)-likes.pop(0)-likes.pop(0))/(4*delta[i]*delta[j])
#else:
# der=0
icov[i,j]=-der
icov[j,i]=-der
print "Checking d |
snapcore/snapcraft | snapcraft/internal/sources/__init__.py | Python | gpl-3.0 | 7,056 | 0 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Found | ation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Common 'source' options.
Unless the part plugin override | s this behaviour, a part can use these
'source' keys in its definition. They tell snapcraft where to pull source
code for that part, and how to unpack it if necessary.
- source: url-or-path
A URL or path to some source tree to build. It can be local
('./src/foo') or remote ('https://foo.org/...'), and can refer to a
directory tree or a tarball or a revision control repository
('git:...').
- source-type: git, bzr, hg, svn, tar, deb, rpm, or zip
In some cases the source string is not enough to identify the version
control system or compression algorithm. The source-type key can tell
snapcraft exactly how to treat that content.
- source-checksum: <algorithm>/<digest>
Snapcraft will use the digest specified to verify the integrity of the
source. The source-type needs to be a file (tar, zip, deb or rpm) and
the algorithm either md5, sha1, sha224, sha256, sha384, sha512, sha3_256,
sha3_384 or sha3_512.
- source-depth: <integer>
  By default, clones or branches fetch the full history; specifying a depth
  truncates the history to the specified number of commits.
- source-branch: <branch-name>
Snapcraft will checkout a specific branch from the source tree. This
only works on multi-branch repositories from git and hg (mercurial).
- source-commit: <commit>
Snapcraft will checkout the specific commit from the source tree revision
control system.
- source-tag: <tag>
Snapcraft will checkout the specific tag from the source tree revision
control system.
- source-subdir: path
When building, Snapcraft will set the working directory to be this
subdirectory within the source.
- source-submodules: <list-of-submodules>
Configure which submodules to fetch from the source tree.
  If source-submodules is defined and empty, no submodules are fetched.
If source-submodules is not defined, all submodules are fetched (default
behavior).
Note that plugins might well define their own semantics for the 'source'
keywords, because they handle specific build systems, and many languages
have their own built-in packaging systems (think CPAN, PyPI, NPM). In those
cases you want to refer to the help text for the specific plugin.
snapcraft help <plugin>
"""
import logging
import os
import os.path
import re
import sys
from . import errors
if sys.platform == "linux":
from ._7z import SevenZip # noqa
from ._bazaar import Bazaar # noqa
from ._deb import Deb # noqa
from ._git import Git # noqa
from ._local import Local # noqa
from ._mercurial import Mercurial # noqa
from ._rpm import Rpm # noqa
from ._script import Script # noqa
from ._snap import Snap # noqa: F401
from ._subversion import Subversion # noqa
from ._tar import Tar # noqa
from ._zip import Zip # noqa
_source_handler = {
"bzr": Bazaar,
"git": Git,
"hg": Mercurial,
"mercurial": Mercurial,
"subversion": Subversion,
"svn": Subversion,
"tar": Tar,
"zip": Zip,
"7z": SevenZip,
"local": Local,
"deb": Deb,
"rpm": Rpm,
"snap": Snap,
"": Local,
}
else:
from ._7z import SevenZip # noqa
from ._bazaar import Bazaar # noqa
from ._git import Git # noqa
from ._local import Local # noqa
from ._mercurial import Mercurial # noqa
from ._subversion import Subversion # noqa
from ._tar import Tar # noqa
from ._zip import Zip # noqa
_source_handler = {
"7z": SevenZip,
"bzr": Bazaar,
"git": Git,
"local": Local,
"hg": Mercurial,
"mercurial": Mercurial,
"subversion": Subversion,
"svn": Subversion,
"tar": Tar,
"zip": Zip,
"": Local,
}
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
__SOURCE_DEFAULTS = {
"source": None,
"source-commit": None,
"source-checksum": None,
"source-depth": None,
"source-tag": None,
"source-type": None,
"source-branch": None,
"source-subdir": None,
"source-submodules": None,
}
def get_source_defaults():
return __SOURCE_DEFAULTS.copy()
def get(sourcedir, builddir, options):
"""Populate sourcedir and builddir from parameters defined in options.
:param str sourcedir: The source directory to use.
:param str builddir: The build directory to use.
:param options: source options.
"""
source_type = getattr(options, "source_type", None)
source_attributes = dict(
source_depth=getattr(options, "source_depth", None),
source_checksum=getattr(options, "source_checksum", None),
source_tag=getattr(options, "source_tag", None),
source_commit=getattr(options, "source_commit", None),
source_branch=getattr(options, "source_branch", None),
source_submodules=getattr(options, "source_submodules", None),
)
handler_class = get_source_handler(options.source, source_type=source_type)
handler = handler_class(options.source, sourcedir, **source_attributes)
handler.pull()
def get_source_handler_from_type(source_type):
"""Return the source handler for source_type."""
return _source_handler.get(source_type)
def get_source_handler(source, *, source_type=""):
if not source_type:
source_type = _get_source_type_from_uri(source)
return _source_handler[source_type]
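# For example (assumed behaviour, following the handler table above):
#   get_source_handler('https://example.com/foo.tar.gz')  # -> Tar
#   get_source_handler('lp:some-project')                 # -> Bazaar
#   get_source_handler('.')                               # -> Local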
_tar_type_regex = re.compile(r".*\.((tar(\.(xz|gz|bz2))?)|tgz)$")
def _get_source_type_from_uri(source, ignore_errors=False): # noqa: C901
for extension in ["zip", "deb", "rpm", "7z", "snap"]:
if source.endswith(".{}".format(extension)):
return extension
source_type = ""
if source.startswith("bzr:") or source.startswith("lp:"):
source_type = "bzr"
elif (
source.startswith("git:")
or source.startswith("git@")
or source.endswith(".git")
):
source_type = "git"
elif source.startswith("svn:"):
source_type = "subversion"
elif _tar_type_regex.match(source):
source_type = "tar"
elif os.path.isdir(source):
source_type = "local"
elif not ignore_errors:
raise errors.SnapcraftSourceUnhandledError(source)
return source_type
|
fabrickit/fabkit | core/db/impl_sqlalchemy/models.py | Python | mit | 2,392 | 0.000836 | # coding: utf-8
import datetime
from oslo_db.sqlalchemy import models
from sqlalchemy import (Column, Integer, String, ForeignKey, Index, DateTime,
Boolean, UniqueConstraint, BigInteger, MetaData)
from sqlalchemy.ext.declarative import declarative_base
metadata = MetaData()
class FabkitBase(models.ModelBase):
"""Base class for Neutron Models."""
id = Column(Integer, primary_key=True, autoincrement=True)
__table_args__ = {'mysql_engine': 'InnoDB'}
created_at = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, nullable=False, default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow)
Base = declarative_base(cls=FabkitBase)
class Agent(Base):
__tablename__ = 'agent'
# agent, central
agent_type = Column(String(255), nullable=False)
# TOPIC.host is a target topic
host = Column(String(255), nullable=False)
# active, down, disable
status = Column(String(55), nullable=False)
# check
check_status = Column(Integer, nullable=True)
check_timestamp = Column(DateTime, nullable=True)
# setup
setup_status = Column(String(55), nullable=True)
setup_timestamp = Column(DateTime, nullable=True)
# fabscript_map
fabscript_map = Column(String(1000), nullable=False, default='{}')
class Task(Base):
__tablename__ = 'task'
# check, setup
method = Column(String(255), nullable=False)
json_arg = Column(String(500), nullable=False, default='{}')
owner = Column(String(255), default=None)
target = Column(String(255), nullable=False, default='.*')
# N: random wait(0-N s) on each node, 0<: serial
pallalel = Column(Integer, nullable=False, default=0)
| # requested, queued, complet | ed, error, canceled
status = Column(String(55), nullable=False, default='requested')
msg = Column(String(255), nullable=False, default='')
active = Column(Boolean(), nullable=False, default=True)
class Event(Base):
__tablename__ = 'event'
# check, setup
event_type = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
fabscript = Column(String(255), nullable=False)
msg = Column(String(255), nullable=False)
status = Column(Integer(), nullable=False)
hooked = Column(Boolean(), nullable=False, default=False)
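# Minimal usage sketch (assumes a SQLAlchemy engine; illustration only):
#
#     from sqlalchemy import create_engine
#     engine = create_engine('sqlite://')
#     Base.metadata.create_all(engine)  # creates the agent, task, event tables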
|
Carreau/pip | pip/commands/list.py | Python | mit | 7,507 | 0 | from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import PackageFinder
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.utils.deprecation import RemovedInPip7Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.ad | d_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do no | t list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, remote_version_raw, remote_version_parsed in \
self.find_packages_latests_versions(options):
if remote_version_parsed > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s)',
dist.project_name, dist.version, remote_version_raw,
)
def find_packages_latests_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
for dist in installed_packages:
req = InstallRequirement.from_line(dist.key, None)
try:
link = finder.find_requirement(req, True)
                    # If link is None, the installed version is the most
                    # up-to-date
if link is None:
continue
except DistributionNotFound:
continue
else:
                    # It might be a good idea for link or finder to have a
                    # public method that returns the version
remote_version = finder._link_package_versions(
link, req.name
)[0]
remote_version_raw = remote_version[2]
remote_version_parsed = remote_version[0]
yield dist, remote_version_raw, remote_version_parsed
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, remote_version_raw, remote_version_parsed in \
self.find_packages_latests_versions(options):
if dist.parsed_version == remote_version_parsed:
uptodate.append(dist)
self.output_package_listing(uptodate)
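# Illustrative CLI invocations handled by this command (flag names taken from
# the options defined above; output follows output_package_listing, e.g.
# "requests (2.2.1)"):
#   pip list              # all installed packages
#   pip list --outdated   # packages with a newer version on the index
#   pip list --editable   # only projects installed as editable (-e)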
|
martinpopel/vowpal_wabbit | python/sklearn_vw.py | Python | bsd-3-clause | 15,244 | 0.002755 | # -*- coding: utf-8 -*-
# pylint: disable=line-too-long, unused-argument, invalid-name, too-many-arguments, too-many-locals
"""
Utilities to support integration of Vowpal Wabbit and scikit-learn
"""
import numpy as np
from pyvw import vw
import re
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.linear_model.base import LinearClassifierMixin, SparseCoefMixin
from sklearn.datasets.svmlight_format import dump_svmlight_file
from sklearn.utils.validation import check_is_fitted
import StringIO
DEFAULT_NS = ''
CONSTANT_HASH = 116060
INVALID_CHARS = re.compile(r"[\|: \n]+")
class VW(BaseEstimator, vw):
""" Vowpal Wabbit Scikit-learn Base Estimator wrapper
Attributes
----------
params : {dict}
dictionary of model parameter keys and values
fit_ : {bool}
this variable is only created after the model is fitted
"""
params = dict()
passes = 1
def __init__(self,
random_seed=None,
ring_size=None,
learning_rate=None,
l=None,
power_t=None,
decay_learning_rate=None,
initial_t=None,
feature_mask=None,
initial_regressor=None,
i=None,
initial_weight=None,
random_weights=None,
input_feature_regularizer=None,
audit=None,
a=None,
progress=None,
P=None,
quiet=None,
no_stdin=None,
hash=None,
ignore=None,
keep=None,
redefine=None,
bit_precision=None,
b=None,
noconstant=None,
constant=None,
C=None,
ngram=None,
skips=None,
feature_limit=None,
affix=None,
spelling=None,
dictionary=None,
dictionary_path=None,
interactions=None,
permutations=None,
leave_duplicate_interactions=None,
quadratic=None,
q=None,
cubic=None,
testonly=None,
t=None,
min_prediction=None,
max_prediction=None,
sort_features=None,
loss_function=None,
link=None,
quantile_tau=None,
l1=None,
l2=None,
named_labels=None,
final_regressor=None,
f=None,
readable_model=None,
invert_hash=None,
passes=None,
save_resume=None,
output_feature_regularizer_binary=None,
output_feature_regularizer_text=None):
""" VW model constructor, exposing all supported parameters to keep sklearn happy
Parameters
----------
random_seed (int): seed random number generator
ring_size (int): size of example ring
Update options
learning_rate,l (float): Set learning rate
power_t (float): t power value
decay_learning_rate (float): Set Decay factor for learning_rate between passes
initial_t (float): initial t value
feature_mask (str): Use existing regressor to determine which parameters may be updated.
If no initial_regressor given, also used for initial weights.
Weight options
initial_regressor,i (str): Initial regressor(s)
initial_weight (float): Set all weights to an initial value of arg.
random_weights (bool): make initial weights random
input_feature_regularizer (str): Per feature regularization input file
Diagnostic options
audit,a (bool): print weights of features
progress,P (str): Progress update frequency. int: additive, float: multiplicative
        quiet (bool): Don't output diagnostics and progress updates
Feature options
hash (str): how to hash the features. Available options: strings, all
ignore (str): ignore namespaces beginning with character <arg>
keep (str): keep namespaces beginning with character <arg>
redefine (str): Redefine namespaces beginning with characters of string S as namespace N. <arg> shall be in
form 'N:=S' where := is operator. Empty N or S are treated as default namespace.
Use ':' as a wildcard in S.
bit_precision,b (int): number of bits in the feature table
noconstant (bool): Don't add a constant feature
constant,C (float): Set initial value of constant
ngram (str): Generate N grams. To generate N grams for a single namespace 'foo', arg should be fN.
        skips (str): Generate skips in N grams. This, in conjunction with the ngram tag, can be used to
                generate a generalized n-skip-k-gram. To generate n-skips for a single namespace 'foo', arg should be fN.
feature_limit (str): limit to N features. To apply to a single namespace 'foo', arg should be fN
affix (str): generate prefixes/suffixes of features; argument '+2a,-3b,+1' means generate 2-char prefixes for
namespace a, 3-char suffixes for b and 1 char prefixes for default namespace
        spelling (str): compute spelling features for a given namespace (use '_' for default namespace)
dictionary (str): read a dictionary for additional features (arg either 'x:file' or just 'file')
dictionary_path (str): look in this directory for dictionaries; defaults to current directory or env{PATH}
interactions (str): Create feature interactions of any level between namespaces.
permutations (bool): Use permutations instead of combinations for feature interactions of same namespace.
leave_duplicate_interactions (bool): Don't remove interactions with duplicate combinations of namespaces. For
ex. this is a duplicate: '-q ab -q ba' and a lot more in '-q ::'.
quadratic,q (str): Create and use quadratic features, q:: corresponds to a wildcard for all printable characters
cubic (str): Create and use cubic features
Example options
testonly,t (bool): Ig | nore label inform | ation and just test
min_prediction (float): Smallest prediction to output
max_prediction (float): Largest prediction to output
sort_features (bool): turn this on to disregard order in which features have been defined. This will lead to
smaller cache sizes
loss_function (str): default_value("squared"), "Specify the loss function to be used, uses squared by default.
Currently available ones are squared, classic, hinge, logistic and quantile.
link (str): apply a link function to convert output: e.g. 'logistic'
        quantile_tau (float): Parameter \\tau associated with Quantile loss. Defaults to 0.5.
l1 (float): l_1 lambda
l2 (float): l_2 lambda
named_labels (str): use names for labels (multiclass, etc.) rather than integers, argument specified all
possible labels, comma-sep, eg \"--named_labels Noun,Verb,Adj,Punc\"
Output model
final_regressor,f (str): Final regressor
readable_model (str): Output human-readable final regressor with numeric features
invert_hash (str): Output human-readable final regressor with feature names. Computationally expensive.
passes (int): Number of training passes
save_resume (bool): save extra state so learning can be resumed later with new data
output_feature_regularizer_binary (str): Per feature regularization output file
output_feature_regularizer_text (str): Per feature regularization output file, in text
Return |
yinyanlong/iosig | src/analysis/single_trace_analysis.py | Python | bsd-3-clause | 8,008 | 0.006868 | #!/usr/bin/env python
"""
Single trace Analysis
"""
__author__ = "Yanlong Yin (yyin2@iit.edu)"
__version__ = "$Revision: 1.4$"
__date__ = "$Date: 02/08/2014 $"
__copyright__ = "Copyright (c) 2010-2014 SCS Lab, IIT"
__license__ = "Python"
import sys, os, string, getopt, gc, multiprocessing
from sig import *
from access import *
from accList import *
from prop import *
from util import *
def detectSignature(filename):
# the list contains all the accesses
rlist = AccList()
wlist = AccList()
accList = AccList() # all lines with "accList" are commentted out
# because the figure drawing using accList
# is replaced with rlist and wlist
# open the trace file
f = open(filename, 'r')
# skip | the first several lines
# Maybe the skipped lines are table heads
for i in range(int(sig._format_prop['skip_lines'])):
line = f.readlin | e()
# scan the file and put the access item into list
i = 0
j = 0
op_index = int(sig._format_prop['op'])
debugPrint ('op_index: ', op_index)
op = ''
# TODO: add while 1 loop here
for i in range(sig._range):
line = f.readline()
if not line:
break
words = string.split(line)
# there might be some blank lines
if len(words) < 6:
j+=1
continue
## only "READ" and "WRITE" will be saved
#if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0:
# to test chomob, only use write
# if words[-1].count('WRITE') == 0:
# j+=1
# continue
## save to list
op = words[op_index].upper();
acc = Access(words)
if acc.size >= 1:
accList.append(acc)
if op.count('READ')>0 or op == 'R':
debugPrint("one READ")
rlist.append(acc)
if op.count('WRITE')>0 or op == 'W':
debugPrint("one WRITE")
wlist.append(acc)
## close the opened file
f.close()
rlist.trace = filename
wlist.trace = filename
accList.trace = filename
# print the time summary
print 'Total read time: ', sig._total_read_time
print 'Total write time: ', sig._total_write_time
    print 'Number of operations - ', 'Read: ', len(rlist), ' write: ', len(wlist)
## deal with the list
rlist.detect_signature(0, min(sig._range-j-1, len(rlist)-1) )
wlist.detect_signature(0, min(sig._range-j-1, len(wlist)-1) )
## Done with the whole process of detecting
## Print the whole signature
if len(rlist.signatures) > 0 or len(wlist.signatures) > 0:
print '----------------------------------------'
print 'The following signatures are detected:'
if len(rlist.signatures) > 0:
rlist.print_signature()
rlist.gen_protobuf(sig._out_path)
rlist.makeup_output(sig._out_path)
if len(wlist.signatures) > 0:
wlist.print_signature()
wlist.gen_protobuf(sig._out_path)
wlist.makeup_output(sig._out_path)
#if len(accList) > 0:
accList.gen_iorates(sig._out_path)
def generateCSVs(single_trace_filename):
"""Generate the Read/Write Bandwidth figures"""
trace_path, trace_filename = os.path.split(single_trace_filename)
# the list contains all the accesses
rlist = AccList()
wlist = AccList()
rlistEmpty = 1
wlistEmpty = 1
total_read_count = 0
total_write_count = 0
total_read_time = 0.0
total_write_time = 0.0
# Create and empty each CSV files, write the CSV title line
output = os.path.join(sig._out_path, trace_filename + ".read.rate.csv")
f = open(output, 'w')
f.write("Time,Rate\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".write.rate.csv")
f = open(output, 'w')
f.write("Time,Rate\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".read.interval.csv")
f = open(output, 'w')
f.write("Begin,End\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".write.interval.csv")
f = open(output, 'w')
f.write("Begin,End\n")
f.close()
output = os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv")
f = open(output, 'w')
f.write("Time,Size\n")
f.close()
# open the trace file
f = open(single_trace_filename, 'r')
# skip the first several lines
# Maybe the skipped lines are table heads
for i in range(int(sig._format_prop['skip_lines'])):
line = f.readline()
# scan the file and put the access item into list
i = 0
j = 0
eof = 0 # reaching the EOF?
op_index = int(sig._format_prop['op'])
debugPrint ('op_index: ', op_index)
op = ''
while 1:
# handle 5000 operations once
for i in range(sig._range):
line = f.readline()
if not line:
eof = 1
break
words = string.split(line)
# there might be some blank lines
if len(words) < 6:
j+=1
continue
## only "READ" and "WRITE" will be saved
#if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0:
# to test chomob, only use write
# if words[-1].count('WRITE') == 0:
# j+=1
# continue
## save to list
op = words[op_index].upper();
acc = Access(words)
if acc.size >= 1:
if op.count('READ')>0 or op == 'R':
debugPrint("one READ")
rlist.append(acc)
total_read_count += 1
total_read_time += acc.endTime - acc.startTime
if op.count('WRITE')>0 or op == 'W':
debugPrint("one WRITE")
wlist.append(acc)
total_write_count += 1
total_write_time += acc.endTime - acc.startTime
# finish reading a batch of 5000 lines of the trace file
# Generate all kinds of CSV files using the rlist and wlist
# here the write operation should be "append"
# because it's handling 5000 lines each time
if (len(rlist) > 0):
rlist.toIORStep(trace_filename, 'r') # 'r' for read
rlist.toDataAccessHoleSizes(trace_filename, 'r')
rlistEmpty = 0
if (len(wlist) > 0):
wlist.toIORStep(trace_filename, 'w') # 'w' for write
wlistEmpty = 0
# empty the two lists
rlist = AccList()
wlist = AccList()
gc.collect() # garbage collection
# reached EOF? exit the "while 1" loop
if eof == 1:
break
## close the opened file
f.close()
if (rlistEmpty == 1):
readF = open( os.path.join(sig._out_path, trace_filename + ".read.rate.csv"), 'a+')
readF.write( "{0},{1}\n".format(0, 0) )
readF.close()
readF = open( os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv"), 'a+')
readF.write( "{0},{1}\n".format(0, 0) )
readF.close()
if (wlistEmpty == 1):
writeF = open( os.path.join(sig._out_path, trace_filename + ".write.rate.csv"), 'a+')
writeF.write( "{0},{1}\n".format(0, 0) )
writeF.close()
# TODO: gnuplot for read and write rates
# save the statistics information to files
output = os.path.join(sig._out_path, trace_filename + ".stat.properties")
f = open(output, 'a+')
f.write("total_read_time: {0}\n".format(total_read_time))
f.write("total_read_count: {0}\n".format(total_read_count))
f.write("total_write_time: {0}\n".format(total_write_time))
f.write("total_write_count: {0}\n".format(total_write_count))
#f.write("global_total_read_time: {0}\n".format(sig._total_read_time))
#f.write("global_total_write_time: {0}\n".format(sig._total_write_time))
|
RPGOne/Skynet | scikit-learn-0.18.1/examples/svm/plot_rbf_parameters.py | Python | bsd-3-clause | 8,016 | 0.001248 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set | . The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
| For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
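# Quick sanity check of the normalizer (illustrative, not part of the original
# example): the midpoint maps to 0.5, so the 0.92-1.0 score band is stretched
# over the upper half of the colormap.
#   >>> MidpointNormalize(vmin=0.0, vmax=1.0, midpoint=0.92)(np.array([0.92]))
#   masked_array(data=[0.5], ...)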
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
taldcroft/datastack | test_pure_oo.py | Python | bsd-2-clause | 844 | 0.005924 | import sys
import sherpa.astro.ui as ui
import numpy as np
import datastack
x1 = np.arange(50)+2
y1 = x1**2
x2 = np.arange(10)
y2 = x2**1.9 * 2
x3 = np.arange(60)
y3 = x3**2.1 * 4
ds = datastack.DataStack()
ds[1].load_arrays(x1, y1) # ID required (autonumbering possible but potenti | al problems)
ds[2].load_arrays(x2, y2)
ds[3].load_arrays(x3, y3)
ds.set_source(ui.const1d.constID * ui.polynom1d.poly)
#ds.set_source('const1d.constID * polynom1d.poly')
ds.freeze('poly')
ds.thaw('poly.c0')
ds.thaw('poly.c1')
ds.thaw('poly.c2')
ds.thaw('const') |
ds[1].freeze('const')
ds[2].set_par('poly.c1', 0.45)
ds.set_par('const.c0', 1.0)
ds.set_par('const.integrate', False)
mins = ds.get_par('poly.c1.min')
vals = ds.get_par('poly.c1.val')
pars = ds.get_par('const.c0')
ds[2,3].link('poly.c0')
ds.unlink('poly.c0')
ds.fit()
ds.plot_fit_resid()
|
216software/Profiles | communityprofiles/userguides/urls.py | Python | mit | 261 | 0.007663 | from django.conf.urls import *
from userguides import views
urlpatterns = patterns('',
url(r'^$', views.about, | name="userguides-about"),
url(r'^FAQ/$', views.faq, name="userguides-FAQ"),
url(r'stay_in_touch$',views.stay, name="userguides-st | ay"),
)
|
procool/mygw | dist/brukva/demos/websockets/app.py | Python | bsd-2-clause | 1,650 | 0 | # Demo application for brukva
# In order to use:
# 1. $ python app.py
# 2. Open in your browser that supports websockets: http://localhost:8888/
# You should see text that says "Connected..."
# 3. $ curl http://localhost:8888/msg -d 'message=Hello!'
# You should see 'Hello!' in your browser
import brukva
import tornado.httpserver
import tornado.web
import tornado.websocket
import tornado.ioloop
c = brukva.Client()
c.connect()
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("template.html", title="Websocket test")
class NewMessage(tornado.web.RequestHandler):
def post(self):
message = self.get_argument('message')
c.publish('test_channel', message)
self.set_header('Content-Type', 'text/plain')
self.write('sent: %s' % (message,))
class MessagesCatcher(tornado.websocket.WebSocketHandler):
def __init__(self, *args, **kwargs):
super(MessagesCatcher, self).__init__(*args, **kwargs)
self.client = brukva.Client()
self.client.connect()
self.client.subscribe('test_channel')
def open(self):
self.client.listen(self.on_message)
def on_message(self, result):
self.write_message(str(result.body))
def close(self):
self.client.unsubscribe('test_channel')
self.client.disconnect()
applic | ation = tornado.web.Application([
(r'/', MainHandler),
(r'/msg', NewMessage),
(r'/track', MessagesCatcher),
])
if __name__ == '__main__':
http_server = tornado.httpserver.HTTPServer(appli | cation)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
mirestrepo/voxels-at-lems | registration_eval/unused/compute_rigid_transform.py | Python | bsd-2-clause | 2,803 | 0.019979 | #!/usr/bin/env python
# encoding: utf-8
"""
Author: Isabel Restrepo
August 12, 2012
Compute rigid transformation between two point clounds using feature correspondances
"""
import os
import sys
import glob
import time
from optparse import OptionParser
from xml.etree.ElementTree import ElementTree
from vpcl_adaptor import *
from boxm2_utils import *
parser = OptionParser()
parser.add_option("--srcRoot", action="store", type="string", dest="src_scene_root", help="root folder, this is where the .ply input | and output files should reside")
parser.add_option("--tgtRoot", action="store", type="string", dest="tgt_scene_root", help="root folder, this is where the .ply input and output files should reside")
parser.add_option("--basenameIn", action="store", type="string", dest="basename_in", help="basename of .ply file")
parser.add_opt | ion("-r", "--radius", action="store", type="int", dest="radius", help="radius (multiple of resolution)");
parser.add_option("-p", "--percent", action="store", type="int", dest="percentile", help="data percentile");
parser.add_option("-d", "--descriptor", action="store", type="string", dest="descriptor_type", help="name of the descriptor i.e FPFH");
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose - if false std is redirected to a logfile");
(opts, args) = parser.parse_args()
print opts
print args
#path to where all scenes are
src_scene_root=opts.src_scene_root;
tgt_scene_root=opts.tgt_scene_root;
radius = opts.radius; #gets multiplied by the resolution of the scene
percentile = opts.percentile;
descriptor_type = opts.descriptor_type;
verbose=opts.verbose;
if not verbose:
vpcl_batch.set_stdout("./logs/log_" + descriptor_type + 'percetile' + str(percentile) +'.log')
src_fname = src_scene_root + "/" + opts.basename_in + "_" + str(percentile) + ".ply"
src_features_dir = src_scene_root + "/" + descriptor_type + "_" + str(radius);
src_features_fname = src_features_dir + "/descriptors_" + str(percentile) + ".pcd";
tgt_fname = tgt_scene_root + "/" + opts.basename_in + "_" + str(percentile) + ".ply"
tgt_features_dir = tgt_scene_root + "/" + descriptor_type + "_" + str(radius);
tgt_features_fname = tgt_features_dir + "/descriptors_" + str(percentile) + ".pcd";
tform_cloud_fname = tgt_features_dir + "/tform_cloud_" + str(percentile) + ".pcd";
tform_fname = tgt_features_dir + "/transformation_" + str(percentile) + ".txt";
if verbose :
print src_fname, src_features_fname
print tgt_fname, tgt_features_fname, tform_cloud_fname, tform_fname
compute_rigid_transformation(src_fname, tgt_fname, src_features_fname, tgt_features_fname, tform_cloud_fname, tform_fname, descriptor_type);
if not verbose:
vpcl_batch.reset_stdout();
print "Done"
sys.exit(0)
|
trenton42/txbalanced | snippets/credit-soft-descriptor.py | Python | mit | 323 | 0.003096 | # bank_account_href is | the stored href for the BankAccount
# order_href is the stored href for the Order
bank_account = balanced.BankAccount.fetch(bank_account_href)
credit = bank_account.credit(
amount=100000,
description='Payout for order #1111',
appears_on_statement_as='GoodCo #1111',
order=order_href
) | |
Bulochkin/tensorflow_pack | tensorflow/contrib/keras/python/keras/callbacks.py | Python | apache-2.0 | 36,338 | 0.006632 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import numpy as np
import six
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
logging.warning(
'Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
logging.warning(
'Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
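# A minimal custom callback sketch built on the base class above ('loss' is a
# standard key in `logs`; the class itself is illustrative, not part of this
# module):
#
#   class LossHistory(Callback):
#
#     def on_train_begin(self, logs=None):
#       self.losses = []
#
#     def on_batch_end(self, batch, logs=None):
#       self.losses.append((logs or {}).get('loss'))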
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
|
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self):
super(TerminateOnNaN, self).__init__()
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
| if loss is not None:
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seens or steps (batches) se |
mjschust/conformal-blocks | experiments/non_sym_m0n.py | Python | mit | 1,075 | 0.009302 | from __future__ import division
import conformal_blocks.cbbundle as cbd
import cProfile, time, random
#First test
#----------
| #
def experiment():
"""
Computes the rank and divisor of conformal block bundles with random weights.
    :return: None
"""
rank = 5
level = 3
num_points = 10
tries = 100
| liealg = cbd.TypeALieAlgebra(rank, store_fusion=True, exact=False)
A_l = liealg.get_weights(level)
print("Weight", "Rank", "Divisor")
for i in range(tries):
        weights = [random.choice(A_l) for _ in range(num_points)]
if sum([sum(liealg._convert_funds_to_epsilons(wt)) for wt in weights]) % (rank+1) != 0: continue
cbb = cbd.ConformalBlocksBundle(liealg, weights, level)
if cbb.get_rank() > 0:
divisor = cbb.get_symmetrized_divisor()
print(weights, cbb.get_rank(), divisor)
else:
print(weights, cbb.get_rank(), 0)
if __name__ == '__main__':
t0 = time.clock()
experiment()
print(time.clock() -t0)
#cProfile.run('experiment()', sort='cumtime') |
Byclosure/google-apis-client-generator | src/googleapis/codegen/utilities/maven_utils.py | Python | apache-2.0 | 2,224 | 0.010342 | #!/usr/bin/python2.7
"""Maven-related utilities for java packages."""
import re
def GetMavenArtifactId(api_name, package_path='', canonical_name='',
owner_domain='google.com'):
"""Returns the maven artifact id for a given api.
Args:
api_name: (str) The api name.
package_path: (str|None) The package path, if any.
canonical_name: (str|None) The canonical api name, if any.
owner_domain: (str) The api's owner domain.
Returns:
(str) The artifact id.
"""
# TODO(user): This logic is meant to match equivalent
# logic in api.Api._SetupModules() in setting up the path,
# using canonicalName only if packagePath is set, for backwards
# compatibility. A TODO there proposes a breaking change of
# obeying canonicalName unconditionally; were that done this
# would have to change likewise for our tests to pass.
if package_path and canonical_name:
api_name = canonical_name.replace(' ', '')
parts = []
if owner_domain == 'google.com':
parts.extend(['google', 'api', 'services'])
if package_path:
parts.extend(re.split(r'\.|/', package_path))
parts.append(api_name)
return '-'.join(parts)
def GetMavenGroupId(owner_domain):
"""Returns the maven group id for a given owner domain.
Args:
owner_d | omain: (str) T | he owner domain.
Returns:
(str) The group id.
"""
if owner_domain == 'google.com':
return 'com.google.apis'
else:
return '.'.join(reversed(owner_domain.split('.')))
def GetMavenVersion(api, language_version):
"""Returns the maven version."""
if api.get('ownerDomain') == 'google.com':
return '%s-rev%s-%s' %(api['version'],
api['revision'],
language_version)
return '%s-%s-SNAPSHOT' % (api['version'], language_version)
def GetMavenMetadata(api, language_version):
"""Returns a dict of useful maven metadata."""
owner_domain = api.get('ownerDomain', 'google.com')
return {
'artifact_id': GetMavenArtifactId(
api['name'], api.get('packagePath'),
api.get('canonicalName'), owner_domain),
'group_id': GetMavenGroupId(owner_domain),
'version': GetMavenVersion(api, language_version),
}
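# A hedged end-to-end sketch of the helpers above; the api dict below is an
# assumed example, not taken from a real discovery document.
if __name__ == '__main__':
  _api = {'name': 'drive', 'version': 'v2', 'revision': '123',
          'ownerDomain': 'google.com'}
  # Expected output:
  # {'artifact_id': 'google-api-services-drive',
  #  'group_id': 'com.google.apis', 'version': 'v2-rev123-1.20.0'}
  print(GetMavenMetadata(_api, '1.20.0'))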
|
amaas-fintech/amaas-core-sdk-python | amaascore/core/reference.py | Python | apache-2.0 | 800 | 0.00375 | from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.core.amaas_model import AMaaSModel
class Reference(AMaaSModel):
def __init__(self, reference_value, reference_primary=False, *args, **kwargs):
self.reference_value = reference_value
self.reference_primary = reference_primary
super(Reference, | self).__init__(*args, **kwargs)
@property
def reference_prim | ary(self):
if hasattr(self, '_reference_primary'):
return self._reference_primary
@reference_primary.setter
def reference_primary(self, value):
"""
Always convert to bool if the service/database returns 0 or 1
"""
if value is not None:
self._reference_primary = True if value else False |
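# Illustrative usage (values are assumed): the setter above coerces 0/1 flags
# from the service into real booleans.
#
#   ref = Reference(reference_value='ABC123', reference_primary=1)
#   assert ref.reference_primary is True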
smcoll/django-rules | rules/contrib/admin.py | Python | mit | 1,879 | 0.002129 | from django.contrib import admin
try:
from django.contrib.auth import get_permission_codename
except ImportError: # pragma: no cover
# Django < 1.6
def get_permission_codename(action, opts):
return '%s_%s' % (action, opts.object_name.lower())
class ObjectPermissionsModelAdminMixin(object):
def has_change_permission(self, request, obj=None):
opts = self.o | pts
codename = get_permission_codename('change', opts)
return request.user.has_perm('%s.%s' % (op | ts.app_label, codename), obj)
def has_delete_permission(self, request, obj=None):
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm('%s.%s' % (opts.app_label, codename), obj)
class ObjectPermissionsInlineModelAdminMixin(ObjectPermissionsModelAdminMixin):
def has_change_permission(self, request, obj=None): # pragma: no cover
opts = self.opts
if opts.auto_created:
for field in opts.fields:
if field.rel and field.rel.to != self.parent_model:
opts = field.rel.to._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm('%s.%s' % (opts.app_label, codename), obj)
def has_delete_permission(self, request, obj=None): # pragma: no cover
if self.opts.auto_created:
return self.has_change_permission(request, obj)
return super(ObjectPermissionsInlineModelAdminMixin, self).has_delete_permission(request, obj)
class ObjectPermissionsModelAdmin(ObjectPermissionsModelAdminMixin, admin.ModelAdmin):
pass
class ObjectPermissionsStackedInline(ObjectPermissionsInlineModelAdminMixin, admin.StackedInline):
pass
class ObjectPermissionsTabularInline(ObjectPermissionsInlineModelAdminMixin, admin.TabularInline):
pass
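# Hedged usage sketch (the app and model names are hypothetical): register an
# admin whose change/delete checks defer to object-level rules permissions.
#
#   from django.contrib import admin
#   from myapp.models import Book
#
#   @admin.register(Book)
#   class BookAdmin(ObjectPermissionsModelAdmin):
#       pass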
|
kennedyshead/home-assistant | homeassistant/components/config/entity_registry.py | Python | apache-2.0 | 6,293 | 0.001748 | """HTTP views to interact with the entity registry."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.components.websocket_api.decorators import (
async_response,
require_admin,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import DISABLED_USER, async_get_registry
async def async_setup(hass):
"""Enable the Entity Registry views."""
hass.components.websocket_api.async_register_command(websocket_list_entities)
hass.components.websocket_api.async_register_command(websocket_get_entity)
hass.components.websocket_api.async_register_command(websocket_update_entity)
hass.components.websocket_api.async_register_command(websocket_remove_entity)
return True
@async_response
@websocket_a | pi.websocket_command({vol.Required("type"): "config/entity_registry/list"})
async def websocket_list_entities(hass, connection, msg):
"""H | andle list registry entries command.
Async friendly.
"""
registry = await async_get_registry(hass)
connection.send_message(
websocket_api.result_message(
msg["id"], [_entry_dict(entry) for entry in registry.entities.values()]
)
)
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/get",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_get_entity(hass, connection, msg):
"""Handle get entity registry entry command.
Async friendly.
"""
registry = await async_get_registry(hass)
entry = registry.entities.get(msg["entity_id"])
if entry is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
connection.send_message(
websocket_api.result_message(msg["id"], _entry_ext_dict(entry))
)
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/update",
vol.Required("entity_id"): cv.entity_id,
# If passed in, we update value. Passing None will remove old value.
vol.Optional("name"): vol.Any(str, None),
vol.Optional("icon"): vol.Any(str, None),
vol.Optional("area_id"): vol.Any(str, None),
vol.Optional("new_entity_id"): str,
# We only allow setting disabled_by user via API.
vol.Optional("disabled_by"): vol.Any(DISABLED_USER, None),
}
)
async def websocket_update_entity(hass, connection, msg):
"""Handle update entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
changes = {}
for key in ("name", "icon", "area_id", "disabled_by"):
if key in msg:
changes[key] = msg[key]
if "new_entity_id" in msg and msg["new_entity_id"] != msg["entity_id"]:
changes["new_entity_id"] = msg["new_entity_id"]
if hass.states.get(msg["new_entity_id"]) is not None:
connection.send_message(
websocket_api.error_message(
msg["id"],
"invalid_info",
"Entity with this ID is already registered",
)
)
return
if "disabled_by" in msg and msg["disabled_by"] is None:
entity = registry.entities[msg["entity_id"]]
if entity.device_id:
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entity.device_id)
if device.disabled:
connection.send_message(
websocket_api.error_message(
msg["id"], "invalid_info", "Device is disabled"
)
)
return
try:
if changes:
entry = registry.async_update_entity(msg["entity_id"], **changes)
except ValueError as err:
connection.send_message(
websocket_api.error_message(msg["id"], "invalid_info", str(err))
)
return
result = {"entity_entry": _entry_ext_dict(entry)}
if "disabled_by" in changes and changes["disabled_by"] is None:
config_entry = hass.config_entries.async_get_entry(entry.config_entry_id)
if config_entry and not config_entry.supports_unload:
result["require_restart"] = True
else:
result["reload_delay"] = config_entries.RELOAD_AFTER_UPDATE_DELAY
connection.send_result(msg["id"], result)
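# Example frontend message handled by websocket_update_entity above (field
# values are illustrative):
#   {"id": 42, "type": "config/entity_registry/update",
#    "entity_id": "light.kitchen", "name": "Kitchen", "icon": "mdi:lightbulb"}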
@require_admin
@async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "config/entity_registry/remove",
vol.Required("entity_id"): cv.entity_id,
}
)
async def websocket_remove_entity(hass, connection, msg):
"""Handle remove entity websocket command.
Async friendly.
"""
registry = await async_get_registry(hass)
if msg["entity_id"] not in registry.entities:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
registry.async_remove(msg["entity_id"])
connection.send_message(websocket_api.result_message(msg["id"]))
@callback
def _entry_dict(entry):
"""Convert entry to API format."""
return {
"config_entry_id": entry.config_entry_id,
"device_id": entry.device_id,
"area_id": entry.area_id,
"disabled_by": entry.disabled_by,
"entity_id": entry.entity_id,
"name": entry.name,
"icon": entry.icon,
"platform": entry.platform,
}
@callback
def _entry_ext_dict(entry):
"""Convert entry to API format."""
data = _entry_dict(entry)
data["original_name"] = entry.original_name
data["original_icon"] = entry.original_icon
data["unique_id"] = entry.unique_id
data["capabilities"] = entry.capabilities
return data
|
austinhartzheim/gravel-api-python | src/gravel/__init__.py | Python | gpl-3.0 | 873 | 0 | import hmac
import hashlib
import requests
class GravelApi():
def __init__(self, address, shared_s | ecret):
self.address = address
self.shared_secret = shared_secret
self.tokens = []
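    # The methods below are unimplemented placeholders in this snapshot; the
    # hmac/hashlib/requests imports suggest HMAC-signed HTTP calls against
    # `address` using `shared_secret` (an assumption, not confirmed here).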
def get_problem(self, problemid):
pass
def get_user(self, userid):
pass
def get_tokens(self, number=1):
pass
def _call_api_with_token(self):
pass
def _call_api_without_token(self):
pass
class GravelProblem():
def __init__(self, api, id, data={}):
self.api | = api
self.id = id
def get_replies(self):
pass
def post_reply(self):
pass
class GravelReply():
def __init__(self, api, id, data={}):
self.api = api
self.id = id
class GravelUser():
def __init__(self, api, id, data={}):
self.api = api
self.id = id
|
RedwoodAdmin/RedwoodFramework | expecon/tests.py | Python | bsd-2-clause | 1,415 | 0.028269 | from django.test import TestCase
from django.contrib.auth.models import User
from models import *
import random
import datetime
from decimal import *
alphabet = [chr(i) for i in range(97, 123)]  # lowercase ASCII letters a-z
def random_name():
    return ''.join([random.choice(alphabet) for i in range(10)])
def random_date():
random_second = random.randrange(364 * 24 * 60 * 60)
return datetime.datetime.now() + datetime.timedelta(seconds=random_second)
class SimpleTest(TestCase):
def setUp(self):
for i in range(10):
User.objects.create_user(random_name(), random_name() + '@example.com', random_name())
for i in range(10):
Subject.objects.create(
name=random_name(),
email=random_name() + '@example.com',
phone='',
birthday=datetime.da | te.today(),
gender=random.choice(GENDER_CHOICES)[0],
field=random.choice(FIELD_CHOICES)[0])
for i in range(2):
experimenter = random.choice(User.objects.all())
Experiment.objects.create(
public_name=random_name(),
private_name=random_name(),
experimenter=experimenter)
for i in range(5):
Session.objects.create(
time=random_date(),
duration=Decimal(random.randint(1, 3) + ra | ndom.random()).quantize(Decimal('0.1')),
required_subjects=random.randint(6, 12),
extra_subjects=random.randint(1, 5),
experiment=random.choice(Experiment.objects.all()))
def test_all(self):
self.assertEqual(len(Subject.objects.all()), 20)
|
tleonhardt/Python_Interface_Cpp | cython/wrap_c/setup.py | Python | mit | 193 | 0.005181 | # coding=utf-8
from setuptools import setup |
from Cython.Build import cythonize
setup(
name="cyfib",
ext_module | s=cythonize('cyfib.pyx', compiler_directives={'embedsignature': True}),
)
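# Typical build/usage flow for a cythonize-based setup (general Cython
# practice, not stated in this repo):
#   python setup.py build_ext --inplace
#   python -c "import cyfib"   # the compiled extension module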
|
thonkify/thonkify | src/lib/future/backports/urllib/request.py | Python | mit | 96,679 | 0.00061 | """
Ported using Python-Future from the Python 3.3 standard library.
An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 a | nd 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
u | rlopen(url, data=None) -- Basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the argument is a subclass of the default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib.request
# set up authentication info
authinfo = urllib.request.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib.request.build_opener(proxy_support, authinfo,
urllib.request.CacheFTPHandler)
# install it
urllib.request.install_opener(opener)
f = urllib.request.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import bytes, dict, filter, input, int, map, open, str
from future.utils import PY2, PY3, raise_with_traceback
import base64
import bisect
import hashlib
import array
from future.backports import email
from future.backports.http import client as http_client
from .error import URLError, HTTPError, ContentTooShortError
from .parse import (
urlparse, urlsplit, urljoin, unwrap, quote, unquote,
splittype, splithost, splitport, splituser, splitpasswd,
splitattr, splitquery, splitvalue, splittag, to_bytes, urlunparse)
from .response import addinfourl, addclosehook
import io
import os
import posixpath
import re
import socket
import sys
import time
import collections
import tempfile
import contextlib
import warnings
# check for SSL
try:
import ssl
# Not available in the SSL module in Py2:
from ssl import SSLContext
except ImportError:
_have_ssl = False
else:
_have_ssl = True
__all__ = [
# Classes
'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler',
'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler',
'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm',
'AbstractBasicAuthHandler', 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
'AbstractDigestAuthHandler', 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
'HTTPHandler', 'FileHandler', 'FTPHandler', 'CacheFTPHandler',
'UnknownHandler', 'HTTPErrorProcessor',
# Functions
'urlopen', 'install_opener', 'build_opener',
'pathname2url', 'url2pathname', 'getproxies',
# Legacy interface
'urlretrieve', 'urlcleanup', 'URLopener', 'FancyURLopener',
]
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **_3to2kwargs):
if 'cadefault' in _3to2kwargs:
cadefault = _3to2kwargs['cadefault']; del _3to2kwargs['cadefault']
else:
cadefault = False
if 'capath' in _3to2kwargs:
capath = _3to2kwargs['capath']; del _3to2kwargs['capath']
else:
capath = None
if 'cafile' in _3to2kwargs:
cafile = _3to2kwargs['cafile']; del _3to2kwargs['cafile']
else:
cafile = None
global _opener
if cafile or capath or cadefault:
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.verify_mode = ssl.CERT_REQUIRED
if cafile or capath:
context.load_verify_locations(cafile, capath)
else:
context.set_default_verify_paths()
https_handler = HTTPSHandler(context=context, check_hostname=True)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
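# Hedged usage sketch (the URL and CA-bundle path below are placeholders, not
# part of this module):
#     resp = urlopen('https://example.org/', cafile='/etc/ssl/certs/ca.pem')
#     print(resp.read(64))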
def install_opener(opener):
global _opener
_opener = opener
_url_tempfiles = []
def urlretrieve(url, filename=None, reporthook=None, data=None):
"""
Retrieve a URL into a temporary location on disk.
Requires a URL argument. If a filename is passed, it is used as
the temporary file location. The reporthook argument should be
a callable that accepts a block number, a read size, and the
total file size of the URL target. The data argument should be
valid URL encoded data.
If a filename is passed and the URL points to a local resource,
the result is a copy from local file to new file.
Returns a tuple containing the path to the newly created
data file as well as the resulting HTTPMessage object.
"""
url_type, path = splittype(url)
with contextlib.closing(urlopen(url, data)) as fp:
headers = fp.info()
# Just return the local path and the "headers" for file://
# URLs. No sense in performing a copy unless requested.
if url_type == "file" and not filename:
return os.path.normpath(path), headers
# Handle temporary file setup.
if filename:
tfp = open(filename, 'wb')
else:
tfp = tempfile.NamedTemporaryFile(delete=False)
filename = tfp.name
_url_tempfiles.append(filename)
with tfp:
result = filename, headers
bs = 1024 * 8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while True:
block = fp.read(bs)
if not |
soylentdeen/BlurryApple | Tools/VLTTools.py | Python | gpl-2.0 | 12,298 | 0.00309 | import os
import paramiko
import numpy
import pyfits
import warnings
import select
class VLTConnection( object ):
"""
VLTConnection: This object allows python to log into a computer
running the VLT SPARTA Light software and do the following:
- Send a new flat pattern to the DM
- Retrieve data from the RTC (slopes, intensities, etc...)
- what else?
"""
def __init__(self, hostname, username, simulate=True):
self.hostname = hostname
self.username = username
if not(simulate):
self.ssh = paramiko.SSHClient()
self.ssh.load_system_host_keys()
self.ssh.connect(self.hostname, username=self.username)
self.ftp = self.ssh.open_sftp()
self.localpath = './data/'
self.remotepath = './local/test/'
self.CDMS = CDMS()
self.sim = simulate
def simulate(self):
self.sim = True
def goLive(self):
self.sim = False
def sendCommand(self, command, response=False):
if not(self.sim):
stdin, stdout, stderr = self.ssh.exec_command(command)
if response:
retval = []
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
received = stdout.channel.recv(1024)
if response:
retval.append(received)
else:
print received
        else:
            print("VLT Connection in Simulation mode. The command I would have sent is:")
            print(command)
            retval = []
        if response:
            return retval
def parse(self, text):
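        # placeholder: the reply text is only printed; real parsing is TODO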
print text
return 0.0
def set_Tip(self, tip):
self.sendCommand("msgSend \"\" CDMSGateway SETMAP \"-object TTCtr.ACT_POS_REF_MAP -function 0,0="+str("%.2g" % tip)+"\"")
self.sendCommand("msgSend \"\" spaccsServer EXEC \"-command TTCtr.update ALL\"")
def set_Tilt(self, tilt):
self.sendCommand("msgSend \"\" CDMSGateway SETMAP \"-object TTCtr.ACT_POS_REF_MAP -function 0,1="+str("%.2g" % tilt)+"\"")
self.sendCommand("msgSend \"\" spaccsServer EXEC \"-command TTCtr.update ALL\"")
def get_Tip(self):
tip = self.sendCommand("msgSend \"\" CDMSGateway GETMAP \"-object TTCtr.ACT_POS_REF_MAP -function 0,0\"", response=True)
return self.parse(tip)
def get_Tilt(self):
tilt = self.sendCommand("msgSend \"\" CDMSGateway GETMAP \"-object TTCtr.ACT_POS_REF_MAP -function 0,1\"", response=True)
return self.parse(tilt)
def set_TT_gain(self, gain):
self.sendCommand("msgSend \"\" CDMSGateway SETMAP \"-object TTCtr.TERM_B -function 0,0="+str("%.2g" % gain)+"\"")
self.sendCommand("msgSend \"\" spaccsServer EXEC \"-command TTCtr.update ALL\"")
def set_HO_gain(self, gain):
self.sendCommand("msgSend \"\" CDMSGateway SETMAP \"-object HOCtr.TERM_B -function 0,0="+str("%.2g" % gain)+"\"")
self.sendCommand("msgSend \"\" spaccsServer EXEC \"-command HOCtr.update ALL\"")
def get_HO_ACT_POS_REF_MAP(self):
name = self.CDMS.maps["HOCtr.ACT_POS_REF_MAP"].outfile
command = "cdmsSave -f "+self.remotepath+name+" HOCtr.ACT_POS_REF_MAP"
self.sendCommand(command)
if not(self.sim):
self.ftp.get(self.remotepath+name, self.localpath+name)
return pyfits.getdata(self.localpath+name)
def get_TT_ACT_POS_REF_MAP(self):
name = self.CDMS.maps["TTCtr.ACT_POS_REF_MAP"].outfile
command = "cdmsSave -f "+self.remotepath+name+" TTCtr.ACT_POS_REF_MAP"
self.se | ndCommand(command)
if not(self.sim):
self.ftp.get(self.remotepath+name, self.localpath+name)
return pyfits.getdat | a(self.localpath+name)
def set_gain(self, gain):
termA = numpy.array([-1], dtype='float32')
termB = gain*(numpy.array([-1.0, 0.0], dtype='float32'))
self.CDMS.maps["HOCtr.TERM_A"].replace(termA)
self.CDMS.maps["HOCtr.TERM_B"].replace(termB)
self.CDMS.maps["HOCtr.TERM_A"].write(path=self.localpath)
self.CDMS.maps["HOCtr.TERM_B"].write(path=self.localpath)
nameA = self.CDMS.maps["HOCtr.TERM_A"].outfile
nameB = self.CDMS.maps["HOCtr.TERM_B"].outfile
self.ftp.put(self.localpath+nameA, self.remotepath+nameA)
self.ftp.put(self.localpath+nameB, self.remotepath+nameB)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+nameA+" HOCtr.TERM_A --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+nameB+" HOCtr.TERM_B --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
termB = gain*(numpy.array([-1.0, 0.0], dtype='float32'))
self.CDMS.maps["TTCtr.TERM_A"].replace(termA)
self.CDMS.maps["TTCtr.TERM_B"].replace(termB)
self.CDMS.maps["TTCtr.TERM_A"].write(path=self.localpath)
self.CDMS.maps["TTCtr.TERM_B"].write(path=self.localpath)
nameA = self.CDMS.maps["TTCtr.TERM_A"].outfile
nameB = self.CDMS.maps["TTCtr.TERM_B"].outfile
self.ftp.put(self.localpath+nameA, self.remotepath+nameA)
self.ftp.put(self.localpath+nameB, self.remotepath+nameB)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+nameA+" TTCtr.TERM_A --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+nameB+" TTCtr.TERM_B --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
def make_TT_unscr(self):
self.CDMS.maps["TTCtr.SEC_ACT_UNSCR_MAP"].write(path=self.localpath)
name = self.CDMS.maps["TTCtr.SEC_ACT_UNSCR_MAP"].outfile
self.ftp.put(self.localpath+name, self.remotepath+name)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+name+" TTCtr.SEC_ACT_UNSCR_MAP --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
def set_CommandMatrix(self, pattern):
self.CDMS.maps["Recn.REC1.CM"].replace(pattern)
self.CDMS.maps["Recn.REC1.CM"].write(path=self.localpath)
name = self.CDMS.maps["Recn.REC1.CM"].outfile
self.ftp.put(self.localpath+name, self.remotepath+name)
stdin, stdout, stderr = self.ssh.exec_command("cdmsLoad -f "+self.remotepath+name+" Recn.REC1.CM --rename")
while not stdout.channel.exit_status_ready():
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
print stdout.channel.recv(1024)
def get_InteractionMatrices(self):
HOname = self.CDMS.maps["HORecnCalibrat.RESULT_IM"].outfile
TTname = self.CDMS.maps["TTRecnCalibrat.RESULT.IM"].outfile
command = "cdmsSave -f "+self.remotepath+HOname+" HORecnCalibrat.RESULT_IM"
|
rizumu/pinax-invitations | pinax/invitations/migrations/0001_initial.py | Python | mit | 1,824 | 0.003289 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='InvitationStat',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('invites_sent', models.IntegerField(default=0)),
('invites_allocated', models.IntegerField(default=0)),
('invites_accepted', models.In | tegerField(default=0)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases | =(models.Model,),
),
migrations.CreateModel(
name='JoinInvitation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('message', models.TextField(null=True)),
('sent', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.IntegerField(choices=[(1, b'Sent'), (2, b'Accepted'), (3, b'Joined Independently')])),
('from_user', models.ForeignKey(related_name='invites_sent', to=settings.AUTH_USER_MODEL)),
('signup_code', models.OneToOneField(to='account.SignupCode')),
('to_user', models.ForeignKey(related_name='invites_received', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
},
bases=(models.Model,),
),
]
|
uclouvain/osis_louvain | base/tests/views/group_element_years/test_detach.py | Python | agpl-3.0 | 5,223 | 0.002681 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as u | niversities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2017 Université catholique de Louvain (http://ww | w.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from unittest import mock
from django.contrib.auth.models import Permission
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.test import TestCase
from django.urls import reverse
from waffle.testutils import override_flag
from base.tests.factories.education_group_year import EducationGroupYearFactory
from base.tests.factories.group_element_year import GroupElementYearFactory
from base.tests.factories.person import CentralManagerFactory
@override_flag('education_group_update', active=True)
class TestDetach(TestCase):
@classmethod
def setUpTestData(cls):
cls.education_group_year = EducationGroupYearFactory()
cls.group_element_year = GroupElementYearFactory(parent=cls.education_group_year)
cls.person = CentralManagerFactory()
cls.person.user.user_permissions.add(Permission.objects.get(codename="can_access_education_group"))
cls.url = reverse("education_groups_management")
cls.post_valid_data = {
"root_id": cls.education_group_year.id,
"element_id": cls.education_group_year.id,
"group_element_year_id": cls.group_element_year.id,
'action': 'detach',
}
def setUp(self):
self.client.force_login(self.person.user)
def test_edit_case_user_not_logged(self):
self.client.logout()
response = self.client.post(self.url, data=self.post_valid_data)
self.assertRedirects(response, '/login/?next={}'.format(self.url))
@override_flag('education_group_update', active=False)
def test_detach_case_flag_disabled(self):
response = self.client.post(self.url, data=self.post_valid_data)
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTemplateUsed(response, "page_not_found.html")
@mock.patch("base.business.education_groups.perms.is_eligible_to_change_education_group", return_value=False)
def test_detach_case_user_not_have_access(self, mock_permission):
response = self.client.post(self.url, data=self.post_valid_data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self.assertTemplateUsed(response, "access_denied.html")
@mock.patch("base.business.education_groups.perms.is_eligible_to_change_education_group", return_value=True)
def test_detach_case_get_without_ajax_success(self, mock_permission):
response = self.client.get(self.url, data=self.post_valid_data, follow=True)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertTemplateUsed(response, "education_group/group_element_year/confirm_detach.html")
@mock.patch("base.business.education_groups.perms.is_eligible_to_change_education_group", return_value=True)
def test_detach_case_get_with_ajax_success(self, mock_permission):
response = self.client.get(self.url, data=self.post_valid_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertTemplateUsed(response, "education_group/group_element_year/confirm_detach_inner.html")
@mock.patch("base.models.group_element_year.GroupElementYear.delete")
@mock.patch("base.business.education_groups.perms.is_eligible_to_change_education_group")
def test_detach_case_post_success(self, mock_permission, mock_delete):
mock_permission.return_value = True
http_referer = reverse('education_group_read', args=[
self.education_group_year.id,
self.education_group_year.id
])
response = self.client.post(self.url, data=self.post_valid_data, follow=True, HTTP_REFERER=http_referer)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertRedirects(response, http_referer)
self.assertTrue(mock_delete.called)
|
ddico/odoo | addons/base_vat/models/res_partner.py | Python | agpl-3.0 | 19,705 | 0.0034 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import string
import re
import stdnum
from odoo import api, models, tools, _
from odoo.tools.misc import ustr
from odoo.exceptions import ValidationError
_eu_country_vat = {
'GR': 'EL'
}
_eu_co | untry_vat_inverse = {v: k for k, v in _eu_country_vat.items()}
_ref_vat = {
'al': 'ALJ91402501L',
'ar': 'AR200-5536168-2 or 20055361682',
'at': 'ATU12345675',
'au': '83 914 571 673',
'be': 'BE0477472701',
'bg': 'BG1234567892',
'ch': 'CHE-123.456.788 TVA or CH TVA 123456', # Swiss by Yannick Vaucher @ Camptocamp
'cl': 'CL76086428-5',
'co': 'CO21312 | 3432-1 or CO213.123.432-1',
'cy': 'CY10259033P',
'cz': 'CZ12345679',
'de': 'DE123456788',
'dk': 'DK12345674',
'do': 'DO1-01-85004-3 or 101850043',
'ec': 'EC1792060346-001',
'ee': 'EE123456780',
'el': 'EL12345670',
'es': 'ESA12345674',
'fi': 'FI12345671',
'fr': 'FR23334175221',
'gb': 'GB123456782',
'gr': 'GR12345670',
'hu': 'HU12345676',
'hr': 'HR01234567896', # Croatia, contributed by Milan Tribuson
'ie': 'IE1234567FA',
'in': "12AAAAA1234AAZA",
'is': 'IS062199',
'it': 'IT12345670017',
'lt': 'LT123456715',
'lu': 'LU12345613',
'lv': 'LV41234567891',
'mc': 'FR53000004605',
'mt': 'MT12345634',
'mx': 'MXGODE561231GR8 or GODE561231GR8',
'nl': 'NL123456782B90',
'no': 'NO123456785',
'pe': '10XXXXXXXXY or 20XXXXXXXXY or 15XXXXXXXXY or 16XXXXXXXXY or 17XXXXXXXXY',
'pl': 'PL1234567883',
'pt': 'PT123456789',
'ro': 'RO1234567897',
'rs': 'RS101134702',
'ru': 'RU123456789047',
'se': 'SE123456789701',
'si': 'SI12345679',
'sk': 'SK2022749619',
'sm': 'SM24165',
'tr': 'TR1234567890 (VERGINO) or TR17291716060 (TCKIMLIKNO)' # Levent Karakas @ Eska Yazilim A.S.
}
class ResPartner(models.Model):
_inherit = 'res.partner'
def _split_vat(self, vat):
vat_country, vat_number = vat[:2].lower(), vat[2:].replace(' ', '')
return vat_country, vat_number
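    # e.g. _split_vat('BE 0477 472 701') -> ('be', '0477472701') (illustrative)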
@api.model
def simple_vat_check(self, country_code, vat_number):
'''
Check the VAT number depending of the country.
http://sima-pc.com/nif.php
'''
if not ustr(country_code).encode('utf-8').isalpha():
return False
check_func_name = 'check_vat_' + country_code
check_func = getattr(self, check_func_name, None) or getattr(stdnum.util.get_cc_module(country_code, 'vat'), 'is_valid', None)
if not check_func:
# No VAT validation available, default to check that the country code exists
if country_code.upper() == 'EU':
# Foreign companies that trade with non-enterprises in the EU
# may have a VATIN starting with "EU" instead of a country code.
return True
country_code = _eu_country_vat_inverse.get(country_code, country_code)
return bool(self.env['res.country'].search([('code', '=ilike', country_code)]))
return check_func(vat_number)
@api.model
@tools.ormcache('vat')
def _check_vies(self, vat):
# Store the VIES result in the cache. In case an exception is raised during the request
# (e.g. service unavailable), the fallback on simple_vat_check is not kept in cache.
return stdnum.eu.vat.check_vies(vat)
@api.model
def vies_vat_check(self, country_code, vat_number):
try:
# Validate against VAT Information Exchange System (VIES)
# see also http://ec.europa.eu/taxation_customs/vies/
return self._check_vies(country_code.upper() + vat_number)
except Exception:
# see http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl
# Fault code may contain INVALID_INPUT, SERVICE_UNAVAILABLE, MS_UNAVAILABLE,
# TIMEOUT or SERVER_BUSY. There is no way we can validate the input
# with VIES if any of these arise, including the first one (it means invalid
# country code or empty VAT number), so we fall back to the simple check.
return self.simple_vat_check(country_code, vat_number)
@api.model
def fix_eu_vat_number(self, country_id, vat):
europe = self.env.ref('base.europe')
country = self.env["res.country"].browse(country_id)
if not europe:
europe = self.env["res.country.group"].search([('name', '=', 'Europe')], limit=1)
if europe and country and country.id in europe.country_ids.ids:
vat = re.sub('[^A-Za-z0-9]', '', vat).upper()
country_code = _eu_country_vat.get(country.code, country.code).upper()
if vat[:2] != country_code:
vat = country_code + vat
return vat
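    # e.g. for a Greek partner, 'el 123 456 70' becomes 'EL12345670'; the 'GR'
    # ISO code maps to the 'EL' VAT prefix via _eu_country_vat (illustrative
    # values).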
@api.constrains('vat', 'country_id')
def check_vat(self):
if self.env.context.get('company_id'):
company = self.env['res.company'].browse(self.env.context['company_id'])
else:
company = self.env.company
if company.vat_check_vies:
# force full VIES online check
check_func = self.vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = self.simple_vat_check
for partner in self:
if not partner.vat:
continue
#check with country code as prefix of the TIN
vat_country, vat_number = self._split_vat(partner.vat)
if not check_func(vat_country, vat_number):
#if fails, check with country code from country
country_code = partner.commercial_partner_id.country_id.code
if country_code:
if not check_func(country_code.lower(), partner.vat):
msg = partner._construct_constraint_msg(country_code.lower())
raise ValidationError(msg)
def _construct_constraint_msg(self, country_code):
self.ensure_one()
vat_no = "'CC##' (CC=Country Code, ##=VAT Number)"
vat_no = _ref_vat.get(country_code) or vat_no
if self.env.context.get('company_id'):
company = self.env['res.company'].browse(self.env.context['company_id'])
else:
company = self.env.company
if company.vat_check_vies:
return '\n' + _(
'The VAT number [%(vat)s] for partner [%(name)s] either failed the VIES VAT validation check or did not respect the expected format %(format)s.',
vat=self.vat,
name=self.name,
format=vat_no
)
return '\n' + _(
'The VAT number [%(vat)s] for partner [%(name)s] does not seem to be valid. \nNote: the expected format is %(format)s',
vat=self.vat,
name=self.name,
format=vat_no
)
__check_vat_ch_re1 = re.compile(r'(MWST|TVA|IVA)[0-9]{6}$')
__check_vat_ch_re2 = re.compile(r'E([0-9]{9}|-[0-9]{3}\.[0-9]{3}\.[0-9]{3})(MWST|TVA|IVA)$')
def check_vat_ch(self, vat):
'''
Check Switzerland VAT number.
'''
# VAT number in Switzerland will change between 2011 and 2013
# http://www.estv.admin.ch/mwst/themen/00154/00589/01107/index.html?lang=fr
# Old format is "TVA 123456" we will admit the user has to enter ch before the number
# Format will becomes such as "CHE-999.999.99C TVA"
# Both old and new format will be accepted till end of 2013
# Accepted format are: (spaces are ignored)
# CH TVA ######
# CH IVA ######
# CH MWST #######
#
# CHE#########MWST
# CHE#########TVA
# CHE#########IVA
# CHE-###.###.### MWST
# CHE-###.###.### TVA
# CHE-###.###.### IVA
#
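        # Hedged examples of inputs (country prefix stripped, spaces removed)
        # that the two regexes accept -- illustrative only:
        #     __check_vat_ch_re1: 'MWST123456', 'TVA654321'
        #     __check_vat_ch_re2: 'E123456788MWST', 'E-123.456.788TVA'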
if self.__check_vat_ch_re1.match(vat):
return True
match = self.__check_vat_ch_re2.match(vat)
if match:
# For new TVA numbers, do a mod11 check
num = [s fo |
subodhchhabra/airflow | tests/contrib/operators/test_dataflow_operator.py | Python | apache-2.0 | 7,353 | 0.000544 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator, \
    DataFlowJavaOperator, DataflowTemplateOperator
from airflow.version import version
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test-dataflow-operator'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
JAR_FILE = 'example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar'}
}
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
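# e.g. an Airflow version of '1.9.0' yields the label-safe 'v1-9-0'; Dataflow
# job labels may not contain '.' or '+' (our reading, not stated here).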
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.contrib.operators.dataflow_operator.{}'
class DataFlowPythonOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowPythonOperator(
task_id=TASK_ID,
py_file=PY_FILE,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.py_file, PY_FILE)
self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_PYTHON)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_python_workflow.
"""
start_python_hook = dataflow_mock.return_value.start_python_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called) |
expected_options = {
'project': 'test',
'staging_location': 'gs://test/staging',
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
gcs_download_hook.assert_called_once_with(PY_FILE)
start_python_hook.assert_called_once_with(TASK_ID, expected_options,
| mock.ANY, PY_OPTIONS)
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class DataFlowJavaOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataFlowJavaOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_JAVA)
self.assertEqual(self.dataflow.job_class, JOB_CLASS)
self.assertEqual(self.dataflow.jar, JAR_FILE)
self.assertEqual(self.dataflow.options,
EXPECTED_ADDITIONAL_OPTIONS)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
@mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
def test_exec(self, gcs_hook, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_java_workflow.
"""
start_java_hook = dataflow_mock.return_value.start_java_dataflow
gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
gcs_download_hook.assert_called_once_with(JAR_FILE)
start_java_hook.assert_called_once_with(TASK_ID, mock.ANY,
mock.ANY, JOB_CLASS)
class DataFlowTemplateOperatorTest(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplateOperator(
task_id=TASK_ID,
template=TEMPLATE,
parameters=PARAMETERS,
dataflow_default_options=DEFAULT_OPTIONS_TEMPLATE,
poll_sleep=POLL_SLEEP)
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
self.assertEqual(self.dataflow.task_id, TASK_ID)
self.assertEqual(self.dataflow.template, TEMPLATE)
self.assertEqual(self.dataflow.parameters, PARAMETERS)
self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
self.assertEqual(self.dataflow.dataflow_default_options,
DEFAULT_OPTIONS_TEMPLATE)
@mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
def test_exec(self, dataflow_mock):
"""Test DataFlowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f'
}
start_template_hook.assert_called_once_with(TASK_ID, expected_options,
PARAMETERS, TEMPLATE)
|
furlow/QifSwap | QifSwap.py | Python | unlicense | 1,496 | 0.010027 | import sys
import os
from PySide.QtCore import *
from PySide.QtGui import *
__appname__ = "Qifswap"
def swap_outflows_and_inflows(qif_filename):
    qif = open(qif_filename, 'r+')
    data = qif.read()
    # Swap the sign of every amount ('T') record via a temporary token.
    data = data.replace('\nT', '\nTemp')
    data = data.replace('\nTemp-', '\nT')
    data = data.replace('\nTemp', '\nT-')
    qif.seek(0)
    qif.write(data)
    qif.truncate()  # the rewritten data may be shorter than the original
    qif.close()
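# Hedged sketch of the transformation on an in-memory sample; the QIF snippet
# below is invented for illustration:
def _swap_example():
    sample = "D01/02/2020\nT-42.00\n^\nT10.00\n^"
    data = sample.replace('\nT', '\nTemp')
    data = data.replace('\nTemp-', '\nT')
    data = data.replace('\nTemp', '\nT-')
    return data  # the amounts become T42.00 and T-10.00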
class Program(QDialog):
def __init__(self, | parent = None):
super(Program, self).__init__(parent)
self.setWindowTitle(__appname__)
#Create button for opening credit card file
self.open_fi | le_button = QPushButton("Open File")
#Align the widget in the container
layout = QVBoxLayout()
layout.addWidget(self.open_file_button)
self.setLayout(layout)
#connect to functions for saving
self.open_file_button.clicked.connect(self.openQifFile)
def openQifFile(self):
qif_filename, _ = QFileDialog.getOpenFileName(self,
"Open File",
"",
"Bank File (*.qif)"
)
if os.path.isfile(qif_filename):
swap_outflows_and_inflows(qif_filename)
def main():
sys.argv[0] = "QifSwap"
app = QApplication(sys.argv)
form = Program()
form.show()
app.exec_()
if __name__ == '__main__':
main()
|
coddingtonbear/django-measurement | tests/forms.py | Python | mit | 556 | 0 | from django import forms
from django_measu | rement.forms import MeasurementField
from tests.custom_measure_base import DegreePerTime, Temperature, Time
from tests.models import MeasurementTestModel
class MeasurementTestForm(forms.ModelForm):
class Meta:
model = MeasurementTestModel
exclude = []
class LabelTestForm(forms.Form):
simple = MeasurementField(Temperature)
class SITestForm(forms.Form):
| simple = MeasurementField(Time)
class BiDimensionalLabelTestForm(forms.Form):
simple = MeasurementField(DegreePerTime)
|
joopert/home-assistant | homeassistant/components/familyhub/camera.py | Python | apache-2.0 | 1,553 | 0.000644 | """Family Hub camera for Samsung Refrigerators."""
import logging
import voluptuous as vol
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
from homeassistant.const import CONF_IP_ADDRESS, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "FamilyHub Camera"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
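# Hedged example of the matching YAML configuration (address and name are
# placeholders):
#
#     camera:
#       - platform: familyhub
#         ip_address: 192.168.1.100
#         name: Fridge cam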
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Family Hub Camera."""
from pyfamilyhublocal import FamilyHubCam
address = config.get(CONF_IP_ADDRESS)
name = config.get(CONF_NAME)
session = async_get_clientsession(hass)
family_hub_cam = FamilyH | ubCam(address, hass.loop, session)
async_add_entities([FamilyHubCamera(name, family_hub_cam)], True)
class FamilyHubCamera(Camera):
"""The representation of a Family Hub camera.""" |
def __init__(self, name, family_hub_cam):
"""Initialize camera component."""
super().__init__()
self._name = name
self.family_hub_cam = family_hub_cam
async def async_camera_image(self):
"""Return a still image response."""
return await self.family_hub_cam.async_get_cam_image()
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
rgayon/plaso | tests/parsers/sqlite_plugins/gdrive.py | Python | apache-2.0 | 2,876 | 0.00209 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Drive database plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import gdrive as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import gdrive
from tests.parsers.sqlite_plugins import test_lib
class GoogleDrivePluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Drive database plugin."""
def testProcess(self):
"""Tests the Process function on a Google Drive database file."""
plugin = gdrive.GoogleDrivePlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(['snapshot.db'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 30)
# Let's verify that we've got the correct balance of cloud and local
# entry events.
        # 10 files amount | ing to:
# 20 Cloud Entries (two timestamps per entry).
# 10 Local Entries (one timestamp per entry).
local_entries = []
cloud_entries = []
for event in storage_writer.GetEvents():
event_data = self._GetEventDataOfEvent(storage_writer, event)
if event_data.data_type == 'gdrive:snapshot:local_entry':
local_entries.append(event)
else:
cloud_entries.append(event)
self.assertEqual(len(local_entries), 10)
self.assertEqual(len(cloud_entries), 20)
# Test one local and one cloud entry.
event = local_entries[5]
self.CheckTimestamp(event.timestamp, '2014-01-28 00:11:25.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
file_path = (
'%local_sync_root%/Top Secret/Enn meiri '
'leyndarmál/Sýnileiki - Örverpi.gdoc')
self.assertEqual(event_data.path, file_path)
expected_message = 'File Path: {0:s} Size: 184'.format(file_path)
self._TestGetMessageStrings(
event_data, expected_message, file_path)
event = cloud_entries[16]
self.CheckTimestamp(event.timestamp, '2014-01-28 00:12:27.000000')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_MODIFICATION)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.document_type, 6)
expected_url = (
'https://docs.google.com/document/d/'
'1ypXwXhQWliiMSQN9S5M0K6Wh39XF4Uz4GmY-njMf-Z0/edit?usp=docslist_api')
self.assertEqual(event_data.url, expected_url)
expected_message = (
'File Path: /Almenningur/Saklausa hliðin '
'[Private] '
'Size: 0 '
'URL: {0:s} '
'Type: DOCUMENT').format(expected_url)
expected_short_message = '/Almenningur/Saklausa hliðin'
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
|
bartnijssen/RVIC | rvic/core/convolution_wrapper.py | Python | gpl-3.0 | 1,407 | 0 | """
convolution_wrapper.py
ctypes wrapper for rvic_convolution.c
gcc -shared -o rvic_convolution.so rvic_convolution.c
"""
import os
import numpy as np
import ctypes
SHAREDOBJECT = 'rvic_convolution.so'
LIBPATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
try:
_convolution = np.ctypeslib.load_library(SHAREDOBJECT, LIBPATH)
except ImportError as ie:
print('looking for shared object {0} in {1}'.format(SHAREDOBJECT, LIBPATH))
raise ImportError(ie)
except OSError as oe:
print('looking for shared object {0} in {1}'.format(SHAREDOBJECT, LIBPATH))
raise ImportError(oe)
_args = [ctypes.c_int,
ctypes.c_in | t,
ctypes.c_int,
ctypes.c_int,
np.ctypeslib.ndpointer(np.int32),
np.ctypeslib.ndpointer(np.int32),
np.ctypeslib.ndpointer(np.int32),
np.ctypeslib.ndpointer(np.int32),
np.ctypeslib.ndpointer(np.float64),
np.ctypeslib.ndpointer(np.float64),
np.ctypeslib.ndpointer(np.floa | t64)]
_convolution.convolve.argtypes = _args
_convolution.convolve.restype = None
def rvic_convolve(*args):
"""args:
nsources,
noutlets,
subset_length,
xsize,
source2outlet_ind,
source_y_ind,
source_x_ind,
source_time_offset,
unit_hydrograph,
aggrunin,
ring
"""
_convolution.convolve(*args)
return
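# Hedged calling sketch (array names and shapes are illustrative, not from the
# RVIC docs); dtypes must match the argtypes registered above:
#
#     rvic_convolve(nsources, noutlets, subset_length, xsize,
#                   source2outlet_ind.astype(np.int32),
#                   source_y_ind.astype(np.int32),
#                   source_x_ind.astype(np.int32),
#                   source_time_offset.astype(np.int32),
#                   unit_hydrograph.astype(np.float64),
#                   aggrunin.astype(np.float64),
#                   ring.astype(np.float64))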
|
HaebinShin/tensorflow | tensorflow/python/ops/nn_batchnorm_test.py | Python | apache-2.0 | 24,393 | 0.006559 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
class BatchNormalizationTest(tf.test.TestCase):
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
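  # Reference formula implemented above (the flags select the optional scale
  # and shift terms):
  #     y = gamma * (x - m) / sqrt(v + epsilon) + beta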
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * tf.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
# _batch_norm_with_global_normalization is deprecated in v9
tf.get_default_graph().graph_def_versions.producer = 8
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return tf.nn.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return tf.nn.batch_normalization(
x, m, v, beta if shift_after_normalization else None,
gamma if scale_after_normalization else None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
bn1 = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
on = self._opsBatchNorm(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(
x_val, m_val, v_val, beta_val, gamma_val, epsilon,
scale_after_normalization, shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self, param_index, tag, scale_after_normalization,
shift_after_normalization, version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = tf.test.compute_gradient_error(
all_params[param_index], all_shapes[param_index], output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"),
err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(
self, param_index, tag, err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(
param_index, tag, scale_after_normalization,
shift_after_normalization, v, err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(2, "variance",
err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
    # Since beta does not exist when shift_after_normalizati | on=False, we only
    # test for shift_after_normalization=True.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", s | cale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In versi |
bhrzslm/uncertainty-reasoning | my_engine/others/GrMPy/lib/GrMPy/Examples/Discrete/BNET/Inference/Exact/Tut_BNET_maxsum.py | Python | mit | 3,090 | 0.00356 | # Author: Almero Gouws <14366037@sun.ac.za>
"""
This is a tutorial on how to create a Bayesian network, and perform
exact MAX-SUM inference on it.
"""
"""Import the required numerical modules"""
import numpy as np
"""Import the GrMPy modules"""
import models
import inference
import cpds
if __name__ == '__main__':
"""
This example is based on the lawn sprinkler example, and the Bayesian
network has the following structure, with all edges directed downwards:
Cloudy - 0
/ \
/ \
/ \
Sprinkler - 1 Rainy - 2
\ /
\ /
\ /
Wet Grass -3
"""
"""Assign a unique numerical identifier to each node"""
C = 0
S = 1
R = 2
W = 3
"""Assign the number of nodes in the graph"""
nodes = 4
"""
The graph structure is represented as a adjacency matrix, dag.
If dag[i, j] = 1, then there exists a directed edge from node
i and node j.
"""
dag = np.zeros((nodes, nodes))
dag[C, [R, S]] = 1
dag[R, W] = 1
dag[S, W] = 1
"""
Define the size of each node, which is the number of different values a
node could observed at. For example, if a node is either True of False,
it has only 2 possible values it could be, therefore its size is 2. All
the nodes in this graph has a size 2.
"""
node_sizes = 2 * np.ones(nodes)
"""
We now need to assign a conditional probability distribution to each
node.
"""
node_cpds = [[], [], [], []]
"""Define the CPD for node 0"""
CPT = np.array([0.5, 0.5])
node_cpds[C] = cpds.TabularCPD(CPT)
"""Define the CPD for node 1"""
CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
node_cpds[R] = cpds.TabularCPD(CPT)
"""Define the CPD for node 2"""
CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
node_cpds[S] = cpds.TabularCPD(CPT)
"""Define the CPD for node 3"""
CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
node_cpds | [W] = cpds.TabularCPD(CPT)
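    # Our reading of the 3-D table above (not stated in the original
    # tutorial): the first two axes index the states of W's parents and the
    # last axis indexes W itself, i.e. CPT[parent1][parent2][w].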
"""Create the Bayesian network"""
net = models.bnet(dag, node_sizes, node_cpds=node_cpds)
"""
Intialize the BNET's inference engine to use EXACT inference
by setting exact=True.
"""
net.init_inferenc | e_engine(exact=True)
"""Define observed evidence ([] means that node is unobserved)"""
evidence = [None, 0, None, None]
"""Execute the sum-product algorithm"""
# net.enter_evidence(evidence)
mlc = net.max_sum(evidence)
"""
mlc contains the most likely configuaration for all the nodes in the BNET
based in the input evidence.
"""
print 'Cloudy node: ', bool(mlc[C])
print 'Sprinkler node: ', bool(mlc[S])
print 'Rainy node: ', bool(mlc[R])
print 'Wet grass node: ', bool(mlc[W])
|
sharad/calibre | src/calibre/gui2/tweak_book/undo.py | Python | gpl-3.0 | 8,104 | 0.002098 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import shutil
from PyQt5.Qt import (
QAbstractListModel, Qt, QModelIndex, QApplication, QWidget,
QGridLayout, QListView, QStyledItemDelegate, pyqtSignal, QPushButton, QIcon)
from calibre.gui2 import error_dialog
ROOT = QModelIndex()
MAX_SAVEPOINTS = 100
def cleanup(containers):
for container in containers:
try:
shutil.rmtree(container.root, ignore_errors=True)
except:
pass
class State(object):
def __init__(self, container):
self.container = container
self.message = None
self.rewind_message = None
class GlobalUndoHistory(QAbstractListModel):
def __init__(self, parent=None):
QAbstractListModel.__init__(self, parent)
self.states = []
self.pos = 0
def rowCount(self, parent=ROOT):
return len(self.states)
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DisplayRole:
return self.label_for_row(index.row())
if role == Qt.FontRole and index.row() == self.pos:
f = QApplication.instance().font()
f.setBold(True)
return f
if role == Qt.UserRole:
return self.states[index.row()]
return None
    def label_for_row(self, row):
        msg = self.states[row].message
        if self.pos == row:
            msg = _('Current state') + ('' if not msg else _(' [was %s]') % msg)
        elif not msg:
            msg = _('[Unnamed state]')
        return msg
def label_for_container(self, container):
for i, state in enu | merate(self.states):
if state.container is container:
return self.label_for_row(i)
@property
def current_container(self) | :
return self.states[self.pos].container
@property
def previous_container(self):
return self.states[self.pos - 1].container
def open_book(self, container):
self.beginResetModel()
self.states = [State(container)]
self.pos = 0
self.endResetModel()
def truncate(self):
extra = self.states[self.pos+1:]
if extra:
self.beginRemoveRows(ROOT, self.pos+1, len(self.states) - 1)
cleanup(extra)
self.states = self.states[:self.pos+1]
if extra:
self.endRemoveRows()
def add_savepoint(self, new_container, message):
try:
self.states[self.pos].rewind_message = self.states[self.pos].message
self.states[self.pos].message = message
except IndexError:
raise IndexError('The checkpoint stack has an incorrect position pointer. This should never happen: self.pos = %r, len(self.states) = %r' % (
self.pos, len(self.states)))
self.truncate()
self.beginInsertRows(ROOT, self.pos+1, self.pos+1)
self.states.append(State(new_container))
self.pos += 1
self.endInsertRows()
self.dataChanged.emit(self.index(self.pos-1), self.index(self.pos))
if len(self.states) > MAX_SAVEPOINTS:
num = len(self.states) - MAX_SAVEPOINTS
self.beginRemoveRows(ROOT, 0, num - 1)
cleanup(self.states[:num])
self.states = self.states[num:]
self.pos -= num
self.endRemoveRows()
def rewind_savepoint(self):
''' Revert back to the last save point, should only be used immediately
after a call to add_savepoint. If there are intervening calls to undo
or redo, behavior is undefined. This is intended to be used in the case
where you create savepoint, perform some operation, operation fails, so
revert to state before creating savepoint. '''
if self.pos > 0 and self.pos == len(self.states) - 1:
self.beginRemoveRows(ROOT, self.pos, self.pos)
self.pos -= 1
cleanup([self.states.pop().container])
self.endRemoveRows()
self.dataChanged.emit(self.index(self.pos), self.index(self.pos))
ans = self.current_container
self.states[self.pos].message = self.states[self.pos].rewind_message
return ans
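    # Usage pattern implied by the docstring above (names are illustrative):
    #     history.add_savepoint(new_container, 'risky operation')
    #     try:
    #         do_operation()
    #     except Exception:
    #         container = history.rewind_savepoint()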
def undo(self):
if self.pos > 0:
self.pos -= 1
self.dataChanged.emit(self.index(self.pos), self.index(self.pos+1))
return self.current_container
def redo(self):
if self.pos < len(self.states) - 1:
self.pos += 1
self.dataChanged.emit(self.index(self.pos-1), self.index(self.pos))
return self.current_container
def revert_to(self, container):
for i, state in enumerate(self.states):
if state.container is container:
opos = self.pos
self.pos = i
for x in (i, opos):
self.dataChanged.emit(self.index(x), self.index(x))
return container
@property
def can_undo(self):
return self.pos > 0
@property
def can_redo(self):
return self.pos < len(self.states) - 1
@property
def undo_msg(self):
if not self.can_undo:
return ''
return self.states[self.pos - 1].message or ''
@property
def redo_msg(self):
if not self.can_redo:
return ''
return self.states[self.pos + 1].message or _('[Unnamed state]')
def update_path_to_ebook(self, path):
for state in self.states:
state.container.path_to_ebook = path
class SpacedDelegate(QStyledItemDelegate):
def sizeHint(self, *args):
ans = QStyledItemDelegate.sizeHint(self, *args)
ans.setHeight(ans.height() + 4)
return ans
class CheckpointView(QWidget):
revert_requested = pyqtSignal(object)
compare_requested = pyqtSignal(object)
def __init__(self, model, parent=None):
QWidget.__init__(self, parent)
self.l = l = QGridLayout(self)
self.setLayout(l)
self.setContentsMargins(0, 0, 0, 0)
self.view = v = QListView(self)
self.d = SpacedDelegate(v)
v.doubleClicked.connect(self.double_clicked)
v.setItemDelegate(self.d)
v.setModel(model)
l.addWidget(v, 0, 0, 1, -1)
model.dataChanged.connect(self.data_changed)
self.rb = b = QPushButton(QIcon(I('edit-undo.png')), _('&Revert to'), self)
b.setToolTip(_('Revert the book to the selected checkpoint'))
b.clicked.connect(self.revert_clicked)
l.addWidget(b, 1, 1)
self.cb = b = QPushButton(QIcon(I('diff.png')), _('&Compare'), self)
b.setToolTip(_('Compare the state of the book at the selected checkpoint with the current state'))
b.clicked.connect(self.compare_clicked)
l.addWidget(b, 1, 0)
def data_changed(self, *args):
self.view.clearSelection()
m = self.view.model()
sm = self.view.selectionModel()
sm.select(m.index(m.pos), sm.ClearAndSelect)
self.view.setCurrentIndex(m.index(m.pos))
def double_clicked(self, index):
pass # Too much danger of accidental double click
def revert_clicked(self):
m = self.view.model()
row = self.view.currentIndex().row()
if row < 0:
return
if row == m.pos:
return error_dialog(self, _('Cannot revert'), _(
'Cannot revert to the current state'), show=True)
self.revert_requested.emit(m.states[row].container)
def compare_clicked(self):
m = self.view.model()
row = self.view.currentIndex().row()
if row < 0:
return
if row == m.pos:
return error_dialog(self, _('Cannot compare'), _(
'There is no point comparing the current state to itself'), show=True)
self.compare_requested.emit(m.states[row].container)
|
microelly2/Animation | mathplotlibNode.py | Python | gpl-2.0 | 10,507 | 0.052441 | # -*- coding: utf-8 -*-
#-------------------------------------------------
#-- animation workbench
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
from __future__ import unicode_literals
__vers__="08.04.2016 0.4"
import sys
import os
import random
import numpy as np
import time
__dir__ = os.path.dirname(__file__)
#import matplotlib
#matplotlib.use('Qt4Agg')
#matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import say
reload(say)
from say import *
import reconstruction
reload (reconstruction.projectiontools)
from reconstruction.projectiontools import *
import reconstruction.miki as miki
reload(miki)
import Animation
class _MPL(Animation._Actor):
def __init__(self,obj):
obj.Proxy = self
self.Type = self.__class__.__name__
self.obj2 = obj
self.vals={}
_ViewProviderMPL(obj.ViewObject)
def onChanged(self,obj,prop):
# say(["onChanged " + str(self),obj,prop,obj.getPropertyByName(prop)])
if prop == 'countSources':
for i in range(obj.countSources):
try:
obj.getPropertyByName('source'+str(i+1)+'Object')
except:
obj.addProperty('App::PropertyLink','source'+str(i+1)+'Object',"Source " + str(i+1))
obj.addProperty('App::PropertyString','source'+str(i+1)+'Data',"Source " + str(i+1))
obj.addProperty('App::PropertyFloatList','source'+str(i+1)+'Values',"Source " + str(i+1))
obj.addProperty('App::PropertyBool','source'+str(i+1)+'Off',"Source " + str(i+1))
exec("self.vals"+str(i+1)+"={}")
for i in range(10):
if i<obj.countSources: mode=0
else: mode=2
try:
obj.setEditorMode("source"+str(i+1)+"Object", mode)
obj.setEditorMode("source"+str(i+1)+"Data", mode)
obj.setEditorMode("source"+str(i+1)+"Values", mode)
obj.setEditorMode("source"+str(i+1)+"Off", mode)
except:
break
pass
def execute(self,obj):
if obj.mode=='histogram':
# self.edit()
say("plot ------------------")
try:
app=obj.Proxy.app
app.plot()
            except Exception:
                sayW("Error calling obj.Proxy.app.plot()")
say("plot ----------------done --")
return
if not obj.record:
say(obj.Label+ " no recording")
return
        if not hasattr(self, 'vals'):
            self.vals = {}
src=obj.sourceObject
vs='src.'+obj.sourceData
v=eval(vs)
self.vals[v]=v
for i in range(obj.countSources):
exec("src"+str(i+1)+"=obj.source"+str(i+1)+"Object")
exec("ss=obj.source"+str(i+1)+"Object")
if ss != None:
vs2="obj.source"+str(i+1)+"Data"
v2=eval(vs2)
vs3="ss."+v2
v3=eval(vs3)
tt=eval("self.vals"+str(i+1))
tt[v]=v3
return
class _ViewProviderMPL(Animation._ViewProviderActor):
def __init__(self,vobj):
self.attach(vobj)
self.Object = vobj.Object
vobj.Proxy = self
self.vers=__vers__
def attach(self,vobj):
self.emenu=[]
self.cmenu=[]
self.Object = vobj.Object
vobj.Proxy = self
self.vers=__vers__
def getIcon(self):
return __dir__+ '/icons/icon1.svg'
def createDialog(self):
app=MyApp()
miki2=miki.Miki()
miki2.app=app
app.root=miki2
app.obj=self.Object
self.Object.Proxy.app=app
self.edit= lambda:miki2.run(MyApp.s6,app.create2)
def setupContextMenu(self, obj, menu):
self.createDialog()
cl=self.Object.Proxy.__class__.__name__
action = menu.addAction("About " + cl)
action.triggered.connect(self.showVersion)
action = menu.addAction("Edit ...")
action.triggered.connect(self.edit)
def setEdit(self,vobj,mode=0):
self.createDialog()
self.edit()
#FreeCAD.ActiveDocument.recompute()
return True
class MatplotlibWidget(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
super(MatplotlibWidget, self).__init__(Figure())
self.setParent(parent)
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.canvas = FigureCanvas(self.figure)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.axes = self.figure.add_subplot(111)
self.setMinimumSize(self.size())
class MyApp(object):
s6='''
VerticalLayout:
id:'main'
# setFixedHeight: 500
# setFixedWidth: 500
# move: PySide.QtCore.QPoint(3000,100)
# QtGui.QLabel:
# setText:"*** My M A T P L O T L I B ***"
#
'''
def plot(self):
if self.obj.mode=='histogram':
self.mpl.figure.clf()
self.mpl.canvas = FigureCanvas(self.mpl.figure)
FigureCanvas.updateGeometry(self.mpl)
self.mpl.axes = self.mpl.figure.add_subplot(111)
self.mpl.draw()
FreeCAD.mpl=self.mpl
k=self.plot_histogram()
# FreeCAD.k=k
# self.mpl.axes.set_xlabel('length')
# self.mpl.axes.set_ylabel('count')
# self.mpl.axes.title=self.obj.Label
return
self.mpl.figure.clf()
self.mpl.canvas = FigureCanvas(self.mpl.figure)
FigureCanvas.updateGeometry(self.mpl)
self.mpl.axes = self.mpl.figure.add_subplot(111)
self.mpl.draw()
vals=self.obj.Proxy.vals
x=[]
y=[]
for k in vals:
x.append(k)
y.append(vals[k])
self.obj.sourceValues=y
for i in range(self.obj.countSources):
nr=str(i+1)
ss=eval("self.obj.source"+nr+"Object")
sf=eval("self.obj.source"+nr+"Off")
if ss!=None and not sf:
exec("vals=self.obj.Proxy.vals"+nr)
x2=[k for k in vals]
y1=[vals[k] for k in vals]
exec("label=self.obj.source"+nr+"Object.Label + ': ' + self.obj.source"+nr+"Data")
t=self.mpl.axes.plot(x,y1,label=label)
exec("self.obj.source"+nr+"Values=y1")
if ss== None and not sf and not self.obj.useNumpy:
say("no sourcve .jijij")
exec("vals=self.obj.source"+nr+"Values")
# x2=[k for k in vals]
# y1=[vals[k] for k in vals]
# say(vals)
y1=vals
x=range(len(vals))
exec("label=self.obj.source"+nr+"Data")
# label="Label for " + str(nr) + ": "+ label
                t=self.mpl.axes.plot(x,y1,label=label)
# exec("self.obj.source"+nr+"Values=y1")
say("DDone")
if self.obj.useNumpy:
            self.obj.outTime=self.obj.sourceNumpy.outTime
FreeCAD.activeDocument().recompute()
for i in range(10):
if eval("self.obj.useOut"+str(i)):
try:
y=self.obj.sourceNumpy.getPropertyByName('out'+str(i))
label=self.obj.sourceNumpy.getPropertyByName('label'+str(i))
if label=='':
label="numpy " + str(i)
# if x == []:
x=range(len(y))
if self.obj.outTime!=[]:
x=self.obj.outTime
say(("lens",len(x),len(y)))
t=self.mpl.axes.plot(x,y,label=label)
exec("self.obj.out"+str(i)+"="+str(y))
except:
sayexc("cannont calculate out"+str(i))
legend = self.mpl.axes.legend(loc='upper right', shadow=True)
self.mpl.draw()
self.mpl.figure.canvas.draw()
def plot_histogram(self): # for mode ==histogram
self.mpl.figure.clf()
self.mpl.canvas = FigureCanvas(self.mpl.figure)
FigureCanvas.updateGeometry(self.mpl)
self.mpl.axes = self.mpl.figure.add_subplot(111)
self.mpl.draw()
sob=self.obj.source1Object
y="sob."+str(self.obj.source1Data)
vals=eval(y)
# Proxy.extras.linelengths2
# say(vals)
FreeCAD.mpl=self.mpl
# self.mpl.axes.axis([-90, 90, 0, 100])
n, bins, patches = self.mpl.axes.hist(vals, 180, normed=0, facecolor='green', alpha=0.75)
self.mpl.axes.axis([0, 180, 0, np.max(n)])
# legend = self.mpl.axes.legend(loc='upper right', shadow=True)
self.mpl.draw()
self.mpl.figure.canvas.draw()
def reset(self):
self.obj.Proxy.vals={}
self.obj.sourceValues=[]
for i in range(self.obj.countSources):
nr=str(i+1)
exec("self.obj.Proxy.vals"+nr+"={}")
exec("self.obj.source"+nr+"Values=[]")
self.mpl.figure.clf()
self.mpl.canvas = FigureCanvas(self.mpl.figure)
FigureCanvas.updateGeometry(self.mpl)
self.mpl.axes = self.mpl.figure.add_subplot(111)
self.mpl.draw()
self.plot()
def create2(self):
par=self.root.ids['main']
l=QtGui.QLabel(self.obj.Label)
self.mpl=MatplotlibWidget()
bt=QtGui.QPushButton("update diagram")
bt.clicked.connect(self.plot)
bt2=QtGui.QPushButton("reset data")
bt2.clicked.connect(self.reset) |
wwright2/dcim3-angstrom1 | sources/bitbake/lib/bb/ui/crumbs/hoblistmodel.py | Python | mit | 35,789 | 0.002515 | #
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2011 Intel Corporation
#
# Authored by Joshua Lock <josh@linux.intel.com>
# Authored by Dongxiao Xu <dongxiao.xu@intel.com>
# Authored by Shane Wang <shane.wang@intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import gobject
from bb.ui.crumbs.hobpages import HobPage
#
# PackageListModel
#
class PackageListModel(gtk.ListStore):
"""
    This class defines a gtk.ListStore subclass which will convert the output
of the bb.event.TargetsTreeGenerated event into a gtk.ListStore whilst also
providing convenience functions to access gtk.TreeModel subclasses which
provide filtered views of the data.
"""
    (COL_NAME, COL_VER, COL_REV, COL_RNM, COL_SEC, COL_SUM, COL_RDEP, COL_RPROV, COL_SIZE, COL_RCP, COL_BINB, COL_INC, COL_FADE_INC, COL_FONT, COL_FLIST) = range(15)
__gsignals__ = {
"package-selection-changed" : (gobject.SIGNAL_RUN | _LAST,
gobject.TYPE_NONE,
()),
}
__toolchain_required_packages__ = ["packagegroup-core-standalone-sdk-target", "packagegroup-core-standalone-sdk-target-dbg"]
def __init__(self):
self.rprov_pkg = {}
gtk.ListStore.__init__ (self,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_STRING,
gobject.TYPE_STRING)
self.sort_column_id, self.sort_order = PackageListModel.COL_NAME, gtk.SORT_ASCENDING
"""
Find the model path for the item_name
Returns the path in the model or None
"""
def find_path_for_item(self, item_name):
pkg = item_name
if item_name not in self.pn_path.keys():
if item_name not in self.rprov_pkg.keys():
return None
pkg = self.rprov_pkg[item_name]
if pkg not in self.pn_path.keys():
return None
return self.pn_path[pkg]
def find_item_for_path(self, item_path):
return self[item_path][self.COL_NAME]
"""
Helper function to determine whether an item is an item specified by filter
"""
def tree_model_filter(self, model, it, filter):
name = model.get_value(it, self.COL_NAME)
for key in filter.keys():
if key == self.COL_NAME:
if filter[key] != 'Search packages by name':
if name and filter[key] not in name:
return False
else:
if model.get_value(it, key) not in filter[key]:
return False
self.filtered_nb += 1
return True
"""
Create, if required, and return a filtered gtk.TreeModelSort
containing only the items specified by filter
"""
def tree_model(self, filter, excluded_items_ahead=False, included_items_ahead=False, search_data=None, initial=False):
model = self.filter_new()
self.filtered_nb = 0
model.set_visible_func(self.tree_model_filter, filter)
sort = gtk.TreeModelSort(model)
sort.connect ('sort-column-changed', self.sort_column_changed_cb)
if initial:
sort.set_sort_column_id(PackageListModel.COL_NAME, gtk.SORT_ASCENDING)
sort.set_default_sort_func(None)
elif excluded_items_ahead:
sort.set_default_sort_func(self.exclude_item_sort_func, search_data)
elif included_items_ahead:
sort.set_default_sort_func(self.include_item_sort_func, search_data)
else:
if search_data and search_data!='Search recipes by name' and search_data!='Search package groups by name':
sort.set_default_sort_func(self.sort_func, search_data)
else:
sort.set_sort_column_id(self.sort_column_id, self.sort_order)
sort.set_default_sort_func(None)
sort.set_sort_func(PackageListModel.COL_INC, self.sort_column, PackageListModel.COL_INC)
sort.set_sort_func(PackageListModel.COL_SIZE, self.sort_column, PackageListModel.COL_SIZE)
sort.set_sort_func(PackageListModel.COL_BINB, self.sort_binb_column)
sort.set_sort_func(PackageListModel.COL_RCP, self.sort_column, PackageListModel.COL_RCP)
return sort
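    # Usage sketch (illustrative): given a populated PackageListModel `plm`,
    # build a sorted view restricted to included packages whose name contains
    # a search string. COL_NAME filter values are matched as substrings; other
    # columns are matched by membership in the supplied collection.
    #
    #   view = plm.tree_model({PackageListModel.COL_INC: [True],
    #                          PackageListModel.COL_NAME: "gcc"})
    #   treeview.set_model(view)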
def sort_column_changed_cb (self, data):
self.sort_column_id, self.sort_order = data.get_sort_column_id ()
def sort_column(self, model, row1, row2, col):
value1 = model.get_value(row1, col)
value2 = model.get_value(row2, col)
if col==PackageListModel.COL_SIZE:
value1 = HobPage._string_to_size(value1)
value2 = HobPage._string_to_size(value2)
cmp_res = cmp(value1, value2)
if cmp_res!=0:
if col==PackageListModel.COL_INC:
return -cmp_res
else:
return cmp_res
else:
name1 = model.get_value(row1, PackageListModel.COL_NAME)
name2 = model.get_value(row2, PackageListModel.COL_NAME)
return cmp(name1,name2)
def sort_binb_column(self, model, row1, row2):
value1 = model.get_value(row1, PackageListModel.COL_BINB)
value2 = model.get_value(row2, PackageListModel.COL_BINB)
value1_list = value1.split(', ')
value2_list = value2.split(', ')
value1 = value1_list[0]
value2 = value2_list[0]
cmp_res = cmp(value1, value2)
if cmp_res==0:
cmp_size = cmp(len(value1_list), len(value2_list))
if cmp_size==0:
name1 = model.get_value(row1, PackageListModel.COL_NAME)
name2 = model.get_value(row2, PackageListModel.COL_NAME)
return cmp(name1,name2)
else:
return cmp_size
else:
return cmp_res
def exclude_item_sort_func(self, model, iter1, iter2, user_data=None):
if user_data:
val1 = model.get_value(iter1, PackageListModel.COL_NAME)
val2 = model.get_value(iter2, PackageListModel.COL_NAME)
return self.cmp_vals(val1, val2, user_data)
else:
val1 = model.get_value(iter1, PackageListModel.COL_FADE_INC)
val2 = model.get_value(iter2, PackageListModel.COL_INC)
return ((val1 == True) and (val2 == False))
def include_item_sort_func(self, model, iter1, iter2, user_data=None):
if user_data:
val1 = model.get_value(iter1, PackageListModel.COL_NAME)
val2 = model.get_value(iter2, PackageListModel.COL_NAME)
return self.cmp_vals(val1, val2, user_data)
else:
val1 = model.get_value(iter1, PackageListModel.COL_INC)
val2 = model.get_value(iter2, PackageListModel.COL_INC)
return ((val1 == False) and (val2 == True))
def sort_func(self, model, iter1, iter2, user_data):
val1 = model.get_value(iter1, PackageListModel.CO |
blynn/spelltapper | app/spelltapper.py | Python | gpl-3.0 | 15,729 | 0.047428 | import urllib
import logging
import random
from datetime import datetime
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.ext.db import Key
class Move(db.Model):
move = db.StringProperty()
ctime = db.DateTimeProperty(auto_now_add=True)
para = db.StringProperty()
charm_hand = db.StringProperty()
charm_gesture = db.StringProperty()
has_para = db.IntegerProperty()
has_charm = db.IntegerProperty()
class Duel(db.Model):
chicken = db.IntegerProperty()
ctime = db.DateTimeProperty(auto_now_add=True)
now_turn = db.IntegerProperty()
received_count = db.IntegerProperty()
level = db.StringProperty()
class Account(db.Model):
ctime = db.DateTimeProperty(auto_now_add=True)
nonce = db.StringProperty()
level = db.IntegerProperty()
class User(db.Model):
ctime = db.DateTimeProperty(auto_now_add=True)
atime = db.DateTimeProperty(auto_now_add=True)
name = db.StringProperty()
level = db.IntegerProperty()
state = db.IntegerProperty()
"""
States:
0 I'm idle.
1 I propose a duel.
2 Somebody accepted my challenge.
3 I acknowledge someone's acceptance.
4 I accepted somebody's challenge.
9 I fled a duel.
"""
arg = db.StringProperty()
duel = db.StringProperty()
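# Hypothetical summary of the duel state machine documented in User above;
# this dict is illustrative only and is not consulted by the handlers below
# (the transition into state 9, fleeing, happens elsewhere in the app).
STATE_TRANSITIONS = {
    0: (1, 4),  # idle: propose a duel, or accept someone else's
    1: (2, 4),  # proposing: someone accepted, or accept another challenge
    2: (3,),    # acceptance seen: acknowledged on the next lobby refresh
    3: (0,),    # dueling (challenger): back to idle when the game finishes
    4: (0,),    # dueling (acceptor): back to idle when the game finishes
    9: (),      # fled: the account is logged off after a timeout
}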
class MainPage(webapp.RequestHandler):
def get(self):
if "" == self.request.query_string:
self.response.out.write("2")
return
cmd = self.request.get("c")
"""
if "deldeldel" == cmd:
logging.info("cleanup")
stuff = db.GqlQuery("SELECT * FROM Move")
for thing in stuff:
thing.delete()
return
"""
def logoff(userkey):
def del_user(userkey):
user = db.get(userkey)
if not user:
return None
user.delete()
return user
u = db.run_in_transaction(del_user, userkey)
if None == u:
logging.error("User already deleted.")
return
def del_acct():
acct = db.get(Key.from_path("Account", "n:" + u.name))
if not acct:
logging.error("Missing account for user.")
return
acct.delete()
db.run_in_transaction(del_acct)
if "l" == cmd: # Login.
name = urllib.unquote(self.request.get("a"))
b = self.request.get("b")
if "" == b:
logging.error("No level supplied.")
self.response.out.write("Error: No level supplied.")
return
level = int(b)
logging.info("login: " + name)
# TODO: Handle other bad names.
if "" == name:
logging.error("Empty name.")
self.response.out.write("Error: Empty name.")
return
def handle_login():
acct = db.get(Key.from_path("Account", "n:" + name))
if not acct:
acct = Account(key_name="n:" + name, level=level,
nonce="%X" % random.getrandbits(64))
acct.put()
return acct.nonce
else:
return ""
nonce = db.run_in_transaction(handle_login)
if "" == nonce:
self.response.out.write("Error: Name already in use.")
else:
user = User(key_name="n:" + nonce, name=name, state=0, arg="")
user.put()
self.response.out.write(nonce)
return
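        # Login protocol (illustrative): the client issues GET /?c=l&a=<name>&b=<level>
        # and must treat any reply starting with "Error:" as a failure; any other
        # reply body is the session nonce it sends back as the i= parameter on
        # every subsequent request.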
if "L" == cmd: # Logoff.
nonce = self.request.get("i")
logging.info("logout: " + nonce)
logoff(Key.from_path("User", "n:" + nonce))
return
if "r" == cmd: # Lobby refresh.
nonce = self.request.get("i")
def heartbeat():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user: return False, None
user.atime = datetime.now()
# Someone accepted the duel.
if 2 == user.state:
user.state = 3
user.put()
return True, user
user.put()
return False, user
flag, user = db.run_in_transaction(heartbeat)
if not user:
self.response.out.write("Error: No such user ID.")
return
if flag:
self.response.out.write("\n" + user.arg + "\n" + user.duel)
return
users = db.GqlQuery("SELECT * FROM User")
for u in users:
self.response.out.write(u.name + '\n')
self.response.out.write(unicode(u.state) + '\n')
self.response.out.write(u.arg + '\n')
if 0 == u.state or 1 == u.state:
if user.atime > u.atime and (user.atime - u.atime).seconds >= 12:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
elif 9 == u.state:
# TODO: When net games become more robust, punish fleeing wizards
# with longer login bans.
if user.atime > u.atime and (user.atime - u.atime).seconds >= 4:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
      # TODO: Update user.atime in SetMove and lower timeout to a few minutes.
elif user.atime > u.atime and (user.atime - u.atime).seconds >= 2048:
logging.info(u.name + " timeout: " + unicode((user.atime - u.atime).seconds))
logoff(u.key())
return
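        # Lobby refresh response (illustrative): when an advertised duel has just
        # been accepted, the body is "\n<opponent name>\n<duel id>"; otherwise it
        # lists every logged-in wizard as repeating name/state/arg lines.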
if "n" == cmd: # New duel.
logging.info("New duel.")
a = self.request.get("a")
if "" == a:
logging.error("No level supplied.")
self.response.out.write("Error: No level supplied.")
return
level = int(a)
if level < 1 or level > 5:
logging.error("Bad level.")
self.response.out.write("Error: Bad level.")
return
nonce = self.request.get("i")
def new_duel():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user: return -2
user.atime = datetime.now()
if 0 == user.state:
user.state = 1
user.arg = a
user.put()
return 0
user.put()
return -1
status = db.run_in_transaction(new_duel)
if -2 == status:
logging.error("No such user.")
self.response.out.write("Error: No such user.")
elif -1 == status:
logging.error("User already started duel.")
self.response.out.write("Error: Already started duel.")
else:
self.response.out.write("OK")
return
if "N" == cmd: # Accept duel.
logging.info("Accept duel.")
a = urllib.unquote(self.request.get("a"))
if "" == a:
logging.error("Error: No opponent supplied.")
return
nonce = self.request.get("i")
duelid = "%X" % random.getrandbits(64)
def mark_user():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user:
return 0, "", None, -1
user.atime = datetime.now()
origstate = user.state
origarg = user.arg
# Can't accept a duel if you were advertising one and someone just
# accepted (but you don't know yet). Also can't accept a duel if
# already in one.
if 1 != user.state and 0 != user.state:
return 0, "", None, -2
user.state = 4
user.arg = a
user.duel = duelid
user.put()
return origstate, origarg, user, 0
origstate, origarg, user, status = db.run_in_transaction(mark_user)
if -1 == status:
self.response.out.write("Error: No such user ID.")
return
if -2 == status:
logging.warning("Already dueling. Ignoring.")
return
def restore():
def restore_state_arg(i, s):
user = db.get(Key.from_path("User", "n:" + nonce))
if user:
user.state = i
user.arg = s
user.put()
db.run_in_transaction(restore_state_arg, origstate, origarg)
return
acct = db.get(Key.from_path("Account", "n:" + a))
if not acct:
restore()
self.response.out.write("Error: Opponent unavailable.")
return
def accept_duel():
opp = db.get(Key.from_path("User", "n:" + acct.nonce))
if not opp: return ""
if 1 != opp.state: return ""
opp.state = 2
level = opp.arg
opp.arg = user.name
opp.duel = duelid
opp.put()
return level
level = db.run_in_transaction(accept_duel)
if "" == level:
self.response.out.write("Error: Opponent unavailable.")
restore()
logging.error("accept_duel failed.")
return
      duel = Duel(key_name = "g:" + duelid,
level = level,
now_turn = 0,
received_count = 0)
duel.put()
self.response.out.write(duelid)
      logging.info("Response: " + duelid)
return
gamename = self.request.get("g")
if "f" == cmd:
logging.info("Game " + gamename + " finished.")
nonce = self.request.get("i")
def restate_user():
user = db.get(Key.from_path("User", "n:" + nonce))
if not user:
return None
user.atime = datetime.now()
user.state = 0
user.put()
return user
user = db.run_in_transacti |
krisys/SpojBot | src/spojbot/bot/management/commands/migrate_suggestions.py | Python | mit | 707 | 0.005658 | from django.core.management.base import BaseCommand
from bot.models import *
from datetime import datetime, timedelta, date
class Command(BaseCommand):
args = 'No args'
help = 'Move problems from SuggestedProblem to ProblemSuggestion'
    def handle(self, *args, **options):
for spojuser in SpojUser.objects.all():
user_belongs_to = [x.group for x in GroupMember.objects.filter(user=spojuser.user)]
for problem in SuggestedProblem.objects.filter(group__in=user_belongs_to).order_by('timestamp'):
try:
                    ProblemSuggestion.objects.create(user=spojuser.user, problem=problem.problem)
except:
pass
|
ML-KULeuven/socceraction | tests/data/test_load_opta.py | Python | mit | 8,173 | 0.000856 | import os
import pytest
from py.path import local
from socceraction.data import opta as opta
from socceraction.data.opta import (
OptaCompetitionSchema,
OptaEventSchema,
OptaGameSchema,
OptaPlayerSchema,
OptaTeamSchema,
)
def test_create_opta_json_loader(tmpdir: local) -> None:
"""It should be able to parse F1, f9 and F24 JSON feeds."""
feeds = {
"f1": "f1-{competition_id}-{season_id}-{game_id}.json",
"f9": "f9-{competition_id}-{season_id}-{game_id}.json",
"f24": "f24-{competition_id}-{season_id}-{game_id}.json",
}
loader = opta.OptaLoader(root=str(tmpdir), parser="json", feeds=feeds)
assert loader.parsers == {
"f1": opta.parsers.F1JSONParser,
"f9": opta.parsers.F9JSONParser,
"f24": opta.parsers.F24JSONParser,
}
def test_create_opta_xml_loader(tmpdir: local) -> None:
"""It should be able to parse F7 and F24 XML feeds."""
feeds = {
"f7": "f7-{competition_id}-{season_id}-{game_id}.json",
"f24": "f24-{competition_id}-{season_id}-{game_id}.json",
}
loader = opta.OptaLoader(root=str(tmpdir), parser="xml", feeds=feeds)
assert loader.parsers == {
"f7": opta.parsers.F7XMLParser,
"f24": opta.parsers.F24XMLParser,
}
def test_create_statsperform_loader(tmpdir: local) -> None:
"""It should be able to parse MA1 and MA3 StatsPerfrom feeds."""
feeds = {
"ma1": "ma1-{competition_id}-{season_id}-{game_id}.json",
"ma3": "ma3-{competition_id}-{season_id}-{game_id}.json",
}
loader = opta.OptaLoader(root=str(tmpdir), parser="statsperform", feeds=feeds)
assert loader.parsers == {
"ma1": opta.parsers.MA1JSONParser,
"ma3": opta.parsers.MA3JSONParser,
}
def test_create_whoscored_loader(tmpdir: local) -> None:
"""It should be able to parse WhoScored feeds."""
feeds = {
"whoscored": "{competition_id}-{season_id}-{game_id}.json",
}
loader = opta.OptaLoader(root=str(tmpdir), parser="whoscored", feeds=feeds)
assert loader.parsers == {
"whoscored": opta.parsers.WhoScoredParser,
}
def test_create_custom_loader(tmpdir: local) -> None:
"""It should support a custom feed and parser."""
feeds = {
"myfeed": "{competition_id}-{season_id}-{game_id}.json",
}
parser = {
"myfeed": opta.parsers.base.OptaParser,
}
loader = opta.OptaLoader(root=str(tmpdir), parser=parser, feeds=feeds)
assert loader.parsers == {
"myfeed": opta.parsers.base.OptaParser,
}
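# Sketch of what a custom feed parser might look like (hypothetical: the
# extract_games hook name and its return shape are assumptions about the
# OptaParser interface, not something this test asserts):
#
#   import json
#
#   class MyFeedParser(opta.parsers.base.OptaParser):
#       def __init__(self, path):
#           with open(path) as f:
#               self.root = json.load(f)
#
#       def extract_games(self):
#           return {g["id"]: g for g in self.root["games"]}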
def test_create_loader_with_unsupported_feed(tmpdir: local) -> None:
"""It should warn if a feed is not supported."""
feeds = {
"f0": "f0-{competition_id}-{season_id}-{game_id}.json",
}
with pytest.warns(
UserWarning, match="No parser available for f0 feeds. This feed is ignored."
):
loader = opta.OptaLoader(root=str(tmpdir), parser="json", feeds=feeds)
assert loader.parsers == {}
def test_create_invalid_loader(tmpdir: local) -> None:
"""It should raise an error if the parser is not supported."""
feeds = {
"myfeed": "{competition_id}-{season_id}-{game_id}.json",
}
with pytest.raises(ValueError):
opta.OptaLoader(root=str(tmpdir), parser="wrong", feeds=feeds)
def test_deepupdate() -> None:
"""It should update a dict with another dict."""
# list
t1 = {'name': 'ferry', 'hobbies': ['programming', 'sci-fi']}
opta.loader._deepupdate(t1, {'hobbies': ['gaming'], 'jobs': ['student']})
assert t1 == {
'name': 'ferry',
'hobbies': ['programming', 'sci-fi', 'gaming'],
'jobs': ['student'],
}
# set
t2 = {'name': 'ferry', 'hobbies': {'programming', 'sci-fi'}}
opta.loader._deepupdate(t2, {'hobbies': {'gaming'}, 'jobs': {'student'}})
assert t2 == {
'name': 'ferry',
'hobbies': {'programming', 'sci-fi', 'gaming'},
'jobs': {'student'},
}
# dict
t3 = {'name': 'ferry', 'hobbies': {'programming': True, 'sci-fi': True}}
opta.loader._deepupdate(t3, {'hobbies': {'gaming': True}})
assert t3 == {
'name': 'ferry',
'hobbies': {'programming': True, 'sci-fi': True, 'gaming': True},
}
# value
t4 = {'name': 'ferry', 'hobby': 'programming'}
opta.loader._deepupdate(t4, {'hobby': 'gaming'})
assert t4 == {'name': 'ferry', 'hobby': 'gaming'}
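# Reference sketch of the merge semantics exercised above: a simplified
# reimplementation for illustration (the helper under test is
# socceraction.data.opta.loader._deepupdate):
def _deepupdate_sketch(target: dict, src: dict) -> None:
    """Merge src into target: extend lists, union sets, recurse into dicts,
    and overwrite plain values."""
    for key, value in src.items():
        if isinstance(value, list):
            target.setdefault(key, []).extend(value)
        elif isinstance(value, set):
            target.setdefault(key, set()).update(value)
        elif isinstance(value, dict):
            _deepupdate_sketch(target.setdefault(key, {}), value)
        else:
            target[key] = value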
class TestJSONOptaLoader:
def setup_method(self) -> None:
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "opta")
self.loader = opta.OptaLoader(
root=data_dir,
parser="json",
feeds={
"f1": "tournament-{season_id}-{competition_id}.json",
"f9": "match-{season_id}-{competition_id}-{game_i | d}.json",
"f24": "match-{season_id}-{competition_id}-{game_id}.json",
},
)
def test_competitions(self) -> None:
df_competitions = self.loader.competitions()
assert len(df_competitions) > 0
OptaCompetitionSchema.validate(df_competitions)
    def test_games(self) -> None:
df_games = self.loader.games(8, 2017)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self) -> None:
df_teams = self.loader.teams(918893)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self) -> None:
df_players = self.loader.players(918893)
assert len(df_players) == 27
OptaPlayerSchema.validate(df_players)
def test_events(self) -> None:
df_events = self.loader.events(918893)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
class TestXMLOptaLoader:
def setup_method(self) -> None:
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "opta")
self.loader = opta.OptaLoader(
root=data_dir,
parser="xml",
feeds={
"f7": "f7-{competition_id}-{season_id}-{game_id}-matchresults.xml",
"f24": "f24-{competition_id}-{season_id}-{game_id}-eventdetails.xml",
},
)
def test_competitions(self) -> None:
df_competitions = self.loader.competitions()
assert len(df_competitions) > 0
OptaCompetitionSchema.validate(df_competitions)
def test_games(self) -> None:
df_games = self.loader.games(23, 2018)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self) -> None:
df_teams = self.loader.teams(1009316)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self) -> None:
df_players = self.loader.players(1009316)
assert len(df_players) == 36
OptaPlayerSchema.validate(df_players)
def test_events(self) -> None:
df_events = self.loader.events(1009316)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
class TestWhoscoredLoader:
def setup_method(self) -> None:
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "whoscored")
self.loader = opta.OptaLoader(
root=data_dir,
parser="whoscored",
feeds={"whoscored": "{game_id}.json"},
)
# def test_competitions(self) -> None:
# df_competitions = self.loader.competitions()
# assert len(df_competitions) == 0
def test_games(self) -> None:
df_games = self.loader.games(23, 2018)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self) -> None:
df_teams = self.loader.teams(1005916)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self) -> None:
df_players = self.loader.players(1005916)
assert len(df_players) == 44
OptaPlayerSchema.validate(df_players)
def test_events(self) -> None:
df_events = self.loader.events(1005916)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
|
mais4719/PyLuxafor | pyluxafor/cli.py | Python | gpl-3.0 | 9,126 | 0.00263 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple command line interface to pyluxafor.
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from pyluxafor import Devices
from pyluxafor import Wave, Pattern, Leds
from pyluxafor import __version__
__author__ = 'Magnus Isaksson'
__copyright__ = 'Magnus Isaksson'
__license__ = 'gpl3'
_logger = logging.getLogger(__name__)
def add_jump2color_parser(subparsers):
parser = subparsers.add_parser('jump2color',
description='Switches color on your Luxafor device.',
help='Switches color on your Luxafor device.')
parser.set_defaults(runner=jump2color)
parser.add_argument('-c',
'--color',
required=True,
help='Color in 3 hex codes (e.g. #00FF00 for green).')
def jump2color(args):
with Devices().first as d:
d.jump2color(args.color, leds=Leds.all)
return 'Jumping to color: {}'.format(args.color)
def add_fade2color_parser(subparsers):
parser = subparsers.add_parser('fade2color',
description='Fade to color on your Luxafor device.',
help='Fade to color on your Luxafor device.')
parser.set_defaults(runner=fade2color)
parser.add_argument('-c',
'--color',
required=True,
help='Color in 3 hex codes (e.g. #00FF00 for green).')
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
help='Fading speed [0-255], low value equals higher speed.')
def fade2color(args):
if args.speed < 0 or args.speed > 255:
return 'Error: Speed needs to be an integer between 0 and 255.'
with Devices().first as d:
d.fade2color(args.color, leds=Leds.all, speed=args.speed)
return 'Fading to color: {} with speed: {}'.format(args.color, args.speed)
def add_blink_parser(subparsers):
parser = subparsers.add_parser('blink',
description='Blink color on your Luxafor device.',
help='Blink color on your Luxafor device.')
parser.set_defaults(runner=blink)
parser.add_argument('-c',
'--color',
required=True,
help='Color in 3 hex codes (e.g. #00FF00 for green).')
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
help='Blink speed [0-255], low value equals higher speed.')
parser.add_argument('-r',
'--repeats',
required=False,
default=2,
type=int,
help='Repeats [1-255].')
def blink(args):
if args.speed < 0 or args.speed > 255:
        return 'Error: Speed needs to be an integer between 0 and 255.'
if args.repeats < 1 or args.repeats > 255:
return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
d.blink(args.color, leds=Leds.all, speed=args.speed, repeats=args.repeats)
return 'Blinking color: {}, {} times with speed: {}'.format(args.color, args.repeats, args.speed)
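# Hypothetical helper that would capture the bounds check repeated by the
# runner functions in this module; shown for illustration only, the runners
# above and below keep their explicit checks:
def _check_range(name, value, lower, upper):
    """Return an error string if value falls outside [lower, upper], else ''."""
    if value < lower or value > upper:
        return 'Error: {} needs to be an integer between {} and {}.'.format(
            name, lower, upper)
    return ''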
def add_pattern_parser(subparsers):
parser = subparsers.add_parser('pattern',
description='Run pattern on your Luxafor device.',
help='Run pattern on your Luxafor device.')
parser.set_defaults(runner=pattern)
parser.add_argument('-p',
'--pattern',
required=True,
help=', '.join([p for p in Pattern._fields]))
parser.add_argument('-r',
'--repeats',
type=int,
default=2,
required=False,
help='Repeats [1-255].')
def pattern(args):
args.pattern = args.pattern.lower()
if args.pattern not in Pattern._fields:
return 'Error: {} is not a valid pattern.'.format(args.pattern)
if args.repeats < 1 or args.repeats > 255:
        return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
d.pattern(pattern_type=getattr(Pattern, args.pattern), repeats=args.repeats)
return 'Running pattern {} {} times.'.format(args.pattern, args.repeats)
def add_wave_parser(subparsers):
parser = subparsers.add_parser('wave',
description='Run wave on your Luxafor device.',
help='Run wave on your Luxafor device.')
parser.set_defaults(runner=wave)
parser.add_argument('-c',
'--color',
required=True,
help='Color in 3 hex codes (e.g. #00FF00 for green).')
parser.add_argument('-w',
'--wave',
required=True,
help=', '.join([p for p in Wave._fields]))
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
help='Blink speed [0-255], low value equals higher speed.')
parser.add_argument('-r',
'--repeats',
required=False,
default=2,
type=int,
                        help='Repeats [1-255].')
def wave(args):
    args.wave = args.wave.lower()
    if args.wave not in Wave._fields:
        return 'Error: {} is not a valid wave type.'.format(args.wave)
if args.speed < 0 or args.speed > 255:
return 'Error: Speed needs to be an integer between 0 and 255.'
if args.repeats < 1 or args.repeats > 255:
        return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
d.wave(color=args.color, wave_type=getattr(Wave, args.wave), speed=args.speed, repeats=args.repeats)
return 'Running a {} wave with color {} and speed {}, {} times.'.format(args.wave, args.color,
args.speed, args.repeats)
def add_off_parser(subparsers):
parser = subparsers.add_parser('off',
description='Turn all LEDs of on your Luxafor device.',
help='Turn all LEDs of on your Luxafor device.')
parser.set_defaults(runner=off)
def off(args):
with Devices().first as d:
d.off()
return 'Turning of all LEDs on your device.'
def add_list_devices_parser(subparsers):
parser = subparsers.add_parser('devices',
description='List all Luxafor devices found on your system.',
help='List all Luxafor devices found on your system.')
parser.set_defaults(runner=list_devices)
def list_devices(args):
ans_str = 'Sorry, no Luxafor device found in the system.'
devices = ['Product: {}, Manufacturer: {}, Serial #: {}'.format(d.conn.product,
d.conn.manufacturer,
d.conn.serial_number.encode('utf-8')) for d in Devices().list]
if devices:
ans_str = '\n'.join(devices)
return '\nFound {} devices connected to your system.\n{}'.format(len(devices), ans_str)
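# Dispatch convention (illustrative): every add_*_parser above registers its
# handler via parser.set_defaults(runner=...), so a typical entry point
# reduces to:
#
#   args = parse_args(sys.argv[1:])
#   print(args.runner(args))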
def parse_args(args):
"""
Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Simple command line interfa |
SauloAislan/ironic | ironic/tests/unit/drivers/test_generic.py | Python | apache-2.0 | 4,019 | 0 | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers import base as driver_base
from ironic.drivers.modules import agent
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class ManualManagementHardwareTestCase(db_base.DbTestCase):
def setUp(self):
super(ManualManagementHardwareTestCase, self).setUp()
self.config(enabled_hardware_types=['manual-management'],
enabled_power_interfaces=['fake'],
enabled_management_interfaces=['fake'],
enabled_inspect_interfaces=['no-inspect'])
self.config(enabled=True, group='inspector')
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context,
driver='manual-management')
with task_manager.acquire(self.context, node.id) as task:
self.assertIsInstance(task.driver.management, fake.FakeManagement)
self.assertIsInstance(task.driver.power, fake.FakePower)
self.assertIsInstance(task.driver.boot, pxe.PXEBoot)
self.assertIsInstance(task.driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(task.driver.inspect, noop.NoInspect)
self.assertIsInstance(task.driver.raid, noop.NoRAID)
def test_supported_interfaces(self):
self.config(enabled_inspect_interfaces=['inspector', 'no-inspect'])
node = obj_utils.create_test_node(self.context,
driver='manual-management',
deploy_interface='direct',
raid_interface='agent')
with task_manager.acquire(self.context, node.id) as task:
self.assertIsInstance(task.driver.management, fake.FakeManagement)
self.assertIsInstance(task.driver.power, fake.FakePower)
self.assertIsInstance(task.driver.boot, pxe.PXEBoot)
self.assertIsInstance(task.driver.deploy, agent.AgentDeploy)
self.assertIsInstance(task.driver.inspect, inspector.Inspector)
self.assertIsInstance(task.driver.raid, agent.AgentRAID)
def test_get_properties(self):
# These properties are from vendor (agent) and boot (pxe) interfaces
expected_prop_keys = [
'deploy_forces_oob_reboot', 'deploy_kernel', 'deploy_ramdisk']
hardware_type = driver_factory.get_hardware_type("manual-management")
properties = hardware_type.get_properties()
self.assertEqual(sorted(expected_prop_keys), sorted(properties.keys()))
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_get_properties_none(self, mock_def_iface):
hardware_type = driver_factory.get_hardware_type("manual-management")
mock_def_iface.side_effect = exception.NoValidDefaultForInterface("no")
properties = hardware_type.get_properties()
self.assertEqual({}, properties)
self.assertEqual(len(driver_base.ALL_INTERFACES),
mock_def_iface.call_count)
|