repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
eliethesaiyan/ObliviousDecisionTree | ObliviousTree.py | Python | mit | 3,395 | 0.032401 | import numpy as np
class ObliviousTree:
'This is an implementation of Oblvious Tree'
def __init__(self,data=[],func="C4.5",autoload=False):
self.data=data
self.split_funct=func
self.autoload=autoload
self.feature_names=self.data.columns.tolist()
self.feature_name_domains={}
self.category_level={}
self.level=0
self.levels_nodes={}
def load_tree(self):
if not self.autoload:
print("skipped autoloading")
else:
print(self.data)
print(self.feature_names)
def build_branching_node_label_domains(self):
print(self.split_function())
def split_function(self):
if self.split_funct=="hardcoded":
return self.hardcoded_features()
def hardcoded_features(self):
for feature_name in self.feature_names:
self.feature_name_domains[feature_name]=np.unique(self.data[feature_name])
return self.feature_name_domains
def create(se | lf):
is_root=True
for feature_name in self.feature_name_domains:
level_list_nodes=[]
if fea | ture_name=="label":
continue
if is_root:
root_edge_list=[]
print(feature_name)
root_domain=self.feature_name_domains[feature_name]
root=Node(feature_name,self.data,100.0,root_domain)
for domain in root_domain:
branching_dataset=root.dataset[feature_name]=domain
print(branching_dataset)
braching_node=BranchingNode(feature_name,branching_dataset,branching_dataset.shape[0]/root.dataset.shape[0],domain,None)
root_edge_list.append(Edge(domain,root,braching_node))
root.set_edge_list(root_edge_list)
is_root=False
self.level+=1
print(root)
class Node:
def __init__(self,feature_name,dataset,probability,feature_domain,edge_list=None):
self.dataset=dataset
self.feature_domain=feature_domain
self.feature_name=feature_name
self.probability=probability
self.edge_list=edge_list
def set_edge_list(self,edge_list=None):
self.edge_list=edge_list
class BranchingNode(Node):
def __init__(self,feature_name,dataset,probability,feature_domain,edge_list=None):
Node.__init__(self,feature_name,dataset,probability,feature_domain,edge_list)
class CategoryNode(Node):
def __init__(self,feature_name,dataset,probability,feature_domain,edge_list=None):
Node.__init__(self,feature_name,dataset,feature_domain,probability,edge_list)
self.edge_list=None
class Edge:
def __init__(self,label,incoming_node=None,outgoing_node=None):
self.label=label
self.incoming_node=incoming_node
self.outgoing_node=outgoing_node
def display(self):
print(self.label)
def get_incoming_node(self):
if not self.incoming_node:
print("No incoming node")
return
return self.incoming_node
def get_outgoing_node(self):
if not self.outgoing_node:
print("No outgong Node")
return
return self.outgoing_node
|
cryptickp/python-neutronclient | neutronclient/tests/unit/test_cli20.py | Python | apache-2.0 | 31,625 | 0 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import copy
import itertools
import sys
import fixtures
from mox3 import mox
from oslo_utils import encodeutils
from oslotest import base
import requests
import six
import six.moves.urllib.parse as urlparse
from neutronclient.common import constants
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV2_0
from neutronclient import shell
from neutronclient.v2_0 import client
API_VERSION = "2.0"
FORMAT = 'json'
TOKEN = 'testtoken'
ENDURL = 'localurl'
non_admin_status_resources = ['subnet', 'floatingip', 'security_group',
'security_group_rule', 'qos_queue',
'network_gateway', 'gateway_device',
'ikepolicy',
'ipsecpolicy', 'metering_label',
'metering_label_rule', 'net_partition',
'fox_socket', 'subnetpool',
'rbac_policy', 'address_scope',
'policy', 'bandwidth_limit_rule']
@contextlib.contextmanager
def capture_std_streams():
fake_stdout, fake_stderr = six.StringIO(), six.StringIO()
stdout, stderr = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = fake_stdout, fake_stderr
yield fake_stdout, fake_stderr
finally:
sys.stdout, sys.stderr = stdout, stderr
class FakeStdout(object):
def __init__(self):
self.content = []
def write(self, text):
| self.content.append(text)
def make_string(self):
result = ''
for line in self.content:
result = result + line
return result
class MyResp(object):
def __init__(self, status_code, headers=None, reason=None):
self.status_code = status_code
| self.headers = headers or {}
self.reason = reason
class MyApp(object):
def __init__(self, _stdout):
self.stdout = _stdout
def end_url(path, query=None, format=FORMAT):
_url_str = ENDURL + "/v" + API_VERSION + path + "." + format
return query and _url_str + "?" + query or _url_str
class MyUrlComparator(mox.Comparator):
def __init__(self, lhs, client):
self.lhs = lhs
self.client = client
def equals(self, rhs):
lhsp = urlparse.urlparse(self.lhs)
rhsp = urlparse.urlparse(rhs)
lhs_qs = urlparse.parse_qsl(lhsp.query)
rhs_qs = urlparse.parse_qsl(rhsp.query)
return (lhsp.scheme == rhsp.scheme and
lhsp.netloc == rhsp.netloc and
lhsp.path == rhsp.path and
len(lhs_qs) == len(rhs_qs) and
set(lhs_qs) == set(rhs_qs))
def __str__(self):
if self.client and self.client.format != FORMAT:
lhs_parts = self.lhs.split("?", 1)
if len(lhs_parts) == 2:
lhs = ("%s.%s?%s" % (lhs_parts[0][:-4],
self.client.format,
lhs_parts[1]))
else:
lhs = ("%s.%s" % (lhs_parts[0][:-4],
self.client.format))
return lhs
return self.lhs
def __repr__(self):
return str(self)
class MyComparator(mox.Comparator):
def __init__(self, lhs, client):
self.lhs = lhs
self.client = client
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in six.iteritems(lhs):
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
if self.client:
rhs = self.client.deserialize(rhs, 200)
return self._com(self.lhs, rhs)
def __repr__(self):
if self.client:
return self.client.serialize(self.lhs)
return str(self.lhs)
class CLITestV20Base(base.BaseTestCase):
format = 'json'
test_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
id_field = 'id'
def _find_resourceid(self, client, resource, name_or_id,
cmd_resource=None, parent_id=None):
return name_or_id
def _get_attr_metadata(self):
return self.metadata
def setUp(self, plurals=None):
"""Prepare the test environment."""
super(CLITestV20Base, self).setUp()
client.Client.EXTED_PLURALS.update(constants.PLURALS)
self.non_admin_status_resources = copy.copy(non_admin_status_resources)
if plurals is not None:
client.Client.EXTED_PLURALS.update(plurals)
self.metadata = {'plurals': client.Client.EXTED_PLURALS,
'xmlns': constants.XML_NS_V20,
constants.EXT_NS: {'prefix':
'http://xxxx.yy.com'}}
self.mox = mox.Mox()
self.endurl = ENDURL
self.fake_stdout = FakeStdout()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.fake_stdout))
self.useFixture(fixtures.MonkeyPatch(
'neutronclient.neutron.v2_0.find_resourceid_by_name_or_id',
self._find_resourceid))
self.useFixture(fixtures.MonkeyPatch(
'neutronclient.neutron.v2_0.find_resourceid_by_id',
self._find_resourceid))
self.useFixture(fixtures.MonkeyPatch(
'neutronclient.v2_0.client.Client.get_attr_metadata',
self._get_attr_metadata))
self.client = client.Client(token=TOKEN, endpoint_url=self.endurl)
def register_non_admin_status_resource(self, resource_name):
self.non_admin_status_resources.append(resource_name)
def _test_create_resource(self, resource, cmd, name, myid, args,
position_names, position_values,
tenant_id=None, tags=None, admin_state_up=True,
extra_body=None, cmd_resource=None,
parent_id=None, no_api_call=False,
expected_exception=None,
**kwargs):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
if not cmd_resource:
cmd_resource = resource
if (resource in self.non_admin_status_resources):
body = {resource: {}, }
else:
body = {resource: {'admin_state_up': admin_state_up, }, }
if tenant_id:
body[resource].update({'tenant_id': tenant_id})
if tags:
|
CaliOpen/CaliOpen | src/backend/components/py.pi/caliopen_pi/features/mail.py | Python | gpl-3.0 | 8,082 | 0 | # -*- coding: utf-8 -*-
"""Caliopen mail message privacy features extraction methods."""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pgpy
from caliopen_main.pi.parameters import PIParameter
from .helpers.spam import SpamScorer
from .helpers.ingress_path import get_ingress_features
from .helpers.importance_level import compute_importance
from .types import init_features
log = logging.getLogger(__name__)
TLS_VERSION_PI = {
'tlsv1/sslv3': 2,
'tls1': 7,
'tlsv1': 7,
'tls12': 10,
}
PGP_MESSAGE_HEADER = '\n-----BEGIN PGP MESSAGE-----'
class InboundMailFeature(object):
"""Process a parsed mail message and extract available privacy features."""
def __init__(self, message, config):
"""Get a ``MailMessage`` instance and extract privacy features."""
self.message = message
self.config = config
self._features = init_features('message')
def is_blacklist_mx(self, mx):
"""MX is blacklisted."""
blacklisted = self.config.get('blacklistes.mx')
if not blacklisted:
return False
if mx in blacklisted:
return True
return False
def is_whitelist_mx(self, mx):
"""MX is whitelisted."""
whitelistes = self.config.get('whitelistes.mx')
| if not whitelistes:
return False
if mx in whitelistes:
return True
return False
@property
def internal_domains(self):
"""Get internal hosts from configuration."""
domains = self.config | .get('internal_domains')
return domains if domains else []
def emitter_reputation(self, mx):
"""Return features about emitter."""
if self.is_blacklist_mx(mx):
return 'blacklisted'
if self.is_whitelist_mx(mx):
return 'whitelisted'
return 'unknown'
def emitter_certificate(self):
"""Get the certificate from emitter."""
return None
@property
def mail_agent(self):
"""Get the mailer used for this message."""
# XXX normalize better and more ?
return self.message.mail.get('X-Mailer', '').lower()
@property
def transport_signature(self):
"""Get the transport signature if any."""
return self.message.mail.get('DKIM-Signature')
@property
def spam_informations(self):
"""Return a global spam_score and related features."""
spam = SpamScorer(self.message.mail)
return {'spam_score': spam.score,
'spam_method': spam.method,
'is_spam': spam.is_spam}
@property
def is_internal(self):
"""Return true if it's an internal message."""
from_ = self.message.mail.get('From')
for domain in self.internal_domains:
if domain in from_:
return True
return False
def get_signature_informations(self):
"""Get message signature features."""
signed_parts = [x for x in self.message.attachments
if 'pgp-sign' in x.content_type]
if not signed_parts:
return {}
sign = pgpy.PGPSignature()
features = {'message_signed': True,
'message_signature_type': 'PGP'}
try:
sign.parse(signed_parts[0].data)
features.update({'message_signer': sign.signer})
except Exception as exc:
log.error('Unable to parse pgp signature {}'.format(exc))
return features
def get_encryption_informations(self):
"""Get message encryption features."""
is_encrypted = False
if 'encrypted' in self.message.extra_parameters:
is_encrypted = True
# Maybe pgp/inline ?
if not is_encrypted:
try:
body = self.message.body_plain.decode('utf-8')
if body.startswith(PGP_MESSAGE_HEADER):
is_encrypted = True
except UnicodeDecodeError:
log.warn('Invalid body_plain encoding for message')
pass
return {'message_encrypted': is_encrypted,
'message_encryption_method': 'pgp' if is_encrypted else ''}
def _get_features(self):
"""Extract privacy features."""
features = self._features.copy()
received = self.message.headers.get('Received', [])
features.update(get_ingress_features(received, self.internal_domains))
mx = features.get('ingress_server')
reputation = None if not mx else self.emitter_reputation(mx)
features['mail_emitter_mx_reputation'] = reputation
features['mail_emitter_certificate'] = self.emitter_certificate()
features['mail_agent'] = self.mail_agent
features['is_internal'] = self.is_internal
features.update(self.get_signature_informations())
features.update(self.get_encryption_informations())
features.update(self.spam_informations)
if self.transport_signature:
features.update({'transport_signed': True})
return features
def _compute_pi(self, participants, features):
"""Compute Privacy Indexes for a message."""
log.info('PI features {}'.format(features))
pi_cx = {} # Contextual privacy index
pi_co = {} # Comportemental privacy index
pi_t = {} # Technical privacy index
reput = features.get('mail_emitter_mx_reputation')
if reput == 'whitelisted':
pi_cx['reputation_whitelist'] = 20
elif reput == 'unknown':
pi_cx['reputation_unknow'] = 10
known_contacts = []
known_public_key = 0
for part, contact in participants:
if contact:
known_contacts.append(contact)
if contact.public_key:
known_public_key += 1
if len(participants) == len(known_contacts):
# - Si tous les contacts sont déjà connus le PIᶜˣ
# augmente de la valeur du PIᶜᵒ le plus bas des PIᶜᵒ des contacts.
contact_pi_cos = [x.pi['comportment'] for x in known_contacts
if x.pi and 'comportment' in x.pi]
if contact_pi_cos:
pi_cx['known_contacts'] = min(contact_pi_cos)
if known_public_key == len(known_contacts):
pi_co['contact_pubkey'] = 20
ext_hops = features.get('nb_external_hops', 0)
if ext_hops <= 1:
tls = features.get('ingress_socket_version')
if tls:
if tls not in TLS_VERSION_PI:
log.warn('Unknown TLS version {}'.format(tls))
else:
pi_t += TLS_VERSION_PI[tls]
if features.get('mail_emitter_certificate'):
pi_t['emitter_certificate'] = 10
if features.get('transport_signed'):
pi_t['transport_signed'] = 10
if features.get('message_encrypted'):
pi_t['encrypted'] = 30
log.info('PI compute t:{} cx:{} co:{}'.format(pi_t, pi_cx, pi_co))
return PIParameter({'technic': sum(pi_t.values()),
'context': sum(pi_cx.values()),
'comportment': sum(pi_co.values()),
'version': 0})
def process(self, user, message, participants):
"""
Process the message for privacy features and PI compute.
:param user: user the message belong to
:ptype user: caliopen_main.user.core.User
:param message: a message parameter that will be updated with PI
:ptype message: NewMessage
:param participants: an array of participant with related Contact
:ptype participants: list(Participant, Contact)
"""
features = self._get_features()
message.pi = self._compute_pi(participants, features)
il = compute_importance(user, message, features, participants)
message.privacy_features = features
message.importance_level = il
|
google/fhir | py/google/fhir/stu3/json_format_test.py | Python | apache-2.0 | 70,708 | 0.003974 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test FHIR STU3 parsing/printing functionality."""
import os
from typing import TypeVar, Type
from google.protobuf import message
from absl.testing import absltest
from absl.testing import parameterized
from proto.google.fhir.proto.stu3 import datatypes_pb2
from proto.google.fhir.proto.stu3 import resources_pb2
from google.fhir.json_format import json_format_test
from google.fhir.stu3 import json_format
from google.fhir.testing import testdata_utils
from google.fhir.utils import proto_utils
_BIGQUERY_PATH = os.path.join('testdata', 'stu3', 'bigquery')
_EXAMPLES_PATH = os.path.join('testdata', 'stu3', 'examples')
_FHIR_SPEC_PATH = os.path.join('spec', 'hl7.fhir.core', '3.0.1', 'package')
_VALIDATION_PATH = os.path.join('testdata', 'stu3', 'validation')
_T = TypeVar('_T', bound=message.Message)
class JsonFormatTest(json_format_test.JsonFormatTest):
"""Unit tests for functionality in json_format.py."""
@parameterized.named_parameters(
('_withCodeSystemV20003', 'CodeSystem-v2-0003'),
('_withCodeSystemv20061', 'CodeSystem-v2-0061'),
)
def testJsonFormat_forResourceWithPrimitiveExtensionNested | ChoiceType_succeeds(
self, file_name: str):
"""Tests parsing/printing with a primitive ext | ension nested choice field."""
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.CodeSystem)
@parameterized.named_parameters(
('_withAccountExample', 'Account-example'),
('_withAccountEwg', 'Account-ewg'),
)
def testJsonFormat_forValidAccount_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Account)
@parameterized.named_parameters(
('_withActivityDefinitionReferralPrimaryCareMentalHealth',
'ActivityDefinition-referralPrimaryCareMentalHealth'),
('_withActivityDefinitionCitalopramPrescription',
'ActivityDefinition-citalopramPrescription'),
('_withActivityDefinitionReferralPrimaryCareMentalHealthInitial',
'ActivityDefinition-referralPrimaryCareMentalHealth-initial'),
('_withActivityDefinitionHeartValveReplacement',
'ActivityDefinition-heart-valve-replacement'),
('_withActivityDefinitionBloodTubesSupply',
'ActivityDefinition-blood-tubes-supply'),
)
def testJsonFormat_forValidActivityDefinition_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.ActivityDefinition)
@parameterized.named_parameters(
('_withAdverseEventExample', 'AdverseEvent-example'),)
def testJsonFormat_forValidAdverseEvent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.AdverseEvent)
@parameterized.named_parameters(
('_withAllergyIntoleranceExample', 'AllergyIntolerance-example'),)
def testJsonFormat_forValidAllergyIntolerance_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.AllergyIntolerance)
@parameterized.named_parameters(
('_withAppointmentExample', 'Appointment-example'),
('_withAppointment2docs', 'Appointment-2docs'),
('_withAppointmentExampleReq', 'Appointment-examplereq'),
)
def testJsonFormat_forValidAppointment_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Appointment)
@parameterized.named_parameters(
('_withAppointmentResponseExample', 'AppointmentResponse-example'),
('_withAppointmentResponseExampleResp',
'AppointmentResponse-exampleresp'),
)
def testJsonFormat_forValidAppointmentResponse_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.AppointmentResponse)
@parameterized.named_parameters(
('_withAuditEventExample', 'AuditEvent-example'),
('_withAuditEventExampleDisclosure', 'AuditEvent-example-disclosure'),
('_withAuditEventExampleLogin', 'AuditEvent-example-login'),
('_withAuditEventExampleLogout', 'AuditEvent-example-logout'),
('_withAuditEventExampleMedia', 'AuditEvent-example-media'),
('_withAuditEventExamplePixQuery', 'AuditEvent-example-pixQuery'),
('_withAuditEventExampleSearch', 'AuditEvent-example-search'),
('_withAuditEventExampleRest', 'AuditEvent-example-rest'),
)
def testJsonFormat_forValidAuditEvent_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.AuditEvent)
@parameterized.named_parameters(
('_withBasicReferral', 'Basic-referral'),
('_withBasicClassModel', 'Basic-classModel'),
('_withBasicBasicExampleNarrative', 'Basic-basic-example-narrative'),
)
def testJsonFormat_forValidBasic_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Basic)
@parameterized.named_parameters(
('_withBodySiteFetus', 'BodySite-fetus'),
('_withBodySiteSkinPatch', 'BodySite-skin-patch'),
('_withBodySiteTumor', 'BodySite-tumor'),
)
def testJsonFormat_forValidBodySite_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.BodySite)
@parameterized.named_parameters(
('_withBundleBundleExample', 'Bundle-bundle-example'),
('_withBundle72ac849352ac41bd8d5d7258c289b5ea',
'Bundle-72ac8493-52ac-41bd-8d5d-7258c289b5ea'),
('_withBundleHla1', 'Bundle-hla-1'),
('_withBundleFather', 'Bundle-father'),
('_withBundleB0a5e427783c44adb87e2E3efe3369b6f',
'Bundle-b0a5e4277-83c4-4adb-87e2-e3efe3369b6f'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897819',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897819'),
('_withPatientExamplesCypressTemplate',
'patient-examples-cypress-template'),
('_withBundleB248b1b216864b94993637d7a5f94b51',
'Bundle-b248b1b2-1686-4b94-9936-37d7a5f94b51'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897809',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897809'),
('_withBundle3ad0687eF477468cAfd5Fcc2bf897808',
'Bundle-3ad0687e-f477-468c-afd5-fcc2bf897808'),
('_withBundleUssgFht', 'Bundle-ussg-fht'),
('_withBundleXds', 'Bundle-xds'),
)
def testJsonFormat_forValidBundle_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(file_name,
resources_pb2.Bundle)
@parameterized.named_parameters(
('_withCapabilityStatementExample', 'CapabilityStatement-example'),
('_withCapabilityStatementPhr', 'CapabilityStatement-phr'),
)
def testJsonFormat_forValidCapabilityStatement_succeeds(self, file_name: str):
self.assert_parse_and_print_spec_equals_golden(
file_name, resources_pb2.CapabilityStatement)
@parameterized.named_parameters(
('_withCarePlanExample', 'CarePlan-example'),
('_withCarePlanF001', 'CarePlan-f001'),
('_withCarePlanF002', 'CarePlan-f002'),
('_withCarePlanF003', 'CarePlan-f003'),
('_withCarePlanF201', 'CarePlan-f201'),
('_withCarePlanF202', 'CarePlan-f202'),
('_withCarePlanF203', 'CarePlan-f203'),
('_withCarePlanGpvisit', 'CarePlan-gpvisit'),
|
fstagni/DIRAC | FrameworkSystem/DB/UserProfileDB.py | Python | gpl-3.0 | 26,525 | 0.009425 | """ UserProfileDB class is a front-end to the User Profile Database
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import sys
import hashlib
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
class UserProfileDB(DB):
""" UserProfileDB class is a front-end to the User Profile Database
"""
tableDict = {'up_Users': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserName': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'U': ['UserName']},
'Engine': 'InnoDB',
},
'up_Groups': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'UserGroup': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'G': ['UserGroup']},
'Engine': 'InnoDB',
},
'up_VOs': {'Fields': {'Id': 'INTEGER AUTO_INCREMENT NOT NULL',
'VO': 'VARCHAR(32) NOT NULL',
'LastAccess': 'DATETIME',
},
'PrimaryKey': 'Id',
'UniqueIndexes': {'VO': ['VO']},
'Engine': 'InnoDB',
},
'up_ProfilesData': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'Profile': 'VARCHAR(255) NOT NULL',
'VarName': 'VARCHAR(255) NOT NULL',
'Data': 'BLOB',
'ReadAccess': 'VARCHAR(10) DEFAULT "USER"',
'PublishAccess': 'VARCHAR(10) DEFAULT "USER"',
},
'PrimaryKey': ['UserId', 'GroupId', 'Profile', 'VarName'],
'Indexes': {'ProfileKey': ['UserId', 'GroupId', 'Profile'],
'UserKey': ['UserId'],
},
'Engine': 'InnoDB',
},
'up_HashTags': {'Fields': {'UserId': 'INTEGER',
'GroupId': 'INTEGER',
'VOId': 'INTEGER',
'HashTag': 'VARCHAR(32) NOT NULL',
'TagName': 'VARCHAR(255) NOT NULL',
'LastAccess': 'DATETIME',
},
' | PrimaryKey': ['UserId', 'GroupId', 'TagName'],
'Indexes': {'HashKey': ['UserId', 'HashTag']},
'Engine': 'InnoDB',
| },
}
def __init__(self):
""" Constructor
"""
self.__permValues = ['USER', 'GROUP', 'VO', 'ALL']
self.__permAttrs = ['ReadAccess', 'PublishAccess']
DB.__init__(self, 'UserProfileDB', 'Framework/UserProfileDB')
retVal = self.__initializeDB()
if not retVal['OK']:
raise Exception("Can't create tables: %s" % retVal['Message'])
def _checkTable(self):
""" Make sure the tables are created
"""
return self.__initializeDB()
def __initializeDB(self):
"""
Create the tables
"""
retVal = self._query("show tables")
if not retVal['OK']:
return retVal
tablesInDB = [t[0] for t in retVal['Value']]
tablesD = {}
if 'up_Users' not in tablesInDB:
tablesD['up_Users'] = self.tableDict['up_Users']
if 'up_Groups' not in tablesInDB:
tablesD['up_Groups'] = self.tableDict['up_Groups']
if 'up_VOs' not in tablesInDB:
tablesD['up_VOs'] = self.tableDict['up_VOs']
if 'up_ProfilesData' not in tablesInDB:
tablesD['up_ProfilesData'] = self.tableDict['up_ProfilesData']
if 'up_HashTags' not in tablesInDB:
tablesD['up_HashTags'] = self.tableDict['up_HashTags']
return self._createTables(tablesD)
def __getUserId(self, userName, insertIfMissing=True):
return self.__getObjId(userName, 'UserName', 'up_Users', insertIfMissing)
def __getGroupId(self, groupName, insertIfMissing=True):
return self.__getObjId(groupName, 'UserGroup', 'up_Groups', insertIfMissing)
def __getVOId(self, voName, insertIfMissing=True):
return self.__getObjId(voName, 'VO', 'up_VOs', insertIfMissing)
def __getObjId(self, objValue, varName, tableName, insertIfMissing=True):
result = self.getFields(tableName, ['Id'], {varName: objValue})
if not result['OK']:
return result
data = result['Value']
if len(data) > 0:
objId = data[0][0]
self.updateFields(tableName, ['LastAccess'], ['UTC_TIMESTAMP()'], {'Id': objId})
return S_OK(objId)
if not insertIfMissing:
return S_ERROR("No entry %s for %s defined in the DB" % (objValue, varName))
result = self.insertFields(tableName, [varName, 'LastAccess'], [objValue, 'UTC_TIMESTAMP()'])
if not result['OK']:
return result
return S_OK(result['lastRowId'])
def getUserGroupIds(self, userName, userGroup, insertIfMissing=True):
result = self.__getUserId(userName, insertIfMissing)
if not result['OK']:
return result
userId = result['Value']
result = self.__getGroupId(userGroup, insertIfMissing)
if not result['OK']:
return result
groupId = result['Value']
userVO = Registry.getVOForGroup(userGroup)
if not userVO:
userVO = "undefined"
result = self.__getVOId(userVO, insertIfMissing)
if not result['OK']:
return result
voId = result['Value']
return S_OK((userId, groupId, voId))
def deleteUserProfile(self, userName, userGroup=False):
"""
Delete the profiles for a user
"""
result = self.__getUserId(userName)
if not result['OK']:
return result
userId = result['Value']
condDict = {'UserId': userId}
if userGroup:
result = self.__getGroupId(userGroup)
if not result['OK']:
return result
groupId = result['Value']
condDict['GroupId'] = groupId
result = self.deleteEntries('up_ProfilesData', condDict)
if not result['OK'] or not userGroup:
return result
return self.deleteEntries('up_Users', {'Id': userId})
def __webProfileUserDataCond(self, userIds, sqlProfileName=False, sqlVarName=False):
condSQL = ['`up_ProfilesData`.UserId=%s' % userIds[0],
'`up_ProfilesData`.GroupId=%s' % userIds[1],
'`up_ProfilesData`.VOId=%s' % userIds[2]]
if sqlProfileName:
condSQL.append('`up_ProfilesData`.Profile=%s' % sqlProfileName)
if sqlVarName:
condSQL.append('`up_ProfilesData`.VarName=%s' % sqlVarName)
return " AND ".join(condSQL)
def __webProfileReadAccessDataCond(self, userIds, ownerIds, sqlProfileName, sqlVarName=False, match=False):
permCondSQL = []
sqlCond = []
if match:
sqlCond.append('`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % (ownerIds[0], ownerIds[1]))
else:
permCondSQL.append(
'`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' %
(ownerIds[0], ownerIds[1]))
permCondSQL.append('`up_ProfilesData`.GroupId=%s AND `up_ProfilesData`.ReadAccess="GROUP"' % userIds[1])
permCondSQL.append('`up_ProfilesData`.VOId=%s AND `up_ProfilesData`.ReadAccess="V |
nicocardiel/xmegara | check_wlcalib_rss.py | Python | gpl-3.0 | 6,805 | 0 | from __future__ import division
from __future__ import print_function
import argparse
import astropy.io.fits as fits
import numpy as np
import os
from numina.array.display.ximplot import ximplot
from numina.array.display.ximshow import ximshow
from numina.array.wavecalib.check_wlcalib import check_wlcalib_sp
from numina.array.display.pause_debugplot import DEBUGPLOT_CODES
from fix_borders_wlcalib_rss import fix_pix_borders
def process_rss(fitsfile, npix_zero_in_border,
geometry, debugplot):
"""Process twilight image.
Parameters
----------
fitsfile : str
Wavelength calibrated RSS FITS file name.
npix_zero_in_border : int
Number of pixels to be set to zero at the beginning and at
the end of each spectrum to avoid unreliable pixel values
produced in the wavelength calibration procedure.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the Qt backend geometry.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
spmedian : numpy array
Median spectrum corresponding to the collapse of the full
RSS | image.
crpix1: float
CRPIX1 keyword.
crval1: float
CRVAL1 keyword.
cdelt1: float
CDELT1 keyword.
"""
# read the 2d image
with fits.open(fitsfile) as hdulist:
image2d_header = hdulist[0].header
image2d = hdulist[0].data
naxis2, naxis1 = image2d.shape
crpix1 = image2d_header['crpix1']
crval1 = image2d_header['crval1']
cdelt1 = image2d_header['cdelt1']
print('* Input file:', fitsfile)
print('>>> NAXIS1:', naxis | 1)
print('>>> NAXIS2:', naxis2)
print('>>> CRPIX1:', crpix1)
print('>>> CRVAL1:', crval1)
print('>>> CDELT1:', cdelt1)
if abs(debugplot) in (21, 22):
ximshow(image2d, show=True,
title='Wavelength calibrated RSS image', debugplot=debugplot)
# set to zero a few pixels at the beginning and at the end of each
# spectrum to avoid unreliable values coming from the wavelength
# calibration procedure
image2d = fix_pix_borders(image2d, nreplace=npix_zero_in_border,
sought_value=0, replacement_value=0)
if abs(debugplot) in (21, 22):
ximshow(image2d, show=True,
title='RSS image after removing ' +
str(npix_zero_in_border) + ' pixels at the borders',
debugplot=debugplot)
# mask and masked array
mask2d = (image2d == 0)
image2d_masked = np.ma.masked_array(image2d, mask=mask2d)
# median (and normalised) vertical cross section
ycutmedian = np.ma.median(image2d_masked, axis=1).data
# normalise cross section with its own median
tmpmedian = np.median(ycutmedian)
if tmpmedian > 0:
ycutmedian /= tmpmedian
else:
raise ValueError('Unexpected null median in cross section')
# replace zeros by ones
iszero = np.where(ycutmedian == 0)
ycutmedian[iszero] = 1
if abs(debugplot) in (21, 22):
ximplot(ycutmedian, plot_bbox=(1, naxis2),
title='median ycut', debugplot=debugplot)
# equalise the flux in each fiber by dividing the original image by the
# normalised vertical cross secction
ycutmedian2d = np.repeat(ycutmedian, naxis1).reshape(naxis2, naxis1)
image2d_eq = image2d_masked/ycutmedian2d
if abs(debugplot) in (21, 22):
ximshow(image2d_eq.data, show=True,
title='equalised image', debugplot=debugplot)
# median spectrum
spmedian = np.ma.median(image2d_eq, axis=0).data
return spmedian, crpix1, crval1, cdelt1
def main(args=None):
# parse command-line options
parser = argparse.ArgumentParser(prog='check_wlcalib_rss')
# positional parameters
parser.add_argument("fitsfile",
help="Wavelength calibrated RSS FITS image",
type=argparse.FileType('r'))
parser.add_argument("--wv_master_file", required=True,
help="TXT file containing wavelengths",
type=argparse.FileType('r'))
parser.add_argument("--out_sp",
help="File name to save the median spectrum in FITS "
"format including the wavelength "
"calibration (default=None)",
default=None,
type=argparse.FileType('w'))
parser.add_argument("--npixzero",
help="Number of pixels to be set to zero at the "
"borders of each spectrum (default=3)",
default=3, type=int)
parser.add_argument("--geometry",
help="tuple x,y,dx,dy",
default="0,0,640,480")
parser.add_argument("--debugplot",
help="integer indicating plotting/debugging" +
" (default=0)",
type=int, default=12,
choices=DEBUGPLOT_CODES)
args = parser.parse_args(args=args)
# geometry
if args.geometry is None:
geometry = None
else:
tmp_str = args.geometry.split(",")
x_geom = int(tmp_str[0])
y_geom = int(tmp_str[1])
dx_geom = int(tmp_str[2])
dy_geom = int(tmp_str[3])
geometry = x_geom, y_geom, dx_geom, dy_geom
# compute median spectrum and get wavelength calibration parameters
spmedian, crpix1, crval1, cdelt1 = process_rss(
args.fitsfile.name,
args.npixzero,
geometry=geometry,
debugplot=args.debugplot
)
# save median spectrum
if args.out_sp is not None:
hdu = fits.PrimaryHDU(spmedian)
hdu.header['CRPIX1'] = crpix1
hdu.header['CRVAL1'] = crval1
hdu.header['CDELT1'] = cdelt1
hdu.writeto(args.out_sp, overwrite=True)
# read list of expected arc lines
master_table = np.genfromtxt(args.wv_master_file)
wv_master = master_table[:, 0]
if abs(args.debugplot) in (21, 22):
print('wv_master:', wv_master)
# check the wavelength calibration
title = 'fitsfile: ' + os.path.basename(args.fitsfile.name) + \
' [collapsed median]\n' + \
'wv_master: ' + os.path.basename(args.wv_master_file.name)
check_wlcalib_sp(sp=spmedian,
crpix1=crpix1,
crval1=crval1,
cdelt1=cdelt1,
wv_master=wv_master,
title=title,
geometry=geometry,
debugplot=args.debugplot)
if __name__ == "__main__":
main()
|
bsmedberg/socorro | webapp-django/crashstats/supersearch/form_fields.py | Python | mpl-2.0 | 4,545 | 0 | from django import forms
OPERATORS = (
'__true__', '__null__', '$', '~', '^', '=', '<=', '>=', '<', '>',
'!__true__', '!__null__', '!$', '!~', '!^', '!=', '!'
)
def split_on_operator(value):
for operator in sorted(OPERATORS, key=len, reverse=True):
if value.startswith(operator):
value = value[len(operator):]
return (operator, value)
return (None, value)
class PrefixedField(object):
"""Special field that accepts an operator as prefix in the value.
Removes the prefix from the initial value before the validation process
starts, and put it back in a different attribute once the validation
process is finished. The cleaned value is the one without the prefix, thus
allowing to use the real value and check its type.
The validated, prefixed value is available in `prefixed_value` as a string,
and the prefix is in `operator`.
This is needed to allow fields like IntegerField to accept values
containing an operator. For example, a value such as '>13' will raise
a ValidationError in the basic django IntegerField. Using a PrefixedField
based IntegerField, this value is perfectly valid.
"""
operator = None
prefixed_value = None
def to_python(self, value):
if isinstance(value, basestring):
self.operator, value = split_on_operator(value)
return super(PrefixedField, self).to_python(value)
def clean(self, *args, **kwargs):
cleaned_value = super(PrefixedField, self).clean(*args, **kwargs)
self.prefixed_value = self.value_to_string(cleaned_value)
if self.operator is not None and self.prefixed_value is not None:
self.prefixed_value = self.operator + self.prefixed_value
return cleaned_value
def value_to_string(self, value):
"""Return the value as a string. """
if value is None:
return None
return unicode(value)
class MultipleValueField(forms.MultipleChoiceField):
"""This is the same as a MultipleChoiceField except choices don't matter
as no validation will be done. The advantage is that it will take a list
as input, and output a list as well, allowing several values to be passed.
In the end, it's like a CharField that can take a list of values. It is
used as the default field for supersearch.
"""
def validate(self, value):
pass
class MultiplePrefixedValueField(PrefixedField):
"""Special field that uses a SelectMultiple widget to deal with multiple
values. """
def __init__(self, *args, **kwargs):
kwargs['widget'] = forms.SelectMultiple
super(MultiplePrefixedValueField, self).__init__(*args, **kwargs)
def clean(self, values, *args, **kwargs):
cleaned_values = []
prefixed_values = []
if values is None:
# call the mother classe's clean to do other verifications
return super(MultiplePrefixedValueField, self).clean(
values,
*args,
**kwargs
)
for value in values:
cleaned_value = super(MultiplePrefixedValueField, self).clean(
value,
*args,
**kwargs
)
cleaned_values.append(cleaned_value)
prefixed_values.append(self.prefixed_value)
self.prefixed_value = prefixed_values
return cleaned_values
class IntegerField(MultiplePrefixedValueField, forms.Intege | rField):
pass
class DateTimeField(MultiplePrefixedValueField, forms.DateTimeField):
def value_to_string(self, value):
try:
return value.i | soformat()
except AttributeError: # when value is None
return value
class StringField(MultipleValueField):
"""A CharField with a different name, to be considered as a string
by the dynamic_form.js library. This basically enables string operators
on that field ("contains", "starts with"... ).
"""
pass
class BooleanField(forms.CharField):
def to_python(self, value):
"""Return None if the value is None. Return 'true' if the value is one
of the accepted values. Return 'false' otherwise.
Return boolean values as a string so the middleware doesn't exclude
the field if the value is False.
"""
if value is None:
return None
if str(value).lower() in ('__true__', 'true', 't', '1', 'y', 'yes'):
return 'true'
return 'false'
|
MSMBA/msmba-workflow | msmba-workflow/srclib/wax/examples/simplebuttons4.py | Python | gpl-2.0 | 368 | 0.008152 | # simplebuttons4.py
# Button with a | border...
import sys
sys.path.append("../..")
from wax import *
WaxConfig.default_font = ("Verdana", 9)
class MainFrame(Frame):
def Body(self):
b = Button(self, "one")
b.SetSize((80, 80))
self.AddComponent(b, expand='both', border=15)
self.Pack()
app = Application(MainFrame)
app.Mai | nLoop()
|
knightjdr/screenhits | api/app/scripts/CRISPR/MAGeCK/v0.01/lib/python2.7/site-packages/mageck/mleclassdef.py | Python | mit | 4,322 | 0.048126 | '''
Class definition
'''
from __future__ import print_function
# from IPython.core.debugger import Tracer
class SimCaseSimple:
prefix='sample1'
# the beta parameters; beta0 is the base value (a list of double, size nsgRNA)
beta0=[]
# beta1 (a nsample*r) is the beta values of different conditions
beta1=[0]
# the efficient parameters; binary, 0 means it's not efficient and 1 means it's efficient
isefficient=[]
# NB parameters used to generate read counts
#mu0=[]
#mu1=[]
mu_estimate=[]
#var0=[]
#var1=[]
var=[]
#nb_p0=[]
#nb_p1=[]
nb_p=[]
#nb_r0=[]
#nb_r1=[]
nb_r=[]
# actual read counts
#nb_count0=[]
#nb_count1=[]
nb_count=[]
# design matrix
design_mat=[]
# extended design matrix
extended_design_mat=[]
extended_design_mat_residule=[]
#
# estimated values
beta_estimate=[]
w_estimate=[]
# p values
beta_zscore=[]
beta_pval=[] # two-sided p values
beta_pval_fdr=[] # two-sided p values
beta_pval_neg=[] # one-sided p values
beta_pval_neg_fdr=[] # one-sided p values
beta_pval_pos=[] # one-sided p values
beta_pval_pos_fdr=[] # one-sided p values
beta_permute_pval=[]
beta_permute_pval_fdr=[]
beta_permute_pval_neg=[]
beta_permute_pval_neg_fdr=[]
beta_permute_pval_pos=[]
beta_permute_pval_pos_fdr=[]
# sgRNA ids
sgrnaid=[]
# residules, used for calculating the mean-variance modeling
sgrna_kvalue=[] # count matrix; is usually (2*(nsample)+1)*1
sgrna_residule=[] # fitted residule; the same size as sgrna_kvalue
# dispersion estimate (i.e., r, or alpha)
dispersion_estimate=None
# MAP estimate
MAP_sgrna_dispersion_estimate=None
MAP_gene_dispersion_estimate=None
non_PPI_beta_prior_variance=None
prior_mean=None
prior_variance=None
sgrna_probability=None
loglikelihood=None
def gene_to_printfield(self,onesided=False):
'''
Convert values to print field
'''
nsg=0
if len(self.nb_count)>0:
nsg=(self.nb_count.shape[1])
# to field
ret=[self.prefix,str(nsg)]
for i in range(nsg,len(self.beta_estimate)):
ret+=['{0:.5g}'.format(self.beta_estimate[i])]
ret+=['{0:.5g}'.format(self.beta_zscore[i-nsg])]
if onesided:
# one sided test
ret+=['{0:.5g}'.format(self.beta_pval_neg[i-nsg]), '{0:.5g}'.format(self.beta_pval_neg_fdr[i-nsg])]
ret+=['{0:.5g}'.format(self.beta_pval_pos[i-nsg]), '{0:.5g}'.format(self.beta_pval_pos_fdr[i-nsg])]
if len(self.beta_permute_pval)>0:
ret+=['{0:.5g}'.format(self.beta_permute_pval_neg[i-nsg]), '{0:.5g}'.format(self.beta_permute_pval_neg_fdr[i-nsg])]
ret+=['{0:.5g}'.format(self.beta_permute_pval_pos[i-nsg]), '{0:.5g}'.format(self.beta_permute_pval_pos_fdr[i-nsg])]
else:
ret+=['1.0','1.0','1.0','1.0']
else:
# two-sided test
if len(self.beta_permute_pval)>0:
ret+=['{0:.5g}'.format(self.beta_permute_pval[i-nsg]),'{0:.5g}'.format(self.beta_permute_pval_fdr[i-nsg])]
else:
ret+=['1.0','1.0']
ret+=['{0:.5g}'.format(self.beta_pval[i-nsg]), '{0:.5g}'.format(self.beta_pval_fdr[i-nsg])]
return ret
def decformat(x):
return '{0:.3g}'.format(x)
def gene_fdr_correction(allgenedict, method):
'''
perform p value correction
'''
pvaluelabel_list=['beta_pval','beta_pval_neg','beta_pval_pos','beta_permute_pval','beta_permute_pval_neg','beta_permu | te_pval_pos']
fdr_label_list=[x+'_fdr' for x in pvaluelabel_list]
for ii in range(len(pvaluelabel_list)):
pvaluemat_list=[]
var_fdr_list=[]
#whichp='beta_pval'
#writep='beta_pval_fdr'
whichp=pvaluelabel_list[ii]
writep=fdr_label_list[ii]
for (gene,ginst) in allgenedict.iteritems():
tlist | =getattr(ginst,whichp)
pvaluemat_list+=[tlist]
#
import numpy as np
from mageck.fdr_calculation import pFDR
pvaluemat=np.matrix(pvaluemat_list)
pvaluemat_t=pvaluemat.getT()
# row by row
for cid in range(pvaluemat_t.shape[0]):
p_vec=pvaluemat_t[cid,:].getA1().tolist()
fdr_vec=pFDR(p_vec,method)
#Tracer()()
pvaluemat_t[cid,:]=np.array(fdr_vec)
# set up the attribute
gid=0
for (gene,ginst) in allgenedict.iteritems():
targetp=pvaluemat_t[:,gid].getA1().tolist()
setattr(ginst,writep,targetp)
gid+=1
|
silvau/Addons_Odoo | compute_sheet_oisa/hr_payroll.py | Python | gpl-2.0 | 631 | 0.025357 | # -*- encoding: utf-8 -*-
from openerp.osv import osv,fields
class hr_payslip_run(osv.Model):
_inherit = 'hr.payslip.run'
| def compute_sheet_all(self, cr, uid, ids, context=None):
|
payslip_obj=self.pool.get('hr.payslip')
payslip_run_obj=self.pool.get('hr.payslip.run')
my_slip_ids=payslip_run_obj.read(cr,uid,ids,['slip_ids'])
for slip in my_slip_ids[0]['slip_ids']:
child_slip=payslip_obj.browse(cr,uid,slip)
child_slip.compute_sheet()
return True
hr_payslip_run()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
spapageo0x01/dioskrS | layer_block.py | Python | gpl-3.0 | 1,354 | 0.011078 | import os
import inspect
import sys
class BlockStore:
def __init__(self, input_file, block_size, output_dir):
self.input_file = input_file
self.block_size = block_size
file_size = os.stat(input_file).st_size
print | 'file_size: %d' % file_size
#Should handle this later on.
if (file_size < block_size):
print 'File provided is smaller than the deduplication block size.'
sys.exit(0)
if not (os.path.isdir(output_dir)):
print 'Output directory "%s" does not exist. Will create..' % output_dir
os.makedirs(output_dir)
try | :
self.file_fp = os.open(self.input_file, os.O_DIRECT | os.O_RDONLY)
except Exception as e:
frame = inspect.currentframe()
info = inspect.getframeinfo(frame)
print '\t[fopen: an %s exception occured | line: %d]' % (type(e).__name__, info.lineno)
sys.exit(0)
def get_sync(self, byte_offset=0):
block = ''
try:
block = os.read(self.file_fp, self.block_size)
except Exception as e:
frame = inspect.currentframe()
info = inspect.getframeinfo(frame)
print '\t[read: an %s exception occured | line: %d]' % (type(e).__name__, info.lineno)
sys.exit(0);
return block |
scharron/elasticsearch-river-mysql | http_stream/http_stream.py | Python | apache-2.0 | 1,681 | 0.009518 | #!/usr/bin/env python
#
# Update a redis server cache when an evenement is trigger
# in MySQL replication log
#
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import *
mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
import json
import cherrypy
class Streamer(object):
def __init__(self):
self.stream = BinLogStreamReader(connection_settings = mysql_settings,
only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True)
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
def content():
for binlogevent in self.stream:
for row in binlogevent.rows:
if isinstance(binlogevent, DeleteRowsEvent):
yield json.dumps({
"action": "delete",
"id": row["values"]["id"]}) + "\n"
elif isinstance | (binlogevent, UpdateRowsEvent):
yield json.dumps | ({
"action": "update",
"id": row["after_values"]["id"],
"doc": row["after_values"]}) + "\n"
elif isinstance(binlogevent, WriteRowsEvent):
yield json.dumps({
"action": "insert",
"id": row["values"]["id"],
"doc": row["values"]}) + "\n"
return content()
index.exposed = True
index._cp_config = {"response.stream": True}
cherrypy.quickstart(Streamer())
|
PaulFlorea/Orbis2014 | lib/tronclient/SocketChannel.py | Python | mit | 2,095 | 0.015752 | import socket
import struct
import sys
from time import sleep
import logging
class SocketChannelFactory():
'''
Provides method to create channel connection.
'''
def openChannel(self, host, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return SocketChannel(sock)
except socket.error:
print "Cannot connect to {0} at port {1}. Please make sure the server is running.".format(host, port)
raise
class SocketChannel():
'''
SocketChannel provides an abstraction layer above the
underlying socket, which sends and receives messages framed
by their length as 4 bytes in Big Endian.
'''
def __init__(self, sock):
self.sock = sock
self.connected = True
def write(self, byteStream):
'''
Write a byte stream message to the channel.
The message will be prepended by its length packed
in 4 bytes in Big Endian.
'''
streamLen = struct.pack('>L', len(byteStream))
framedStream = streamLen + byteStream
try:
self.sock.sendall(framedStream)
except socket.error:
self.close()
raise Exception("socket send fail, close")
def read(self):
'''
Read a byte stream message prepended by its length
in 4 bytes in Big Endian from channel.
The message content is returned.
'''
lenField = self.readnbytes(4)
length = struct.unpack('>L', lenField)[0]
byteStream = self.readnbytes(length)
return byteStream
def readnbytes(self, n):
buf = ''
while n > 0:
data = self.sock.recv(n)
if data == '':
print "The socket between this client and the server has been broken."
logging.info("Socket broken or connection closed - data was empty w | hile attempting to read")
raise Exception("socket broken or connection closed")
buf += data
n -= len(data)
return | buf
def close(self):
print("closing connection")
self.sock.close()
self.connected = False
|
yland/mailman3 | src/mailman/interfaces/runner.py | Python | gpl-2.0 | 4,700 | 0.000426 | # Copyright (C) 2007-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Interface for runners."""
__all__ = [
'IRunner',
'RunnerCrashEvent',
]
from zope.interface import Interface, Attribute
class RunnerCrashEvent:
"""Triggered when a runner encounters an exception in _dispose()."""
def __init__(self, runner, mlist, msg, metadata, error):
self.runner = runner
self.mailing_list = mlist
self.message = msg
self.metadata = metadata
self.error = error
class IRunner(Interface):
"""The runner."""
def run():
"""Start the runner."""
def stop():
"""Stop the runner on the next iteration through the loop."""
is_queue_runner = Attribute("""\
A boolean variable describing whether the runner is a queue runner.
""")
queue_directory = Attribute(
'The queue directory. Overridden in subclasses.')
sleep_time = Attribute("""\
The number of seconds this runner will sleep between iterations
through the main loop.
""")
def set_signals():
"""Set up the signal handlers necessary to control the runner.
The runner should catch the following signals:
- SIGTERM and SIGINT: treated exactly the same, they cause the runner
to exit with no restart from the master.
- SIGUSR1: Also causes the runner to exit, but the master watcher will
retart it.
- SIGHUP: Re-open the log files.
"""
def _one_iteration():
"""The work done in one iteration of the main loop.
| Can be overridden by subclasses.
:return: The number of files still left to process.
:rtype: int
"""
def _process_one_file(msg, msgdata):
"""Process one queue file.
:param msg: The message object.
:type msg: `email.message.Message`
:param msgdata: The message metadata.
:type msgdata: dict
"""
def _clean_up():
"""Clean up upon exit from the main processing loop.
Called when the runner's main loop is stopp | ed, this should perform any
necessary resource deallocation.
"""
def _dispose(mlist, msg, msgdata):
"""Dispose of a single message destined for a mailing list.
Called for each message that the runner is responsible for, this is
the primary overridable method for processing each message.
Subclasses, must provide implementation for this method.
:param mlist: The mailing list this message is destined for.
:type mlist: `IMailingList`
:param msg: The message being processed.
:type msg: `email.message.Message`
:param msgdata: The message metadata.
:type msgdata: dict
:return: True if the message should continue to be queued, False if
the message should be deleted automatically.
:rtype: bool
"""
def _do_periodic():
"""Do some arbitrary periodic processing.
Called every once in a while both from the runner's main loop, and
from the runner's hash slice processing loop. You can do whatever
special periodic processing you want here.
"""
def _snooze(filecnt):
"""Sleep for a little while.
:param filecnt: The number of messages in the queue the last time
through. Runners can decide to continue to do work, or sleep for
a while based on this value. By default, the base runner only
snoozes when there was nothing to do last time around.
:type filecnt: int
"""
def _short_circuit():
"""Should processing be short-circuited?
:return: True if the file processing loop should exit before it's
finished processing each message in the current slice of hash
space. False tells _one_iteration() to continue processing until
the current snapshot of hash space is exhausted.
:rtype: bool
"""
|
wschuell/experiment_manager | experiment_manager/job_queue/plafrim.py | Python | agpl-3.0 | 1,067 | 0.035614 |
from .slurm import SlurmJo | bQueue,OldSlurmJobQueue
class PlafrimJobQueue(SlurmJobQueue):
def __init__(self, username=None,hostname='plafrim-ext', basedir=None, local_basedir='', base_work_dir=None, max_jobs=100, key_file='plafrim', password=None, install_as_job=False, modules = [], **kwargs):
if username is None:
username = self.get_username_from_hostname(hostname)
ssh_cfg = {'username':username,
'hostname':hostname,
}
if not self.check_hostname(h | ostname):
raise ValueError('Hostname '+hostname+' not in your .ssh/config\n')
if basedir is None:
basedir = '/lustre/'+username
if base_work_dir is None:
base_work_dir = '/tmp/'+username
if not [_ for _ in modules if 'slurm' in _]:
modules.append('slurm')
SlurmJobQueue.__init__(self,ssh_cfg=ssh_cfg,modules=modules,base_work_dir=base_work_dir,basedir=basedir,local_basedir=local_basedir, max_jobs=max_jobs, install_as_job=install_as_job, **kwargs)
class PlafrimOldSlurm(OldSlurmJobQueue):
def __init__(self,*args,**kwargs):
PlafrimJobQueue.__init__(self,*args,**kwargs)
|
seanfisk/buzzword-bingo-server | buzzwordbingo/forms.py | Python | bsd-3-clause | 955 | 0.002094 | """:mod:`buzzwordbingo.form` --- Forms for the HTML REST interface
"""
from django import forms
from buzzwordbingo.models import Buzzword, Board, WinCondition
class BoardForm(forms.ModelForm):
"""Form assisting in the submission of a board that is able to stored in a
non-relational database."""
class Meta:
model = Board
exclude = ('words', 'win_conditions')
words = forms.TypedMultipleChoiceField(
coerce=int,
choices=[(word.pk, word | .word) for word in Buzzword.objects.all()])
"""Re-define words to be a custom field on the form rather than a field
straight from the model.
"""
win_conditions = forms.TypedMultipleChoiceField(
coerce=int,
choices=[(win_condition.pk, win_condition.name)
for win_condition in WinCondition.objects.all()])
"""Re-define win_conditions to be a custom field on the form rather than
straight from t | he model.
"""
|
PWr-Projects-For-Courses/SystemyWizyjne | toolbox/example/poly.py | Python | gpl-2.0 | 1,791 | 0.027359 | #!/usr/bin/env python
from sys import path
import os.path
t | hisrep = os.path.dirname(os.path.abspath(__file__))
path.append(os.path.dirname(thisrep))
from random import randint
from pygame import *
from pygame import gfxdraw
from EasyGame import pathgetter,confirm
controls = """hold the left mouse button to draw
d = undo
s = save"""
scr = display.set_mode((800,800))
co | nfirm(controls,fontsize=14,mode=1)
a = []
c = []
color = [randint(0,255) for i in (1,2,3)]+[50]
while 1:
ev = event.wait()
if ev.type == MOUSEBUTTONDOWN and ev.button == 1:
a.append([ev.pos])
c.append(color)
if ev.type == MOUSEMOTION and ev.buttons[0]:
a[-1].append(ev.pos)
if len(a[-1]) >= 2:
draw.aaline(scr,color,a[-1][-1],a[-1][-2],1)
display.flip()
if ev.type == MOUSEBUTTONUP and ev.button == 1:
if len(a[-1]) >= 2:
draw.aaline(scr,color,a[-1][0],a[-1][-1],1)
gfxdraw.filled_polygon(scr,a[-1],color)
display.flip()
color = [randint(0,255) for i in (1,2,3)]+[50]
if ev.type == QUIT: break
if ev.type == KEYDOWN and ev.key == K_s:
p = pathgetter()
if p: image.save(scr,p)
if ev.type == KEYDOWN and ev.key == K_d and a:
a.pop()
c.pop()
scr.fill(0)
for lines,color in zip(a,c):
draw.aalines(scr,color,1,lines,1)
gfxdraw.filled_polygon(scr,lines,color)
display.flip()
if ev.type == KEYDOWN and ev.key == K_p:
a = [[(x//10*10,y//10*10) for x,y in i] for i in a]
scr.fill(0)
for lines,col in zip(a,c):
if len(lines) > 1:
draw.aalines(scr,col,1,lines,1)
gfxdraw.filled_polygon(scr,lines,col)
display.flip()
|
google-research/deeplab2 | model/builder_test.py | Python | apache-2.0 | 3,005 | 0.002662 | # coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model.builder."""
import os
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from deeplab2 import config_pb2
from deeplab2.model import builder
from deeplab2.model.decoder import motion_deeplab_decoder
from deeplab2.model.encoder import axial_resnet_instances
from deeplab2.model.encoder import mobilenet
# resources dependency
_CONFIG_PATH = 'deeplab2/configs/example'
def _read_proto_file(filename, proto):
filename = filename # OSS: removed internal filename loading.
| with tf.io.gfile.GFile(filename, 'r') as proto_file:
return text_format.ParseLines(proto_f | ile, proto)
class BuilderTest(tf.test.TestCase, parameterized.TestCase):
def test_resnet50_encoder_creation(self):
backbone_options = config_pb2.ModelOptions.BackboneOptions(
name='resnet50', output_stride=32)
encoder = builder.create_encoder(
backbone_options,
tf.keras.layers.experimental.SyncBatchNormalization)
self.assertIsInstance(encoder, axial_resnet_instances.ResNet50)
@parameterized.parameters('mobilenet_v3_large', 'mobilenet_v3_small')
def test_mobilenet_encoder_creation(self, model_name):
backbone_options = config_pb2.ModelOptions.BackboneOptions(
name=model_name, use_squeeze_and_excite=True, output_stride=32)
encoder = builder.create_encoder(
backbone_options,
tf.keras.layers.experimental.SyncBatchNormalization)
self.assertIsInstance(encoder, mobilenet.MobileNet)
def test_resnet_encoder_creation(self):
backbone_options = config_pb2.ModelOptions.BackboneOptions(
name='max_deeplab_s', output_stride=32)
encoder = builder.create_resnet_encoder(
backbone_options,
bn_layer=tf.keras.layers.experimental.SyncBatchNormalization)
self.assertIsInstance(encoder, axial_resnet_instances.MaXDeepLabS)
def test_decoder_creation(self):
proto_filename = os.path.join(
_CONFIG_PATH, 'example_kitti-step_motion_deeplab.textproto')
model_options = _read_proto_file(proto_filename, config_pb2.ModelOptions())
motion_decoder = builder.create_decoder(
model_options, tf.keras.layers.experimental.SyncBatchNormalization,
ignore_label=255)
self.assertIsInstance(motion_decoder,
motion_deeplab_decoder.MotionDeepLabDecoder)
if __name__ == '__main__':
tf.test.main()
|
CantemoInternal/pyxb | pyxb/bundles/opengis/tml.py | Python | apache-2.0 | 43 | 0 | f | rom | pyxb.bundles.opengis.raw.tml import *
|
GridProtectionAlliance/ARMORE | source/webServer/armoreServer.py | Python | mit | 6,634 | 0.007386 | # # # # #
# armoreServer.py
#
# This file is used to serve up
# RESTful links that can be
# consumed by a frontend system
#
# University of Illinois/NCSA Open Source License
# Copyright (c) 2015 Information Trust Institute
# All rights reserved.
#
# Developed by:
#
# Information Trust Institute
# University of Illinois
# http://www.iti.illinois.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimers. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimers in the documentation and/or other materials provided with the
# distribution.
#
# Neither the names of Information Trust Institute, University of Illinois, nor
# the n | ames of its contributors may be used to endorse or promote products derived
# from this Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OT | HERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
# # # # #
# Import python Flask server
# and add backend files to sys.path
import sys
import re
import domains.support.config as confLib
import domains.support.network as netLib
import domains.support.system as sysLib
from flask import Flask, render_template, redirect, url_for, session, request, Response, send_from_directory
from flask.ext.cors import CORS
from domains.support.models import *
from domains.admin import adminDomain
from domains.dashboard import dashboardDomain
from domains.logs import logDomain
from domains.network import netDomain
from domains.policy import policyDomain
from domains.processes import processDomain
from domains.security import securityDomain
from domains.settings import settingsDomain
from domains.statistics import statsDomain
from domains.visualization import visualDomain
from domains.baseline import baselineDomain
from domains.anomalies import anomalyDomain
from domains.support.lib.common import *
import logging
if sys.platform == 'darwin':
logging.basicConfig()
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
app = Flask(__name__)
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database/armore.db'
app.config['SQLALCHEMY_BINDS'] = {"policyEvents": 'sqlite:///database/policy.sqlite'}
app.secret_key = "ARMURSEEKRIT"
app.register_blueprint(adminDomain)
app.register_blueprint(dashboardDomain)
app.register_blueprint(logDomain)
app.register_blueprint(netDomain)
app.register_blueprint(policyDomain)
app.register_blueprint(processDomain)
app.register_blueprint(securityDomain)
app.register_blueprint(settingsDomain)
app.register_blueprint(statsDomain)
app.register_blueprint(visualDomain)
app.register_blueprint(baselineDomain)
app.register_blueprint(anomalyDomain)
db.init_app(app)
app.debug = True
@app.route("/")
def home():
return redirect(url_for(".welcome"))
@app.route("/index.html")
@app.route("/welcome")
def welcome():
if 'username' in session:
return redirect(url_for("dashboard.dashboard"))
if isInitialSetup():
return redirect("/admin/initialUserSetup")
return render_template("welcome.html",
common = sysLib.getCommonInfo({"username": ""}, "welcome")
)
@app.before_request
def checkInitial():
if modLib.isInitialSetup() and request.path != "/admin/initialUserSetup" and not re.search('static', request.path):
return redirect("/admin/initialUserSetup")
@app.after_request
def add_header(response):
response.cache_control.max_age = 1
return response
@app.errorhandler(404)
def notFound(e):
return redirect(url_for('.welcome'))
@app.errorhandler(500)
def notFound(e):
return redirect(request.referrer)
@app.route('/accessDenied')
def accessDenied():
return render_template("access_denied.html",
common = sysLib.getCommonInfo({"username": session["username"]}, "accessDenied"),
)
@app.route('/favicon.ico')
def getFavicon():
return send_from_directory(os.path.join(app.root_path, 'static/img'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/signin', methods=['GET','POST'])
def signin():
username = request.form.get('username', None)
password = request.form.get('password', None)
if request.method == 'POST':
session['username'] = username
return redirect(url_for('dashboard.dashboard'))
@app.errorhandler(401)
@app.route('/signout', methods=['GET', 'POST'])
def signout():
if modLib.isInitialSetup():
return redirect("/admin/initialUserSetup")
session.pop("username", None)
session.pop("role", None)
return Response('<script> window.location.replace("/welcome")</script>', 401, {'WWWAuthenticate':'Digest realm="Login Required"'})
@app.route("/restartServer", methods=["POST"])
@secure(["admin","user"])
def restartServer():
f = request.environ.get('werkzeug.server.shutdown')
f()
return ""
@app.route("/ping")
@secure(["admin","user"])
def pingServer():
    """Liveness probe: an empty 200 response proves the server is up."""
    return ""
if __name__ == "__main__":
    # Sync on-disk configuration before reading it.
    confLib.synchConfigs()
    armoreConf = confLib.getArmoreConfig()
    # Prefer the configured management IP; fall back to auto-detection.
    if not armoreConf:
        ipAddr = netLib.getPrimaryIp()
    else:
        ipAddr = armoreConf.get('Management_IP')
    # Debug code until we get the Management IP, Subnet and Interface into the backend menu
    # (the string "None" can appear when the config stores a stringified null).
    if ipAddr is None or ipAddr == "None":
        ipAddr = input("Enter IP Address: ")
    # Optional first CLI argument overrides the default port 8080.
    portNum = 8080 if len(sys.argv) < 2 else int(sys.argv[1])
    app.config['portNum'] = portNum
    if sys.platform == 'darwin':
        # macOS development: bind loopback only, debugger off.
        print("Starting web server on 127.0.0.1:" + str(portNum))
        app.run(host='127.0.0.1', port=portNum, debug=False, use_evalex=False)
    else:
        # NOTE(review): debug=True on a non-loopback bind is risky in
        # production -- confirm intent.
        print("Starting Server On ", ipAddr, ":", portNum)
        app.run(host=ipAddr, port=portNum, debug=True, use_evalex=False)
|
cliffpanos/True-Pass-iOS | CheckIn/pages/urls.py | Python | apache-2.0 | 403 | 0.019851 | fro | m django.conf.urls import url
from . import views
# URL routes for the pages app (Django 1.x url() style).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Same view reachable at /index as well; the duplicated name 'index'
    # simply means reverse('index') resolves to whichever is listed last.
    url(r'^index$', views.index, name='index'),
    url(r'^signup/$', views.signup, name = 'signup'),
    url(r'^map/$', views.map, name = 'map'),
    url(r'^qrtest/$', views.qrtest, name = 'qrtest'),
    # NOTE(review): 'priviliges' is misspelled, but the path, view name and
    # reverse() name are runtime strings -- renaming requires touching the
    # view and every template that reverses it.
    url(r'^manage_priviliges/$', views.manage_priviliges, name ='manage_priviliges'),
]
|
chrislit/abydos | tests/distance/test_distance__token_distance.py | Python | gpl-3.0 | 15,537 | 0 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance__token_distance.
This module contains u | nit tests for abydos.distance._TokenDistance
"""
import unittest
from collections import Counter
from abydos.distance import (
AverageLinkage,
DamerauLevenshtein,
Jaccard,
JaroWinkler,
SokalMichener,
)
from abydos.stats import ConfusionTable
from abydos.tokenizer import (
CharacterTokenizer,
QSkipgrams,
WhitespaceTokenizer,
)
class TokenDistanceTestCases(unittest.TestCase):
"""Test _Token | Distance functions.
abydos.distance._TokenDistance
"""
cmp_j_crisp = Jaccard(intersection_type='crisp')
cmp_j_soft = Jaccard(intersection_type='soft')
cmp_j_fuzzy = Jaccard(
intersection_type='fuzzy', metric=DamerauLevenshtein(), threshold=0.4
)
cmp_j_linkage = Jaccard(intersection_type='linkage')
    def test_crisp_jaccard_sim(self):
        """Test abydos.distance.Jaccard.sim (crisp)."""
        # Base cases
        self.assertEqual(self.cmp_j_crisp.sim('', ''), 1.0)
        self.assertEqual(self.cmp_j_crisp.sim('a', ''), 0.0)
        self.assertEqual(self.cmp_j_crisp.sim('', 'a'), 0.0)
        self.assertEqual(self.cmp_j_crisp.sim('abc', ''), 0.0)
        self.assertEqual(self.cmp_j_crisp.sim('', 'abc'), 0.0)
        self.assertEqual(self.cmp_j_crisp.sim('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp_j_crisp.sim('abcd', 'efgh'), 0.0)
        # Symmetry: sim(a, b) must equal sim(b, a) for each pair below.
        self.assertAlmostEqual(
            self.cmp_j_crisp.sim('Nigel', 'Niall'), 0.3333333333
        )
        self.assertAlmostEqual(
            self.cmp_j_crisp.sim('Niall', 'Nigel'), 0.3333333333
        )
        self.assertAlmostEqual(
            self.cmp_j_crisp.sim('Colin', 'Coiln'), 0.3333333333
        )
        self.assertAlmostEqual(
            self.cmp_j_crisp.sim('Coiln', 'Colin'), 0.3333333333
        )
        self.assertAlmostEqual(
            self.cmp_j_crisp.sim('ATCAACGAGT', 'AACGATTAG'), 0.5
        )
    def test_soft_jaccard_sim(self):
        """Test abydos.distance.Jaccard.sim (soft)."""
        # Base cases
        self.assertEqual(self.cmp_j_soft.sim('', ''), 1.0)
        self.assertEqual(self.cmp_j_soft.sim('a', ''), 0.0)
        self.assertEqual(self.cmp_j_soft.sim('', 'a'), 0.0)
        self.assertEqual(self.cmp_j_soft.sim('abc', ''), 0.0)
        self.assertEqual(self.cmp_j_soft.sim('', 'abc'), 0.0)
        self.assertEqual(self.cmp_j_soft.sim('abc', 'abc'), 1.0)
        self.assertAlmostEqual(self.cmp_j_soft.sim('abcd', 'efgh'), 0.11111111)
        self.assertAlmostEqual(self.cmp_j_soft.sim('Nigel', 'Niall'), 0.5)
        self.assertAlmostEqual(self.cmp_j_soft.sim('Niall', 'Nigel'), 0.5)
        self.assertAlmostEqual(self.cmp_j_soft.sim('Colin', 'Coiln'), 0.6)
        self.assertAlmostEqual(self.cmp_j_soft.sim('Coiln', 'Colin'), 0.6)
        self.assertAlmostEqual(
            self.cmp_j_soft.sim('ATCAACGAGT', 'AACGATTAG'), 0.68
        )
        # Whitespace tokenization: symmetric result for reordered phrases.
        self.assertAlmostEqual(
            Jaccard(
                intersection_type='soft', tokenizer=WhitespaceTokenizer()
            ).sim('junior system analyst', 'systems analyst'),
            0.6190476190476191,
        )
        self.assertAlmostEqual(
            Jaccard(
                intersection_type='soft', tokenizer=WhitespaceTokenizer()
            ).sim('systems analyst', 'junior system analyst'),
            0.6190476190476191,
        )
        # Soft intersection requires a metric with intermediate similarities;
        # JaroWinkler is rejected with a TypeError here.
        with self.assertRaises(TypeError):
            Jaccard(
                intersection_type='soft',
                metric=JaroWinkler(),
                tokenizer=WhitespaceTokenizer(),
            ).sim('junior system analyst', 'systems analyst')
    def test_fuzzy_jaccard_sim(self):
        """Test abydos.distance.Jaccard.sim (fuzzy)."""
        # Base cases
        self.assertEqual(self.cmp_j_fuzzy.sim('', ''), 1.0)
        self.assertEqual(self.cmp_j_fuzzy.sim('a', ''), 0.0)
        self.assertEqual(self.cmp_j_fuzzy.sim('', 'a'), 0.0)
        self.assertEqual(self.cmp_j_fuzzy.sim('abc', ''), 0.0)
        self.assertEqual(self.cmp_j_fuzzy.sim('', 'abc'), 0.0)
        self.assertEqual(self.cmp_j_fuzzy.sim('abc', 'abc'), 1.0)
        self.assertAlmostEqual(
            self.cmp_j_fuzzy.sim('abcd', 'efgh'), 0.1111111111111111
        )
        self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Nigel', 'Niall'), 0.5)
        self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Niall', 'Nigel'), 0.5)
        self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Colin', 'Coiln'), 0.6)
        self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Coiln', 'Colin'), 0.6)
        self.assertAlmostEqual(
            self.cmp_j_fuzzy.sim('ATCAACGAGT', 'AACGATTAG'), 0.68
        )
        # White-box check of the cached fuzzy union from the last comparison.
        self.assertEqual(sum(self.cmp_j_fuzzy._union().values()), 11.0)
        self.assertAlmostEqual(
            Jaccard(intersection_type='fuzzy').sim('synonym', 'antonym'),
            0.3333333333333333,
        )
    def test_linkage_jaccard_sim(self):
        """Test abydos.distance.Jaccard.sim (group linkage)."""
        # Base cases
        self.assertEqual(self.cmp_j_linkage.sim('', ''), 1.0)
        self.assertEqual(self.cmp_j_linkage.sim('a', ''), 0.0)
        self.assertEqual(self.cmp_j_linkage.sim('', 'a'), 0.0)
        self.assertEqual(self.cmp_j_linkage.sim('abc', ''), 0.0)
        self.assertEqual(self.cmp_j_linkage.sim('', 'abc'), 0.0)
        self.assertEqual(self.cmp_j_linkage.sim('abc', 'abc'), 1.0)
        self.assertAlmostEqual(
            self.cmp_j_linkage.sim('abcd', 'efgh'), 0.1111111111111111
        )
        self.assertAlmostEqual(self.cmp_j_linkage.sim('Nigel', 'Niall'), 0.5)
        self.assertAlmostEqual(self.cmp_j_linkage.sim('Niall', 'Nigel'), 0.5)
        self.assertAlmostEqual(self.cmp_j_linkage.sim('Colin', 'Coiln'), 0.6)
        self.assertAlmostEqual(self.cmp_j_linkage.sim('Coiln', 'Colin'), 0.6)
        self.assertAlmostEqual(
            self.cmp_j_linkage.sim('ATCAACGAGT', 'AACGATTAG'), 0.68
        )
        # A permissive threshold with JaroWinkler links more token pairs.
        self.assertAlmostEqual(
            Jaccard(
                intersection_type='linkage',
                metric=JaroWinkler(),
                threshold=0.2,
            ).sim('synonym', 'antonym'),
            0.6,
        )
def test_token_distance(self):
"""Test abydos.distance._TokenDistance members."""
self.assertAlmostEqual(
Jaccard(intersection_type='soft', alphabet=24).sim(
'ATCAACGAGT', 'AACGATTAG'
),
0.68,
)
self.assertAlmostEqual(
Jaccard(qval=1, alphabet='CGAT').sim('ATCAACGAGT', 'AACGATTAG'),
0.9,
)
self.assertAlmostEqual(
Jaccard(tokenizer=QSkipgrams(qval=3), alphabet='CGAT').sim(
'ATCAACGAGT', 'AACGATTAG'
),
0.6372795969773299,
)
self.assertAlmostEqual(
Jaccard(alphabet=None).sim('synonym', 'antonym'),
0.3333333333333333,
)
self.assertAlmostEqual(
Jaccard(tokenizer=QSkipgrams(qval=3)).sim('synonym', 'antonym'),
0.34146341463414637,
)
src_ctr = Counter({'a': 5, 'b': 2, 'c': 10})
tar_ctr = Counter({'a': 2, 'c': 1, 'd': 3, 'e': 12})
self.assertAlmostEqual(Jaccard().sim(src_ctr, tar_ctr), 0.09375)
self.assertAlmostEqual(
SokalMichener(normalizer='proportional').sim('synonym', 'antonym'),
0.984777917351113,
)
self.assertAlmostEqual(
SokalMichener(normalizer='log').sim('synonym |
asgordon/EtcAbductionPy | etcabductionpy/abduction.py | Python | bsd-2-clause | 3,656 | 0.01395 | '''abduction.py
Base functionality for logical abduction using a knowledge base of definite clauses
Andrew S. Gordon
'''
import itertools
from . import parse
from . import unify
def abduction(obs, kb, maxdepth, skolemize = True):
    '''Logical abduction: returns a list of all sets of assumptions that entail the observations given the kb'''
    indexed_kb = index_by_consequent_predicate(kb)
    # One list of candidate leaf-sets per observation.
    per_observation = [and_or_leaflists([ob], indexed_kb, maxdepth) for ob in obs]
    res = []
    # Every cross-product combination jointly entails all observations.
    for combination in itertools.product(*per_observation):
        merged = list(itertools.chain.from_iterable(combination))
        res.extend(crunch(merged))
    if skolemize:
        return [unify.skolemize(r) for r in res]
    return res
def index_by_consequent_predicate(kb):
    '''Index definite clauses by the predicate of their consequent.

    Returns a dict mapping predicate -> list of clauses whose consequent has
    that predicate, preserving the order clauses appear in kb.
    '''
    res = {}
    for dc in kb:
        # setdefault replaces the manual "in res" branch with one lookup.
        res.setdefault(parse.consequent(dc)[0], []).append(dc)
    return res
def and_or_leaflists(remaining, indexed_kb, depth, antecedents=None, assumptions=None):
    '''Returns list of all entailing sets of leafs in the and-or backchaining tree.

    remaining   -- literals still to be proved at this level
    indexed_kb  -- output of index_by_consequent_predicate()
    depth       -- remaining backchaining depth budget
    antecedents -- literals deferred to the next level
    assumptions -- literals assumed so far (the leaves collected)
    '''
    # None sentinels instead of mutable default arguments: the lists were
    # never mutated in place, but shared defaults are a latent hazard.
    if antecedents is None:
        antecedents = []
    if assumptions is None:
        assumptions = []
    if depth == 0 and antecedents: # fail
        return [] # (empty) list of lists
    elif not remaining: # done with this level
        if not antecedents: # found one
            return [assumptions] # list of lists
        else:
            return and_or_leaflists(antecedents, indexed_kb, depth - 1, [], assumptions)
    else: # more to go on this level
        literal = remaining[0] # first of remaining
        predicate = literal[0]
        if predicate not in indexed_kb:
            return and_or_leaflists(remaining[1:], indexed_kb, depth, antecedents, [literal] + assumptions) # shift literal to assumptions
        else:
            revisions = []
            for rule in indexed_kb[predicate]: # indexed by predicate of literal
                theta = unify.unify(literal, parse.consequent(rule))
                # 'is not None' (not '!= None'): an empty substitution {} is a
                # valid unifier and must be kept.
                if theta is not None:
                    if depth == 0: # no depth for revision
                        return [] # (empty) list of lists
                    revisions.append([unify.subst(theta, remaining[1:]), # new remaining with substitutions
                                      indexed_kb,
                                      depth,
                                      unify.standardize(unify.subst(theta, parse.antecedent(rule))) +
                                      unify.subst(theta, antecedents), # new antecedents with substitutions
                                      unify.subst(theta, assumptions)]) # new assumptions with substitutions
            return itertools.chain(*[and_or_leaflists(*rev) for rev in revisions]) # list of lists (if any)
def crunch(conjunction):
    '''Returns all possible ways that literals in a conjunction could be unified'''
    # Sort so duplicate solutions become adjacent, then keep one per group.
    deduped = []
    for solution, _group in itertools.groupby(sorted(cruncher(conjunction, 0))):
        deduped.append(solution)
    return deduped
def cruncher(conjunction, idx = 0):
    # Recursively explore, for the literal at position idx, every choice of
    # either unifying it with some later literal (merging the two) or leaving
    # it as-is and moving on. Returns a list of candidate conjunctions.
    if idx >= len(conjunction) - 1: # last one
        # Base case: dedupe literals within this finished solution.
        return [[k for k,v in itertools.groupby(sorted(conjunction))]] # dedupe literals in solution
    else:
        res = []
        for subsequent in range(idx + 1,len(conjunction)):
            theta = unify.unify(conjunction[idx], conjunction[subsequent])
            # NOTE: truthiness test -- an empty substitution {} is treated the
            # same as a failed unification here (no merge performed).
            if theta:
                # Merge: drop the subsequent literal and apply the unifier to
                # the rest, then keep crunching from the same index.
                new_conjunction = unify.subst(theta,
                                              conjunction[0:subsequent] +
                                              conjunction[(subsequent + 1):len(conjunction)])
                res.extend(cruncher(new_conjunction, idx))
        # Also explore leaving the idx-th literal un-merged.
        res.extend(cruncher(conjunction, idx + 1))
        return res
|
orvi2014/kitsune | kitsune/wiki/tests/test_parser.py | Python | bsd-3-clause | 39,480 | 0 | import re
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
import kitsune.sumo.tests.test_parser
from kitsune.gallery.models import Video
from kitsune.gallery.tests import image, video
from kitsune.sumo.tests import TestCase
from kitsune.wiki.models import Document
from kitsune.wiki.parser import (
WikiParser, ForParser, PATTERNS, RECURSION_MESSAGE, _key_split,
_build_template_params as _btp, _format_template_content as _ftc)
from kitsune.wiki.tests import document, revision
def | doc_rev_parser(*args, **kwargs):
return kitsune.sumo.tests.test_parser.doc_rev_parser(
*args, parser_cls=WikiParser, **kwargs)
def doc_parse_markup(content, markup, title='Template:test'):
    """Create a doc with given content and parse given markup.

    Returns (parsed markup as a PyQuery document, the WikiParser used).
    """
    _, _, p = doc_rev_parser(content, title)
    doc = pq(p.parse(markup))
    return (doc, p)
class SimpleSyntaxTestCase(TestCase):
    """Simple syntax regexing, like {note}...{/note}, {key Ctrl+K}.

    Each test parses a wiki-markup snippet with WikiParser and inspects the
    rendered HTML through PyQuery selectors.
    """
    def test_note_simple(self):
        """Simple note syntax"""
        p = WikiParser()
        doc = pq(p.parse('{note}this is a note{/note}'))
        eq_('this is a note', doc('div.note').text())
    def test_warning_simple(self):
        """Simple warning syntax"""
        p = WikiParser()
        doc = pq(p.parse('{warning}this is a warning{/warning}'))
        eq_('this is a warning', doc('div.warning').text())
    def test_warning_multiline(self):
        """Multiline warning syntax"""
        p = WikiParser()
        doc = pq(p.parse('{warning}\nthis is a warning\n{/warning}'))
        eq_('this is a warning', doc('div.warning').text())
    def test_warning_multiline_breaks(self):
        """Multiline breaks warning syntax"""
        p = WikiParser()
        doc = pq(p.parse('\n\n{warning}\n\nthis is a warning\n\n'
                         '{/warning}\n\n'))
        eq_('this is a warning', doc('div.warning').text())
    def test_general_warning_note(self):
        """A bunch of wiki text with {warning} and {note}"""
        p = WikiParser()
        doc = pq(p.parse('\n\n{warning}\n\nthis is a warning\n\n{note}'
                         'this is a note{warning}!{/warning}{/note}'
                         "[[Installing Firefox]] '''internal''' ''link''"
                         '{/warning}\n\n'))
        eq_('!', doc('div.warning div.warning').text())
        eq_('this is a note !', doc('div.note').text())
        eq_('Installing Firefox', doc('a').text())
        eq_('internal', doc('strong').text())
        eq_('link', doc('em').text())
    def test_key_inline(self):
        """{key} stays inline"""
        p = WikiParser()
        doc = pq(p.parse('{key Cmd+Shift+Q}'))
        eq_(1, len(doc('p')))
        eq_(u'<span class="key">Cmd</span> + <span class="key">Shift</span>'
            u' + <span class="key">Q</span>', doc.html().replace('\n', ''))
    def test_template_inline(self):
        """Inline templates are not wrapped in <p>s"""
        doc, p = doc_parse_markup('<span class="key">{{{1}}}</span>',
                                  '[[T:test|Cmd]] + [[T:test|Shift]]')
        eq_(1, len(doc('p')))
    def test_template_multiline(self):
        """Multiline templates are wrapped in <p>s"""
        doc, p = doc_parse_markup('<span class="key">\n{{{1}}}</span>',
                                  '[[T:test|Cmd]]')
        eq_(3, len(doc('p')))
    def test_key_split_callback(self):
        """The _key_split regex callback does what it claims"""
        # PATTERNS[2][0] is assumed to be the compiled {key ...} pattern.
        key_p = PATTERNS[2][0]
        # Multiple keys, with spaces
        eq_('<span class="key">ctrl</span> + <span class="key">alt</span> + '
            '<span class="key">del</span>',
            key_p.sub(_key_split, '{key ctrl + alt + del}'))
        # Single key with spaces in it
        eq_('<span class="key">a key</span>',
            key_p.sub(_key_split, '{key a key}'))
        # Multiple keys with quotes and spaces
        eq_('<span class="key">"Param-One" and</span> + <span class="key">'
            'param</span> + <span class="key">two</span>',
            key_p.sub(_key_split, '{key "Param-One" and + param+two}'))
        eq_('<span class="key">multi\nline</span> + '
            '<span class="key">me</span>',
            key_p.sub(_key_split, '{key multi\nline\n+me}'))
    def test_key_split_brace_callback(self):
        """Adding brace inside {key ...}"""
        key_p = PATTERNS[2][0]
        eq_('<span class="key">ctrl</span> + <span class="key">and</span> '
            'Here is }',
            key_p.sub(_key_split, '{key ctrl + and} Here is }'))
        eq_('<span class="key">ctrl</span> + <span class="key">and</span> + '
            '<span class="key">{</span>',
            key_p.sub(_key_split, '{key ctrl + and + {}'))
    def test_simple_inline_custom(self):
        """Simple custom inline syntax: menu, button, filepath, pref"""
        p = WikiParser()
        tags = ['menu', 'button', 'filepath', 'pref']
        for tag in tags:
            doc = pq(p.parse('{%s this is a %s}' % (tag, tag)))
            eq_('this is a ' + tag, doc('span.' + tag).text())
    def test_general_warning_note_inline_custom(self):
        """A mix of custom inline syntax with warnings and notes"""
        p = WikiParser()
        doc = pq(p.parse('\n\n{warning}\n\nthis is a {button warning}\n{note}'
                         'this is a {menu note}{warning}!{/warning}{/note}'
                         "'''{filepath internal}''' ''{menu hi!}''{/warning}"))
        eq_('warning', doc('div.warning span.button').text())
        eq_('this is a note !', doc('div.note').text())
        eq_('note', doc('div.warning div.note span.menu').text())
        eq_('internal', doc('strong span.filepath').text())
        eq_('hi!', doc('em span.menu').text())
    def test_comments(self):
        """Markup containing taggy comments shouldn't truncate afterward."""
        p = WikiParser()
        # This used to truncate after the comment when rendered:
        eq_(p.parse('Start <!-- <foo --> End'),
            '<p>Start End\n</p>')
        # Just make sure these don't go awry either:
        eq_(p.parse('Start <!-- <foo> --> End'),
            '<p>Start End\n</p>')
        eq_(p.parse('Start <!-- foo> --> End'),
            '<p>Start End\n</p>')
    def test_internal_links(self):
        """Make sure internal links work correctly when not to redirected
        articles and when to redirected articles"""
        p = WikiParser()
        # Create a new article
        rev = revision(is_approved=True, save=True)
        doc = rev.document
        doc.current_revision = rev
        doc.title = 'Real article'
        doc.save()
        # Change the slug of the article to create a redirected article
        old_slug = doc.slug
        doc.slug = 'real-article'
        doc.save()
        redirect = Document.objects.get(slug=old_slug)
        # Both internal links should link to the same article
        eq_(p.parse('[[%s]]' % doc.title),
            '<p><a href="/en-US/kb/%s">%s</a>\n</p>' % (doc.slug, doc.title))
        eq_(p.parse('[[%s]]' % redirect.title),
            '<p><a href="/en-US/kb/%s">%s</a>\n</p>' % (doc.slug, doc.title))
class TestWikiTemplate(TestCase):
    def test_template(self):
        """Simple template markup."""
        # doc_parse_markup creates Template:test with this content first.
        doc, _ = doc_parse_markup('Test content', '[[Template:test]]')
        eq_('Test content', doc.text())
    def test_template_does_not_exist(self):
        """Return a message if template does not exist"""
        # No Template:test document was created, so parsing must fall back
        # to the user-visible error message.
        p = WikiParser()
        doc = pq(p.parse('[[Template:test]]'))
        eq_('The template "test" does not exist or has no approved revision.',
            doc.text())
def test_template_locale(self):
"""Localized template is returned."""
py_doc, p = doc_parse_markup('English content', '[[Template:test]]')
parent = document()
d = document(parent=parent, title='Template:test', locale='fr')
d.save()
r = revision(content='French content', document=d, is_approved=True)
r.save()
eq_('English content', py_doc.text())
py_doc = pq(p.parse('[[T:test]]', lo |
lnls-fac/sirius | pymodels/BO_V06_01/accelerator.py | Python | mit | 915 | 0.002186 |
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
# Default machine state for newly created accelerators: RF cavity off,
# synchrotron-radiation effects off, vacuum-chamber aperture checks off.
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode, energy=_lattice.energy):
    """Build a pyaccel Accelerator for this booster lattice.

    optics_mode -- lattice optics mode (defaults to the lattice module's choice)
    energy      -- beam energy passed to both the lattice and the accelerator
    """
    return _pyaccel.accelerator.Accelerator(
        lattice=_lattice.create_lattice(optics_mode=optics_mode, energy=energy),
        energy=energy,
        harmonic_number=_lattice.harmonic_number,
        cavity_on=default_cavity_on,
        radiation_on=default_radiation_on,
        vchamber_on=default_vchamber_on,
    )
# Ancillary machine data keyed by name (consumed by pymodels clients).
accelerator_data = dict()
accelerator_data['lattice_version'] = 'BO_V06_01'
accelerator_data['global_coupling'] = 0.0002 # expected corrected value
# Flat pressure profile: first row is position s [m], second row pressure
# [mbar]. NOTE(review): 496.8 is presumably the ring circumference -- confirm.
accelerator_data['pressure_profile'] = _np.array([[0, 496.8], [1.5e-8]*2]) # [s [m], p [mbar]]
496.78745
|
kubeflow/pipelines | sdk/python/tests/compiler/testdata/add_pod_env.py | Python | apache-2.0 | 1,058 | 0.000945 | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache Licens | e, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writi | ng, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.deprecated as kfp
@kfp.dsl.pipeline(name='Test adding pod env', description='Test adding pod env')
def test_add_pod_env():
    """One-step pipeline whose container echoes its own pod name."""
    echo_op = kfp.dsl.ContainerOp(
        name='echo',
        image='library/bash',
        command=['sh', '-c'],
        arguments=['echo $KFP_POD_NAME'],
    )
    # add_pod_label returns the op itself; the label marks the pod for the test.
    echo_op.add_pod_label('add-pod-env', 'true')
if __name__ == '__main__':
    # Compile the pipeline definition to an Argo YAML next to this file.
    import kfp.deprecated.compiler as compiler
    compiler.Compiler().compile(test_add_pod_env, __file__ + '.yaml')
|
jmwenda/osmxapi | osmxapi/__init__.py | Python | gpl-3.0 | 4,014 | 0.008975 | # -*- coding: utf-8 -*-
#
# This file is part of the osmxapi Python module.
#
# osmxapi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# osmxapi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with osmxapi. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright: © 2009-2010 Etienne Chové <chove@crans.org>
# Copyright: © 2012 Morten Kjeldgaard <mok@bioxray.dk>
# License: GPL-3+
__version__ = '0.1'
import xml.dom.minidom
import dom, http
import os.path
class OsmXapi:
    """Minimal read-only client for the OpenStreetMap XAPI.

    All four query methods return None for an empty query, the raw response
    body when raw is truthy, and otherwise parsed element data.
    """
    def __init__(self, api = "www.overpass-api.de", base="api", debug = False):
        """api -- host name of the XAPI server; base -- URL base path;
        debug -- passed through to the HTTP layer."""
        self.debug = debug
        self.base = os.path.join('/', base, 'xapi')
        self.http = http.Http(api, debug)
    #.
    def _elementGet(self, kind, query, raw):
        """Shared fetch/parse for a single element kind ('node', 'way',
        'relation'). Returns raw data when requested, the empty/None response
        unchanged, or a list of parsed elements."""
        uri = self.base + "?" + kind + repr(query)
        data = self.http.get(uri)
        # Consistency fix: every method now short-circuits on raw output and
        # on an empty response (previously relationGet/anyGet would crash in
        # parseString on an empty body).
        if raw or not data:
            return data
        tree = xml.dom.minidom.parseString(data)
        osm = tree.getElementsByTagName("osm")[0]
        parser = {"node": dom.parseNode,
                  "way": dom.parseWay,
                  "relation": dom.parseRelation}[kind]
        return [parser(e) for e in osm.getElementsByTagName(kind)]
    #.
    def nodeGet(self, query=None, raw=None):
        """ Returns NodeData for query """
        if not query:
            return None
        return self._elementGet("node", query, raw)
    #.
    def wayGet(self, query=None, raw=None):
        """Returns way data for query"""
        if not query:
            return None
        return self._elementGet("way", query, raw)
    #.
    def relationGet(self, query=None, raw=None):
        """Return relation data for query"""
        # Consistency fix: guard against an empty query like nodeGet/wayGet.
        if not query:
            return None
        return self._elementGet("relation", query, raw)
    #.
    def anyGet(self, query=None, raw=None):
        """Return any data for query as a dict keyed by element kind"""
        if not query:
            return None
        uri = self.base + "?*" + repr(query)
        data = self.http.get(uri)
        if raw or not data:
            return data
        tree = xml.dom.minidom.parseString(data)
        osm = tree.getElementsByTagName("osm")[0]
        anydict = {}
        for kind, parser in (("node", dom.parseNode),
                             ("way", dom.parseWay),
                             ("relation", dom.parseRelation)):
            anydict[kind] = [parser(e) for e in osm.getElementsByTagName(kind)]
        return anydict
    #.
#.
if __name__ == '__main__':
    # Ad-hoc smoke test against the live server (Python 2 print statements).
    from xapiquery import XapiQuery
    xapi = OsmXapi(debug = True)
    # Bounding box around the Aarhus university parking lots.
    uniparken = XapiQuery (lats=56.1618032,
                           lonw=10.1891327,
                           latn=56.1719343,
                           lone=10.212822)
    uniparken[u'amenity'] = u'parking'
    N = xapi.nodeGet(uniparken)
    print N
    W = xapi.wayGet(uniparken)
    print W
    A = xapi.anyGet(uniparken)
    print A
#.
|
ololobster/cvidone | cvidone/util/validator.py | Python | mit | 11,249 | 0.012801 | # Released under the MIT license. See the LICENSE file for more information.
# https://github.com/ololobster/cvidone
from flask import request
import datetime
from calendar import monthrange
import json
import re
class ValidationError(Exception):
    """Base error for all input-validation failures; `name` is the field name."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
class MissingError(ValidationError):
    """A required input field was absent."""
    pass
class MinLengthConstraintError(ValidationError):
    """A string was shorter than its minimum allowed length."""
    def __init__(self, name, min_length):
        super().__init__(name)
        self._min_length = min_length
    @property
    def min_length(self):
        # The violated lower bound, exposed read-only.
        return self._min_length
class MaxLengthConstraintError(ValidationError):
    """A string exceeded its maximum allowed length."""
    def __init__(self, name, max_length):
        super().__init__(name)
        self._max_length = max_length
    @property
    def max_length(self):
        # The violated upper bound, exposed read-only.
        return self._max_length
class BadNumberError(ValidationError):
    """Input could not be parsed as an integer."""
    pass
class MinConstraintError(ValidationError):
    """A number was below its minimum allowed value."""
    def __init__(self, name, min_value):
        super().__init__(name)
        self._min_value = min_value
    @property
    def min_value(self):
        # The violated lower bound, exposed read-only.
        return self._min_value
class MaxConstraintError(ValidationError):
    """A number was above its maximum allowed value."""
    def __init__(self, name, max_value):
        super().__init__(name)
        self._max_value = max_value
    @property
    def max_value(self):
        # The violated upper bound, exposed read-only.
        return self._max_value
class InvalidDateError(ValidationError):
    """A well-formed date string referred to an impossible calendar date."""
    pass
class ValidationRule(object):
    """Bundle of validation options consumed by Validator.validate().

    Only the options relevant to the chosen `type` ("int", "ints", "str",
    date or list variants) are consulted; the others are simply stored.
    """
    def __init__(self, type="str", min_value=None, max_value=None,
                 strip_required=False, min_length=None, max_length=None,
                 mask=None, null_date_permitted=False, separator=",",
                 make_unique=True, skip_empty_elements=True,
                 sort_required=False, bad_values=None, permitted_values=None):
        self.type = type
        # Number validation.
        self.min_value = min_value
        self.max_value = max_value
        # String validation.
        self.strip_required = strip_required
        self.min_length = min_length
        self.max_length = max_length
        self.mask = mask
        # Date validation.
        self.null_date_permitted = null_date_permitted
        # List validation.
        self.separator = separator
        self.make_unique = make_unique
        self.skip_empty_elements = skip_empty_elements
        self.sort_required = sort_required
        # Common checks.
        self.bad_values = bad_values
        self.permitted_values = permitted_values
NULL_DATE = datetime.date(1000, 1, 1)
class Validator(object):
def __init__(self, request):
self._request = request
self.default_source = "FORM"
self._json_values = {}
if (0 == self._request.content_type.find("application/json")):
self._json_values = json.loads(self._request.data.decode("utf-8"))
for k in self._json_values.keys():
self._json_values[k] = str(self._json_values[k])
def _getInput(self, name, source):
values_dict = self._request.args if ("URL" == source) else \
self._request.form if ("FORM" == source) else \
self._request.cookies if ("COOKIE" == source) else \
self._json_values if ("JSON" == source) else \
None
assert(values_dict is not None)
return values_dict.get(name, None)
@staticmethod
def _makeCommonChecks(name, value, bad_values, permitted_values):
if (bad_values is not None) and (value in bad_values):
raise ValidationError(name)
if (permitted_values is not None) and (value not in permitted_values):
raise ValidationError(name)
return value
@staticmethod
def _validateInt(name, input_str, min_value, max_value, bad_values, permitted_values):
try:
value = int(input_str)
except ValueError:
raise BadNumberError(name)
if (min_value is not None) and (value < min_value):
raise MinConstraintError(name, min_value)
if (max_value is not None) and (value > max_value):
raise MaxConstraintError(name, max_value)
Validator._makeCommonChecks(name, value, bad_values, permitted_values)
return value
@staticmethod
def _validateStr(name, value, strip_required, min_length, max_length, mask, bad_values, permitted_values):
if (strip_required):
value = value.strip()
if (min_length is not None) and (len(value) < min_length):
raise MinLengthConstraintError(name, min_length)
if (max_length is not None) and (len(value) > max_length):
raise MaxLengthConstraintError(name, max_length)
if (mask is not None) and (re.compile("^" + mask + "$").match(value) is None):
raise ValidationError(name)
Validator._makeCommonChecks(name, value, bad_values, permitted_values)
return value
@staticmethod
def _validateDate(name, value, null_date_permitted):
if ("00.00.0000" == value) or ("" == value):
if (null_date_permitted):
return NULL_DATE
else:
raise ValidationError(name)
if (re.compile("^\d\d\.\d\d\.\d\d\d\d$").match(value) is None):
raise ValidationError(name)
date = value.split(".")
d, m, y = int(date[0]), int(date[1]), int(date[2])
if (1 > d) or (d > 31) or (1 > m) or (m > 12) or (1990 > y):
raise InvalidDateError(name)
days_in_month = monthrange(y, m)[1]
if (d > days_in_month):
raise InvalidDateError(name)
return datetime.date(y, m, d)
@staticmethod
def _validateList(name, values, make_unique, sort_required, bad_values, permitted_values):
if (make_unique):
seen = set()
values = [value for value in values if (value not in seen) and not seen.add(value)]
if (sort_required):
values.sort()
for value in values:
Validator._makeCommonChecks(name, value, bad_values, permitted_values)
return values
@staticmethod
def validate(
input_str
, name=None
, rule=None
, type="str"
# Information about how to validate a number.
, min_value=None
, max_value=None
# Information about how to validate a string.
, strip_required=False
| , min_length=None
, max_length=None
, mask=None
# Information about how to validate a date.
, null_date_permitted=False
# Information about how to validate a list.
, separator=","
, make_unique=False
, skip_empty_elements=False
, sort_required=False
# Common checks.
, bad_values=None
| , permitted_values=None
):
if (rule is not None):
return Validator.validate(
input_str
, name=name
, type=rule.type
, min_value=rule.min_value
, max_value=rule.max_value
, strip_required=rule.strip_required
, min_length=rule.min_length
, max_length=rule.max_length
, mask=rule.mask
, null_date_permitted=rule.null_date_permitted
, separator=rule.separator
, make_unique=rule.make_unique
, skip_empty_elements=rule.skip_empty_elements
, sort_required=rule.sort_required
, bad_values=rule.bad_values
, permitted_values=rule.permitted_values
)
if ("int" == type):
return Validator._validateInt(name, input_str, min_value, max_value, bad_values, permitted_values)
elif ("ints" == type):
values = []
raw_values = input_str.split(separator)
for raw_value in raw_values:
if (skip_empty_elements) and ("" == raw_value):
continue
values.append(Validator._validateInt(name, raw_value, min_value, max_value, |
tempbottle/pykafka | tests/pykafka/test_balancedconsumer.py | Python | apache-2.0 | 4,755 | 0.001052 | import math
import time
import mock
import unittest2
from pykafka import KafkaClient
from pykafka.balancedconsumer import BalancedConsumer
from pykafka.test.utils import get_cluster, stop_cluster
def buildMockConsumer(num_partitions=10, num_participants=1, timeout=2000):
    """Build a BalancedConsumer over a mocked topic (Python 2 code: xrange).

    Partitions get ids 0..num_partitions-1 with leaders distributed
    round-robin over num_participants brokers. Returns (consumer, topic).
    """
    consumer_group = 'testgroup'
    topic = mock.Mock()
    topic.name = 'testtopic'
    topic.partitions = {}
    for k in xrange(num_partitions):
        part = mock.Mock(name='part-{part}'.format(part=k))
        part.id = k
        part.topic = topic
        part.leader = mock.Mock()
        part.leader.id = k % num_participants
        topic.partitions[k] = part
    cluster = mock.MagicMock()
    zk = mock.MagicMock()
    # auto_start=False so no zookeeper/broker connection is attempted.
    return BalancedConsumer(topic, cluster, consumer_group,
                            zookeeper=zk, auto_start=False,
                            consumer_timeout_ms=timeout), topic
class TestBalancedConsumer(unittest2.TestCase):
    """Unit tests for BalancedConsumer against fully mocked topics."""
    @classmethod
    def setUpClass(cls):
        cls._consumer_timeout = 2000
        cls._mock_consumer, _ = buildMockConsumer(timeout=cls._consumer_timeout)
    def test_consume_returns(self):
        """Ensure that consume() returns in the amount of time it's supposed to
        """
        self._mock_consumer._setup_internal_consumer(start=False)
        start = time.time()
        self._mock_consumer.consume()
        # Whole-second comparison: consume() should block ~timeout ms.
        self.assertEqual(int(time.time() - start), int(self._consumer_timeout / 1000))
    def test_decide_partitions(self):
        """Test partition assignment for a number of partitions/consumers."""
        # 100 test iterations
        for i in xrange(100):
            # Set up partitions, cluster, etc
            num_participants = i + 1
            num_partitions = 100 - i
            participants = ['test-debian:{p}'.format(p=p)
                            for p in xrange(num_participants)]
            cns, topic = buildMockConsumer(num_partitions=num_partitions,
                                           num_participants=num_participants)
            # Simulate each participant to ensure they're correct
            assigned_parts = []
            for p_id in xrange(num_participants):
                cns._consumer_id = participants[p_id]  # override consumer id
                # Decide partitions then validate
                partitions = cns._decide_partitions(participants)
                assigned_parts.extend(partitions)
                # Expected share: floor(partitions/participants), plus one for
                # the first (partitions % participants) participants.
                remainder_ppc = num_partitions % num_participants
                idx = participants.index(cns._consumer_id)
                parts_per_consumer = num_partitions / num_participants
                parts_per_consumer = math.floor(parts_per_consumer)
                num_parts = parts_per_consumer + (0 if (idx + 1 > remainder_ppc) else 1)
                self.assertEqual(len(partitions), num_parts)
            # Validate all partitions were assigned once and only once
            # (Python 2: dict.values() is a sortable list here).
            all_partitions = topic.partitions.values()
            all_partitions.sort()
            assigned_parts.sort()
            self.assertListEqual(assigned_parts, all_partitions)
class BalancedConsumerIntegrationTests(unittest2.TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
cls.kafka = get_cluster()
cls.topic_name = 'test-data'
cls.kafka.create_topic(cls.topic_name, 3, 2)
cls.client = KafkaClient(cls.kafka.brokers)
prod = cls.client.topics[cls.topic_name].get_producer(batch_size=5)
prod.produce('msg {num}'.format(num=i) for i in xrange(1000))
@classmethod
def tearDownClass(cls):
stop_cluster(cls.kafka)
def test_consume(self):
try:
consumer_a = self.client.topics[self.topic_name].get_balanced_consumer('test_consume', zookeeper_connect=self.kafka.zookeeper)
consumer_b = self.client.topics[self.topic_name].get_balanced_consumer('test_consume', zookeeper_connect=self.kafka.zookeeper)
# Consume from both a few times
messages = [consumer_a.consume() for i in xrange(1)]
self.assertTrue(len(messages) == 1)
messages = [consumer_b.consume() for i in xrange(1)]
self.assertTrue(len(messages) == 1)
# Validate they aren't sharing partitions
self.assertSetEqual(
consumer_a._partitions & consumer_b._partitions,
set()
)
# Validate all partitions are here
self.assertSetEqual(
consumer_a._partitions | consumer_b._partitions,
set(self.client.topics[self.topic_name].partitions.values())
)
finally:
consumer_a.stop()
consumer_b.stop()
if __name__ == "__main__":
unittest2.main()
|
frappe/frappe | frappe/desk/doctype/todo/todo.py | Python | mit | 4,119 | 0.025977 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import json
from frappe.model.document import Document
from frappe.utils import get_fullname, parse_addr
exclude_from_linked_with = True
class ToDo(Document):
DocType = 'ToDo'
def validate(self):
self._assignment = None
if self.is_new():
if self.assigned_by == self.allocated_to:
assignment_message = frappe._("{0} self assigned this task: {1}").format(get_fullname(self.assigned_by), self.description)
else:
assignment_message = frappe._("{0} assigned {1}: {2}").format(get_fullname(self.assigned_by), get_fullname(self.allocated_to), self.description)
self._assignment = {
"text": assignment_message,
"comment_type": "Assigned"
}
else:
# NOTE the previous value is only available in validate method
if self.get_db_value("status") != self.status:
if self.allocated_to == frappe.session.user:
removal_message = frappe._("{0} removed their assignment.").format(
get_fullname(frappe.session.user))
else:
removal_message = frappe._("Assignment of {0} removed by {1}").format(
get_fullname(self.allocated_to), get_fullname(frappe.session.user))
self._assignment = {
"text": removal_message,
"comment_type": "Assignment Completed"
}
def on_update(self):
if self._assignment:
self.add_assign_comment(**self._assignment)
self.update_in_reference()
def on_trash(self):
self.delete_communication_links()
self.update_in_reference()
def add_assign_comment(self, text, comment_type):
if not (self.reference_type and self.reference_name):
return
frappe.get_doc(self.reference_type, self.reference_name).add_comment(comment_type, text)
def delete_communication_links(self):
# unlink todo from linked comments
return frappe.db.delete("Communication Link", {
"link_doctype": self.doctype,
"link_name": self.name
})
def update_in_reference(self):
if not (self.reference_type and self.reference_name):
return
try:
assignments = frappe.get_all("ToDo", filters={
"reference_type": self.reference_type,
"reference_name": s | elf.reference_name,
"status": ("!=", "Cancelled")
}, pluck="allocated_to")
assignments.reverse()
frappe.db.set_value(self.reference_type, self.reference_name | ,
"_assign", json.dumps(assignments), update_modified=False)
except Exception as e:
if frappe.db.is_table_missing(e) and frappe.flags.in_install:
# no table
return
elif frappe.db.is_column_missing(e):
from frappe.database.schema import add_column
add_column(self.reference_type, "_assign", "Text")
self.update_in_reference()
else:
raise
@classmethod
def get_owners(cls, filters=None):
"""Returns list of owners after applying filters on todo's.
"""
rows = frappe.get_all(cls.DocType, filters=filters or {}, fields=['allocated_to'])
return [parse_addr(row.allocated_to)[1] for row in rows if row.allocated_to]
# NOTE: todo is viewable if a user is an owner, or set as assigned_to value, or has any role that is allowed to access ToDo doctype.
def on_doctype_update():
frappe.db.add_index("ToDo", ["reference_type", "reference_name"])
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
todo_roles = frappe.permissions.get_doctype_roles('ToDo')
if 'All' in todo_roles:
todo_roles.remove('All')
if any(check in todo_roles for check in frappe.get_roles(user)):
return None
else:
return """(`tabToDo`.allocated_to = {user} or `tabToDo`.assigned_by = {user})"""\
.format(user=frappe.db.escape(user))
def has_permission(doc, ptype="read", user=None):
user = user or frappe.session.user
todo_roles = frappe.permissions.get_doctype_roles('ToDo', ptype)
if 'All' in todo_roles:
todo_roles.remove('All')
if any(check in todo_roles for check in frappe.get_roles(user)):
return True
else:
return doc.allocated_to==user or doc.assigned_by==user
@frappe.whitelist()
def new_todo(description):
frappe.get_doc({
'doctype': 'ToDo',
'description': description
}).insert()
|
HiTechIronMan/openfda | openfda/parallel/sharded_db.py | Python | cc0-1.0 | 1,844 | 0.009761 | import cPickle
import glob
import logging
import os
import leveldb
class ShardedDB(object):
'''
Manages a number of leveldb "shards" (partitions).
LevelDB does not support concurrent writers, so we create a separate output
shard for each reducer. `ShardedDB` provides a unified interface to
multiple shards.
'''
def __init__(self, filebase, num_shards, create_if_missing):
self.filebase = filebase
self.num_shards = num_shards
self._shards = []
os.system('mkdir -p "%s"' % filebase)
for i in range(num_shards):
shard_file = '%s/shard-%05d-of-%05d.db' % (filebase, i, num_shards)
self._shards.append(leveldb.LevelDB(shard_file, create_ | if_missing=create_if_missing))
logging.info('Opened DB with %s files', num_shards)
@staticmethod
def create(filebase, num_shards):
'Create a new ShardedDB with the given number of output shards.'
return ShardedDB(filebase, num_shards, True)
@static | method
def open(filebase):
'Open an existing ShardedDB.'
files = glob.glob('%s/*.db' % filebase)
return ShardedDB(filebase, len(files), False)
def _shard_for(self, key):
return self._shards[hash(key) % self.num_shards]
def put(self, key, value):
self._shard_for(key).Put(key, cPickle.dumps(value, -1))
def get(self, key):
return cPickle.loads(self._shard_for(key).Get(key))
def range_iter(self, start_key, end_key):
iters = [db.RangeIter(start_key, end_key) for db in self._shards]
for i in iters:
for key, value in i:
yield key, cPickle.loads(value)
def __iter__(self):
for shard in self._shards:
for key, value in shard.RangeIter():
yield key, cPickle.loads(value)
def as_dict(self):
'Returns the content of this database as an in-memory Python dictionary'
return dict(self.range_iter(None, None))
|
nave91/rebot | scripts/hdfs_to_spark_to_hdfs.py | Python | gpl-2.0 | 2,273 | 0.008799 | from pyspark import SparkContext, SparkConf
folder_name = "input/"
out_folder_name = "json/"
#file_name = "stackexchange-posts-sample.xml"
file_name = "Posts.xml"
hdfs = "ec2-52-8-214-93.us-west-1.compute.amazonaws.com:9000"
def jsoner(dic):
import json
return json.dumps(dic)
def dicte | r(row):
header = ['id','posttypeid','score','answer','body','snippets']
header_map = ['Id','PostTypeId','Score','AcceptedAnswerId','Body','snippets']
out = {}
for ind, h in enumerate(header_map):
if h == 'AcceptedAnswerId':
if h in row.keys():
out[header[ind]] = row[h]
else:
out[he | ader[ind]] = 'NULL'
continue
out[header[ind]] = row[h]
return out
def load_row(row):
from HTMLParser import HTMLParser
class PostsBodyParser(HTMLParser):
def __init__(self, *args, **kwargs):
HTMLParser.__init__(self, *args, **kwargs)
self.recording = 0
self.codes = ''
def handle_starttag(self, tag, attrs):
if tag != 'code':
return
if self.recording:
self.recording += 1
self.recording = 1
def handle_endtag(self, tag):
if tag != 'code':
return
if self.recording:
self.recording -= 1
def handle_data(self, data):
if self.recording:
self.codes += "<code>" + data + "</code>"
new_row = {}
for key,value in row.attrib.items():
new_row[key] = value
parser = PostsBodyParser()
parser.feed(new_row['Body'])
new_row['snippets'] = parser.codes
return dicter(new_row)
def fetch_line(line):
if line[-2] == '/':
import xml.etree.ElementTree as ET
row = ET.fromstring(line.encode('UTF-8'))
return load_row(row)
conf = SparkConf().setAppName("ESTest")
sc = SparkContext(conf=conf)
file = sc.textFile("hdfs://"+hdfs+"/"+folder_name+file_name)
lines = file.map(lambda line: fetch_line(line))\
.filter(lambda dic: dic is not None)\
.filter(lambda dic: dic['posttypeid'] == '1')\
.map(lambda d: jsoner(d))
lines.saveAsTextFile("hdfs://"+hdfs+"/"+out_folder_name+'middles')
|
niftynei/zulip | zerver/lib/html_diff.py | Python | apache-2.0 | 4,261 | 0.003051 | from __future__ import absolute_import
from typing import Callable, Tuple, Text
from django.conf import settings
from diff_match_patch import diff_match_patch
import platform
import logging
# TODO: handle changes in link hrefs
def highlight_with_class(klass, text):
# type: (Text, Text) -> Text
return '<span class="%s">%s</span>' % (klass, text)
def highlight_inserted(text):
# type: (Text) -> Text
return highlight_with_class('highlight_text_inserted', text)
def highlight_deleted(text):
# type: (Text) -> Text
| return highlight_with_class('highlight_text_deleted', text)
def highlight_replaced(text):
# type: (Text) -> | Text
return highlight_with_class('highlight_text_replaced', text)
def chunkize(text, in_tag):
# type: (Text, bool) -> Tuple[List[Tuple[Text, Text]], bool]
start = 0
idx = 0
chunks = [] # type: List[Tuple[Text, Text]]
for c in text:
if c == '<':
in_tag = True
if start != idx:
chunks.append(('text', text[start:idx]))
start = idx
elif c == '>':
in_tag = False
if start != idx + 1:
chunks.append(('tag', text[start:idx + 1]))
start = idx + 1
idx += 1
if start != idx:
chunks.append(('tag' if in_tag else 'text', text[start:idx]))
return chunks, in_tag
def highlight_chunks(chunks, highlight_func):
# type: (List[Tuple[Text, Text]], Callable[[Text], Text]) -> Text
retval = u''
for type, text in chunks:
if type == 'text':
retval += highlight_func(text)
else:
retval += text
return retval
def verify_html(html):
# type: (Text) -> bool
# TODO: Actually parse the resulting HTML to ensure we don't
# create mal-formed markup. This is unfortunately hard because
# we both want pretty strict parsing and we want to parse html5
# fragments. For now, we do a basic sanity check.
in_tag = False
for c in html:
if c == '<':
if in_tag:
return False
in_tag = True
elif c == '>':
if not in_tag:
return False
in_tag = False
if in_tag:
return False
return True
def highlight_html_differences(s1, s2):
# type: (Text, Text) -> Text
differ = diff_match_patch()
ops = differ.diff_main(s1, s2)
differ.diff_cleanupSemantic(ops)
retval = u''
in_tag = False
idx = 0
while idx < len(ops):
op, text = ops[idx]
next_op = None
if idx != len(ops) - 1:
next_op, next_text = ops[idx + 1]
if op == diff_match_patch.DIFF_DELETE and next_op == diff_match_patch.DIFF_INSERT:
# Replace operation
chunks, in_tag = chunkize(next_text, in_tag)
retval += highlight_chunks(chunks, highlight_replaced)
idx += 1
elif op == diff_match_patch.DIFF_INSERT and next_op == diff_match_patch.DIFF_DELETE:
# Replace operation
# I have no idea whether diff_match_patch generates inserts followed
# by deletes, but it doesn't hurt to handle them
chunks, in_tag = chunkize(text, in_tag)
retval += highlight_chunks(chunks, highlight_replaced)
idx += 1
elif op == diff_match_patch.DIFF_DELETE:
retval += highlight_deleted(' ')
elif op == diff_match_patch.DIFF_INSERT:
chunks, in_tag = chunkize(text, in_tag)
retval += highlight_chunks(chunks, highlight_inserted)
elif op == diff_match_patch.DIFF_EQUAL:
chunks, in_tag = chunkize(text, in_tag)
retval += text
idx += 1
if not verify_html(retval):
from zerver.lib.actions import internal_send_message
# We probably want more information here
logging.getLogger('').error('HTML diff produced mal-formed HTML')
if settings.ERROR_BOT is not None:
subject = "HTML diff failure on %s" % (platform.node(),)
internal_send_message(settings.ERROR_BOT, "stream",
"errors", subject, "HTML diff produced malformed HTML")
return s2
return retval
|
chhans/tor-automation | patternexperiment.py | Python | mit | 8,474 | 0.02903 | from classifier import Classifier
from itertools import combinations
from datetime import datetime
import sys
import os
open_path = "PatternDumps/open"
closed_path = "PatternDumps/closed"
monitored_sites = ["cbsnews.com", "google.com", "nrk.no", "vimeo.com", "wikipedia.org", "youtube.com"]
per_burst_weight = 1
total_cells_weight = 1.1
diff_threshold = 1.5 # Higher threshold implies lower true and false positive rate
max_threshold = 7
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def indexOfSortedValues(l, descending=False):
sort = sorted(l, reverse=descending)
indices = [l.index(x) for x in sort]
return indices
def calculateDi | stanceVotes(vector, w):
G = indexOfSortedValues(vector)
l = float(len(vector))
votes = []
for i in range(len(vector)):
j = i
while True:
try:
r = G.index(j)
br | eak
except:
j -= 1
v = 2*w - 2*r/l*w
if v == 2.0:
v += 2.0
votes.append(v)
return votes
def createTrainingSets(n):
l = []
for i in range(n):
l.append(range(0, i)+range(i+1, n)+range(i, i+1))
return l
def matchAgainstClassifiers(clf, fp):
pred = []
pbd = []
td = []
for c in clf:
pred.append(c.predict(fp))
pbd.append(c.perBurstDistance(fp))
td.append(c.totalDistance(fp))
pbv = calculateDistanceVotes(pbd, per_burst_weight)
tdv = calculateDistanceVotes(td, total_cells_weight)
total = [pred[i] + pbv[i] + tdv[i] for i in range(len(clf))]
return total
def getFingerprint(f_path):
with open(f_path, "r") as f:
fp = [int(dimension) for dimension in f.readlines()]
f.close()
return fp
def createClassifiers(file_list):
clf = [Classifier(site) for site in monitored_sites]
for i in file_list:
for k, site in enumerate(monitored_sites):
f_path = "%s/%s/%i.fp" % (closed_path, site, i)
fp = getFingerprint(f_path)
clf[k].train(fp)
return clf
def createExperimentSets(n_train, n_exp):
tot = n_train + n_exp
tot_r = range(tot)
combo_train = list(combinations(tot_r, n_train))
exp_sets = []
if n_train == 1:
for t in combo_train:
exp_sets.append([[y for y in t], [x for x in tot_r if x not in t]])
tot_r = [x for x in tot_r if x not in t]
else:
for t in combo_train:
exp_sets.append([[y for y in t], [x for x in tot_r if x not in t]])
return exp_sets
def closedWorldExperiment(n_train, n_exp):
experiment_sets = createExperimentSets(n_train, n_exp)
total_results = dict.fromkeys(range(0, 6), 0)
site_results = dict.fromkeys(monitored_sites, 0)
total = 0
for e_set in experiment_sets:
training_set = e_set[0]
# Create classifiers with training data
clf = createClassifiers(training_set)
for exp in e_set[1]:
for i, site in enumerate(monitored_sites):
f_path = "%s/%s/%d.fp" % (closed_path, site, exp)
fp = getFingerprint(f_path)
votes = matchAgainstClassifiers(clf, fp)
res = indexOfSortedValues(votes, descending=True)
j = 0
while True:
try:
rank = res.index(i-j)
break
except:
j += 1
total += 1
total_results[rank] += 1
if rank == 0:
site_results[site] += 1
storeClosedWorldResult(n_train, n_exp, total, total_results, site_results)
def openWorldFileList(train_range):
fp_list = []
for (dirpath, dirnames, filenames) in os.walk(closed_path):
for f in filenames:
if f[-3:] == ".fp" and not int(f[-4]) in train_range:
fp_list.append(dirpath+"/"+f)
for (dirpath, dirnames, filenames) in os.walk(open_path):
for f in filenames:
if f[-3:] == ".fp":
fp_list.append(dirpath+"/"+f)
return fp_list
# Returns True if votes imply an open world hit
def openWorldThreshold(votes):
if max(votes) > max_threshold and (max(votes)-sorted(votes)[-2]) > diff_threshold:
return True
else:
return False
# Returns true if the supplied fingerprint feature vector is predicted to belong to one of the marked sites
def openWorldPrediction(marked, feature_vector, clf):
votes = matchAgainstClassifiers(clf, feature_vector)
res = indexOfSortedValues(votes, descending=True)
guessed_site = monitored_sites[res[0]]
# The site is guessed to be one of the marked ones
if guessed_site in marked and openWorldThreshold(votes):
return True
else:
return False
def openWorldExperiment(n_train, n_classifier, marked):
true_positives = 0
false_positives = 0
false_negatives = 0
true_negatives = 0
training_sets = [x[0] for x in createExperimentSets(n_train, n_classifier)]
for training_range in training_sets:
# Create classifiers with training data, use remaining feature vectors as experiments
clf = createClassifiers(training_range)
fv_paths = openWorldFileList(training_range)
for f_path in fv_paths:
feature_vector = getFingerprint(f_path)
actual_site = f_path.split("/")[-2]
hit = openWorldPrediction(marked, feature_vector, clf)
if hit:
if actual_site in marked:
true_positives += 1
else:
false_positives += 1
else:
if actual_site in marked:
false_negatives += 1
else:
true_negatives += 1
storeOpenWorldResult(n_train, marked, true_positives, false_positives, false_negatives, true_negatives)
def storeClosedWorldResult(n_train, n_exp, total, total_results, site_results):
with open("PatternResults/closed/%s" % (str(datetime.now())), "w") as r_file:
print "Completed experiment. Achieved accuracy of %.2f%%. Detailed results stored in %s." % (100*(float(total_results[0]))/total, r_file.name)
r_file.write("Number of training instances: %d\n" % n_train)
r_file.write("Number of predictions: %d\n\n" % total)
r_file.write("Accuracy:\t%.2f\n" % (float(total_results[0])/total))
for guesses in total_results:
r_file.write("%d:\t\t%d\t%.2f\n" % (guesses, total_results[guesses], float(total_results[guesses])/total))
r_file.write("\nIndividual site accuracy:\n")
for site in site_results:
r_file.write("%s: %.2f\n" % (site, float(site_results[site])/(total/len(monitored_sites))))
r_file.close()
def storeOpenWorldResult(n_train, marked, true_positives, false_positives, false_negatives, true_negatives):
first_dir = "PatternResults/open/%s_training_instances" % n_train
mkdir(first_dir)
second_dir = "%s/%d_marked_sites" % (first_dir, len(marked))
mkdir(second_dir)
acc = float(true_positives+true_negatives)/(true_positives+false_negatives+false_positives+true_negatives)
with open("%s/%s" % (second_dir, marked), "w") as r_file:
print "Completed experiment. Achieved an accuracy of %.2f%%. Detailed results stored in %s." % (100*acc, r_file.name)
r_file.write("Number of training instances: %d\n" % n_train)
r_file.write("Marked sites: ")
for site in marked:
r_file.write(site+" ")
r_file.write("\n\nTP\tFP\tTN\tFN\n%d\t%d\t%d\t%d" % (true_positives, false_positives, true_negatives, false_negatives))
if __name__=="__main__":
try:
model = sys.argv[1]
if model not in ["closed", "open"]:
raise
except:
print "Error: first argument must be either 'open' or 'closed'"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
if model == "closed":
try:
n_train = int(sys.argv[2])
n_exp = int(sys.argv[3])
except:
print "Error: second and third argument must be the number of training instances and experiments respectively"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
closedWorldExperiment(n_train, n_exp)
elif model == "open":
try:
n_train = int(sys.argv[2])
n_exp = int(sys.argv[3])
except:
print "Error: second and third argument must be the number of training instances and experiments respectively"
print "Usage: python %s <closed/open> <number of training instances> <number of experiment instances> <marked sites (if open world)>" % sys.argv[0]
sys.exit()
marked = []
i = 4
while True:
try:
marked_site = sys.argv[i]
marked.append(marked_site)
i += 1
except:
break
if len(marked) == 0:
print "Error: no marked sites supplied."
print "Usage: python %s <closed/open> <number of training instances> <number of experiment |
xbmc/atv2 | xbmc/lib/libPython/Python/Demo/curses/xmas.py | Python | gpl-2.0 | 25,498 | 0.000824 | # asciixmas
# December 1989 Larry Bartz Indianapolis, IN
#
# $Id: xmas.py 36559 2004-07-18 05:56:09Z tim_one $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the one's I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I | used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "Thomas Gellekum <tg@FreeBSD.org>"
def set_color(win, color):
if curses.has_colors():
n = color + 1
curses.init_pair(n, color, my_bg)
win.attroff(curs | es.A_COLOR)
win.attron(curses.color_pair(n))
def unset_color(win):
if curses.has_colors():
win.attrset(curses.color_pair(0))
def look_out(msecs):
curses.napms(msecs)
if stdscr.getch() != -1:
curses.beep()
sys.exit(0)
def boxit():
for y in range(0, 20):
stdscr.addch(y, 7, ord('|'))
for x in range(8, 80):
stdscr.addch(19, x, ord('_'))
for x in range(0, 80):
stdscr.addch(22, x, ord('_'))
return
def seas():
stdscr.addch(4, 1, ord('S'))
stdscr.addch(6, 1, ord('E'))
stdscr.addch(8, 1, ord('A'))
stdscr.addch(10, 1, ord('S'))
stdscr.addch(12, 1, ord('O'))
stdscr.addch(14, 1, ord('N'))
stdscr.addch(16, 1, ord("'"))
stdscr.addch(18, 1, ord('S'))
return
def greet():
stdscr.addch(3, 5, ord('G'))
stdscr.addch(5, 5, ord('R'))
stdscr.addch(7, 5, ord('E'))
stdscr.addch(9, 5, ord('E'))
stdscr.addch(11, 5, ord('T'))
stdscr.addch(13, 5, ord('I'))
stdscr.addch(15, 5, ord('N'))
stdscr.addch(17, 5, ord('G'))
stdscr.addch(19, 5, ord('S'))
return
def fromwho():
stdscr.addstr(21, 13, FROMWHO)
return
def tree():
set_color(treescrn, curses.COLOR_GREEN)
treescrn.addch(1, 11, ord('/'))
treescrn.addch(2, 11, ord('/'))
treescrn.addch(3, 10, ord('/'))
treescrn.addch(4, 9, ord('/'))
treescrn.addch(5, 9, ord('/'))
treescrn.addch(6, 8, ord('/'))
treescrn.addch(7, 7, ord('/'))
treescrn.addch(8, 6, ord('/'))
treescrn.addch(9, 6, ord('/'))
treescrn.addch(10, 5, ord('/'))
treescrn.addch(11, 3, ord('/'))
treescrn.addch(12, 2, ord('/'))
treescrn.addch(1, 13, ord('\\'))
treescrn.addch(2, 13, ord('\\'))
treescrn.addch(3, 14, ord('\\'))
treescrn.addch(4, 15, ord('\\'))
treescrn.addch(5, 15, ord('\\'))
treescrn.addch(6, 16, ord('\\'))
treescrn.addch(7, 17, ord('\\'))
treescrn.addch(8, 18, ord('\\'))
treescrn.addch(9, 18, ord('\\'))
treescrn.addch(10, 19, ord('\\'))
treescrn.addch(11, 21, ord('\\'))
treescrn.addch(12, 22, ord('\\'))
treescrn.addch(4, 10, ord('_'))
treescrn.addch(4, 14, ord('_'))
treescrn.addch(8, 7, ord('_'))
treescrn.addch(8, 17, ord('_'))
treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
treescrn.addstr(14, 11, "| |")
treescrn.addstr(15, 11, "|_|")
unset_color(treescrn)
treescrn.refresh()
w_del_msg.refresh()
return
def balls():
treescrn.overlay(treescrn2)
set_color(treescrn2, curses.COLOR_BLUE)
treescrn2.addch(3, 9, ord('@'))
treescrn2.addch(3, 15, ord('@'))
treescrn2.addch(4, 8, ord('@'))
treescrn2.addch(4, 16, ord('@'))
treescrn2.addch(5, 7, ord('@'))
treescrn2.addch(5, 17, ord('@'))
treescrn2.addch(7, 6, ord('@'))
treescrn2.addch(7, 18, ord('@'))
treescrn2.addch(8, 5, ord('@'))
treescrn2.addch(8, 19, ord('@'))
treescrn2.addch(10, 4, ord('@'))
treescrn2.addch(10, 20, ord('@'))
treescrn2.addch(11, 2, ord('@'))
treescrn2.addch(11, 22, ord('@'))
treescrn2.addch(12, 1, ord('@'))
treescrn2.addch(12, 23, ord('@'))
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def star():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_YELLOW)
treescrn2.addch(0, 12, ord('*'))
treescrn2.standend()
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng1():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(3, 13, ord('\''))
treescrn2.addch(3, 12, ord(':'))
treescrn2.addch(3, 11, ord('.'))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng2():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(5, 14, ord('\''))
treescrn2.addch(5, 13, ord(':'))
treescrn2.addch(5, 12, ord('.'))
treescrn2.addch(5, 11, ord(','))
treescrn2.addch(6, 10, ord('\''))
treescrn2.addch(6, 9, ord(':'))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng3():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(7, 16, ord('\''))
treescrn2.addch(7, 15, ord(':'))
treescrn2.addch(7, 14, ord('.'))
treescrn2.addch(7, 13, ord(','))
treescrn2.addch(8, 12, ord('\''))
treescrn2.addch(8, 11, ord(':'))
treescrn2.addch(8, 10, ord('.'))
treescrn2.addch(8, 9, ord(','))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng4():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(9, 17, ord('\''))
treescrn2.addch(9, 16, ord(':'))
treescrn2.addch(9, 15, ord('.'))
treescrn2.addch(9, 14, ord(','))
treescrn2.addch(10, 13, ord('\''))
treescrn2.addch(10, 12, ord(':'))
treescrn2.addch(10, 11, ord('.'))
treescrn2.addch(10, 10, ord(','))
treescrn2.addch(11, 9, ord('\''))
treescrn2.addch(11, 8, ord(':'))
treescrn2.addch(11, 7, ord('.'))
treescrn2.addch(11, 6, ord(','))
treescrn2.addch(12, 5, ord('\''))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
treescrn2.refresh()
w_del_msg.refresh()
return
def strng5():
treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
set_color(treescrn2, curses.COLOR_WHITE)
treescrn2.addch(11, 19, ord('\''))
treescrn2.addch(11, 18, ord(':'))
treescrn2.addch(11, 17, ord('.'))
treescrn2.addch(11, 16, ord(','))
treescrn2.addch(12, 15, ord('\''))
treescrn2.addch(12, 14, ord(':'))
treescrn2.addch(12, 13, ord('.'))
treescrn2.addch(12, 12, ord(','))
treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
unset_color(treescrn2)
# save a fully lit tree
treescrn2.overlay(treescrn)
treescrn2.refresh()
w_del_msg.refresh()
return
def blinkit():
treescrn8.touchwin()
for cycle in range(0, 5):
if cycle == 0:
treescrn3.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
break
elif cycle == 1:
treescrn4.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
break
elif cycle == 2:
treescrn5.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
break
elif cycle == 3:
treescrn6.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
break
elif cycle == 4:
treescrn7.overlay(treescrn8)
treescrn8.refresh()
w_del_msg.refresh()
bre |
marcellodesales/svnedge-console | ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/client/progress.py | Python | agpl-3.0 | 30,842 | 0.009857 | #!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
import errno
import sys
import os
import time
from pkg.misc import PipeError, emsg
import pkg.portable as portable
class ProgressTracker(object):
""" This abstract class is used by the client to render and track
progress towards the completion of various tasks, such as
download, installation, update, etc.
The superclass is largely concerned with tracking the
raw numbers, and with calling various callback routines
when events of interest occur.
Different subclasses provide the actual rendering to the
user, with differing levels of detail and prettiness.
Note that as currently envisioned, this class is concerned
with tracking the progress of long-running operations: it is
NOT a general purpose output mechanism nor an error collector.
Subclasses of ProgressTracker must implement all of the
*_output_* methods.
External consumers should base their subclasses on the
NullProgressTracker class. """
def __init__(self):
self.reset()
def reset(self):
self.cat_cur_catalog = None
self.refresh_pub_cnt = 0
self.refresh_cur_pub_cnt = 0
self.refresh_cur_pub = None
self.ver_cur_fmri = None
self.eval_cur_fmri = None
self.eval_prop_npkgs = 0
self.eval_goal_install_npkgs = 0
self.eval_goal_update_npkgs = 0
self.eval_goal_remove_npkgs = 0
self.dl_goal_nfiles = 0
self.dl_cur_nfiles = 0
self.dl_goal_nbytes = 0
self.dl_cur_nbytes = 0
self.dl_goal_npkgs = 0
self.dl_cur_npkgs = 0
self.dl_cur_pkg = "None"
self.act_cur_nactions = 0
self.act_goal_nactions = 0
self.act_phase = "None"
self.act_phase_last = "None"
self.ind_cur_nitems = 0
self.ind_goal_nitems = 0
self.ind_phase = "None"
self.ind_phase_last = "None"
self.last_printed = 0 # when did we last emit status?
def catalog_start(self, catalog):
self.cat_cur_catalog = catalog
self.cat_output_start()
def catalog_done(self):
self.cat_output_done()
def cache_catalogs_start(self):
self.cache_cats_output_start()
def cache_catalogs_done(self):
self.cache_cats_output_done()
def load_catalog_cache_start(self):
self.load_cat_cache_output_start()
def load_catalog_cache_done(self):
self.load_cat_cache_output_done()
def refresh_start(self, pub_cnt):
self.refresh_pub_cnt = pub_cnt
self.refresh_cur_pub_cnt = 0
self.refresh_output_start()
def refresh_progress(self, pub):
self.refresh_cur_pub = pub
self.refresh_cur_pub_cnt += 1
self.refresh_output_progress()
def refresh_done(self):
self.refresh_output_done()
def evaluate_start(self, npkgs=-1):
self.eval_prop_npkgs = npkgs
self.eval_output_start()
def evaluate_progress(self, fmri=None):
if fmri:
self.eval_cur_fmri = fmri
self.eval_output_progress()
def evaluate_done(self, install_npkgs=-1, \
update_npkgs=-1, remove_npkgs=-1):
self.eval_goal_install_npkgs = install_npkgs
self.eval_goal_update_npkgs = update_npkgs
self.eval_goal_remove_npkgs = remove_npkgs
self.eval_output_done()
def verify_add_progress(self, fmri):
self.ver_cur_fmri = fmri
self.ver_output()
def verify_yield_error(self, actname, errors):
self.ver_output_error(actname, errors)
def verify_done(self):
self.ver_cur_fmri = None
self.ver_output_done()
def download_set_goal(self, npkgs, nfiles, nbytes):
self.dl_goal_npkgs = npkgs
self.dl_goal_nfiles = nfiles
self.dl_goal_nbytes = nbytes
def download_start_pkg(self, pkgname):
self.dl_cur_pkg = pkgname
if self.dl_goal_nbytes != 0:
self.dl_output()
def download_end_pkg(self):
self.dl_cur_npkgs += 1
if self.dl_goal_nbytes != 0:
self.dl_output()
        def download_add_progress(self, nfiles, nbytes):
                """ Call to provide news that the download has made progress """
                self.dl_cur_nbytes += nbytes
                self.dl_cur_nfiles += nfiles
                # Suppress output when there is nothing to download.
                if self.dl_goal_nbytes != 0:
                        self.dl_output()
        def download_done(self):
                """ Call when all downloading is finished """
                if self.dl_goal_nbytes != 0:
                        self.dl_output_done()
                # Sanity checks: warn (then assert) if the observed totals do
                # not match the goals set via download_set_goal().
                if self.dl_cur_npkgs != self.dl_goal_npkgs:
                        emsg("\nExpected %s pkgs, received %s pkgs instead." %
                            (self.dl_goal_npkgs, self.dl_cur_npkgs))
                if self.dl_cur_nfiles != self.dl_goal_nfiles:
                        emsg("\nExpected %s files, received %s files instead."
                            % (self.dl_goal_nfiles, self.dl_cur_nfiles))
                if self.dl_cur_nbytes != self.dl_goal_nbytes:
                        emsg("\nExpected %s bytes, received %s bytes instead."
                            % (self.dl_goal_nbytes, self.dl_cur_nbytes))
                assert self.dl_cur_npkgs == self.dl_goal_npkgs
                assert self.dl_cur_nfiles == self.dl_goal_nfiles
                assert self.dl_cur_nbytes == self.dl_goal_nbytes
        def download_get_progress(self):
                """Return the current (npkgs, nfiles, nbytes) download counters."""
                return (self.dl_cur_npkgs, self.dl_cur_nfiles,
                    self.dl_cur_nbytes)
        def actions_set_goal(self, phase, nactions):
                """Start action phase *phase* with *nactions* actions to perform."""
                self.act_phase = phase
                self.act_goal_nactions = nactions
                self.act_cur_nactions = 0
        def actions_add_progress(self):
                """Count one completed action; emit output only if there is a goal."""
                self.act_cur_nactions += 1
                if self.act_goal_nactions > 0:
                        self.act_output()
        def actions_done(self):
                """Finish the action phase; assert that all actions were performed."""
                if self.act_goal_nactions > 0:
                        self.act_output_done()
                assert self.act_goal_nactions == self.act_cur_nactions
def index_set_goal(self, p | hase, nitems):
self.ind_phase = phase
self.ind_goal_nitems = ni | tems
self.ind_cur_nitems = 0
        def index_add_progress(self):
                """Count one indexed item; emit output only if there is a goal."""
                self.ind_cur_nitems += 1
                if self.ind_goal_nitems > 0:
                        self.ind_output()
def index_done(self):
if self.ind_goal_nitems > 0:
|
Distrotech/iksemel | python/test/runtests.py | Python | lgpl-2.1 | 423 | 0.004728 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
def runtests() | :
fail = 0
for test in os.listdir("."):
if test.startswith("tst_") and test.endswith(".py"):
if 0 != subprocess.call(["./" + test]):
fail += 1
print test, "failed!"
if not fail:
return 0
return 1
if __nam | e__ == "__main__":
sys.exit(runtests())
|
troycomi/microMS | ImageUtilities/blob.py | Python | mit | 2,709 | 0.009598 | from GUICanvases import GUIConstants
import matplotlib as mpl
class blob(object):
    """
    Representation of a target point
    """
    def __init__(self, x = float(0), y = float(0),
                 radius = float(GUIConstants.DEFAULT_BLOB_RADIUS),
                 circularity = float(1), group = None):
        '''
        Initialize a new blob with the specified position, shape and group.
        x: x coordinate, default 0.0
        y: y coordinate, default 0.0
        radius: effective radius of the blob, default from GUIConstants
        circularity: clamped into [0, 1]; 1 (default) is a perfect circle
        group: optional group identifier
        '''
        self.X = x
        self.Y = y
        self.radius = float(radius)
        # Clamp circularity into the valid [0, 1] range.
        if circularity > 1:
            self.circularity = 1
        elif circularity < 0:
            self.circularity = 0
        else:
            self.circularity = circularity
        self.group = group

    @staticmethod
    def getXYList(blobs):
        '''
        Convert a list of blobs into a list of their (x, y) coordinates.
        blobs: list of blobs (or None)
        returns a list of (x, y) tuples in order, or None if blobs is None
        '''
        if blobs is None:
            return None
        return [(b.X, b.Y) for b in blobs]

    @staticmethod
    def blobFromSplitString(instrings):
        '''
        Parse a split toString() line into a new blob.
        instrings: list of strings (x, y, radius[, circularity]) or None
        returns a new blob; fields are left at defaults on bad input
        '''
        result = blob()
        if instrings is None:
            return result
        if len(instrings) in (3, 4):
            result.X = float(instrings[0])
            result.Y = float(instrings[1])
            result.radius = float(instrings[2])
            if len(instrings) == 4:
                result.circularity = float(instrings[3])
        return result

    @staticmethod
    def blobFromXYString(instring):
        '''
        Parse an "x_{}y_{}" formatted string into a new blob.
        instring: string of the form "x_{}y_{}" (or None)
        returns a new blob with the parsed x, y position
        '''
        result = blob()
        if instring is None:
            return result
        parts = instring.split('_')
        # parts == ['x', '<x value>y', '<y value>']; drop the trailing 'y'.
        result.X = float(parts[1][:-1])
        result.Y = float(parts[2])
        return result

    def toString(self):
        '''
        Generate a tab delimited string with the x, y, radius and
        circularity of the blob.
        '''
        return "{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format(self.X, self.Y,
                                                           self.radius,
                                                           self.circularity)
|
who-emro/meerkat_api | meerkat_api/util/__init__.py | Python | mit | 4,531 | 0.002869 | """
meerkat_api util functions
"""
from datetime import datetime
from dateutil import parser
import meerkat_abacus.util as abacus_util
import numpy as np
import meerkat_abacus.util.epi_week
def series_to_json_dict(series):
    """
    Takes pandas series and turns into a dict with string keys.

    Args:
       series: pandas series (or None)
    Returns:
       dict: values keyed by str(index label); native ints/floats are kept
       as-is, anything else is coerced to a Python float
    """
    if series is None:
        return {}
    ret = {}
    for key, value in series.to_dict().items():
        if isinstance(value, (float, int)):
            ret[str(key)] = value
        else:
            # np.asscalar() was removed in NumPy 1.23; ndarray/scalar
            # .item() is the supported way to get a native Python scalar.
            ret[str(key)] = float(value.item())
    return ret
def fix_dates(start_date, end_date):
    """
    We parse the start and end date and remove any timezone information.

    Missing end_date defaults to "now"; missing start_date defaults to the
    1st of January of end_date's year.  The start date is additionally
    clamped so it never precedes the start of its epidemiological year.

    Args:
       start_date: start date string (or falsy)
       end_date: end date string (or falsy)
    Returns:
       dates(tuple): (start_date, end_date) as naive datetimes
    """
    if end_date:
        # NOTE(review): microseconds are not zeroed in this branch, so a
        # parsed end_date keeps whatever sub-second value the parser gives.
        end_date = parser.parse(end_date).replace(hour=23,
                                                  minute=59,
                                                  second=59,
                                                  tzinfo=None)
    else:
        end_date = datetime.now()
    if start_date:
        start_date = parser.parse(start_date).replace(hour=0,
                                                      minute=0,
                                                      second=0,
                                                      tzinfo=None)
    else:
        # Default to midnight on Jan 1 of the end date's year.
        start_date = end_date.replace(month=1, day=1,
                                      hour=0, second=0,
                                      minute=0,
                                      microsecond=0)
    # Never start before the epidemiological year containing start_date.
    if start_date < meerkat_abacus.util.epi_week.epi_year_start_date(date=start_date):
        start_date = meerkat_abacus.util.epi_week.epi_year_start_date(date=start_date)
    return start_date, end_date
def row_to_dict(row):
    """
    Translate a SQLAlchemy row into a plain dict.

    Args:
       row: a single ORM object, or an iterable of ORM objects
            (as produced by a joined query), or something falsy
    Returns:
       data_dict(dict): column name -> value for a single object, or
       table name -> {column: value} for a joined row; {} for falsy input
    """
    if not row:
        return {}
    if hasattr(row, "__table__"):
        # Single mapped object: one flat column -> value mapping.
        return {col: getattr(row, col) for col in row.__table__.columns.keys()}
    # Joined row: one sub-dict per non-empty table, keyed by table name.
    result = {}
    for table in row:
        if not table:
            continue
        result[table.__tablename__] = {
            col: getattr(table, col)
            for col in table.__table__.columns.keys()
        }
    return result
def rows_to_dicts(rows, dict_id=None):
    """
    Translate a list of SQLAlchemy rows into dicts.

    Args:
       rows: list of SQLAlchemy rows
       dict_id: when set, return a dict keyed by that column's value
    Returns:
       data_dicts: a list of dicts, or a dict of dicts when dict_id is set
    Raises:
       TypeError: when dict_id is used with tuple (joined-query) rows,
       which have no single attribute to key on
    """
    if not dict_id:
        return [row_to_dict(row) for row in rows]
    if len(rows) > 0 and isinstance(rows[0], tuple):
        raise TypeError("Can not use dict_id=True with tuple rows")
    return {getattr(row, dict_id): row_to_dict(row) for row in rows}
def find_level(location, sublevel, locations):
    """
    Return the instance of sublevel that location is a child of.

    Args:
       location: location id
       sublevel: the sublevel we are interested in
       locations: all locations in a dict
    Returns:
       location_id(int): id of the matching location, or None
    """
    target = int(location)
    candidates = (
        loc for loc in locations
        if locations[loc].level == sublevel
        and abacus_util.is_child(loc, target, locations)
    )
    return next(candidates, None)
def get_children(parent, locations, clinic_type=None, require_case_report=True, case_type=None):
    """
    Return all clinics that are children of parent.

    Args:
       parent: parent_id
       locations: all locations in a dict
       clinic_type: when truthy, only clinics of this type are returned
       require_case_report: when True, only case-reporting clinics qualify
       case_type: when not None, only clinics with this case type qualify
    Returns:
       list of location ids
    """
    children = []
    for location_id in locations.keys():
        loc = locations[location_id]
        # Guard clauses mirror the original filter conditions exactly.
        if require_case_report and not loc.case_report:
            continue
        if clinic_type and loc.clinic_type != clinic_type:
            continue
        if case_type is not None and loc.case_type != case_type:
            continue
        if abacus_util.is_child(parent, location_id, locations):
            children.append(location_id)
    return children
|
atheendra/access_keys | keystone/tests/test_url_middleware.py | Python | apache-2.0 | 2,020 | 0 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import w | ebob
from keystone import middleware
from keystone import tests
class FakeApp(object):
    """Fakes a WSGI app URL normalized."""
    def __call__(self, env, start_response):
        # Minimal WSGI callable: always answer 200 with a fixed body so
        # tests can focus on what the middleware did to the request.
        resp = webob.Response()
        resp.body = 'SUCCESS'
        return resp(env, start_response)
class UrlMiddlewareTest(tests.TestCase):
    """Exercises the URL-normalizing middleware wrapped around FakeApp."""
    def setUp(self):
        self.middleware = middleware.NormalizingFilter(FakeApp())
        self.response_status = None
        self.response_headers = None
        super(UrlMiddlewareTest, self).setUp()
    def start_fake_response(self, status, headers):
        # WSGI start_response stand-in: capture status code and headers.
        self.response_status = int(status.split(' ', 1)[0])
        self.response_headers = dict(headers)
    def test_trailing_slash_normalization(self):
        """Tests /v2.0/tokens and /v2.0/tokens/ normalized URLs match."""
        req1 = webob.Request.blank('/v2.0/tokens')
        req2 = webob.Request.blank('/v2.0/tokens/')
        self.middleware(req1.environ, self.start_fake_response)
        self.middleware(req2.environ, self.start_fake_response)
        # The middleware mutates environ in place, so both paths converge.
        self.assertEqual(req1.path_url, req2.path_url)
        self.assertEqual(req1.path_url, 'http://localhost/v2.0/tokens')
    def test_rewrite_empty_path(self):
        """Tests empty path is rewritten to root."""
        req = webob.Request.blank('')
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(req.path_url, 'http://localhost/')
constantx/dotfiles | sublime3/Packages/SideBarEnhancements-st3/desktop/dialog.py | Python | mit | 17,225 | 0.007431 | #!/usr/bin/env python
"""
Simple desktop dialogue box support for Python.
Copyright (C) 2007, 2009 Paul Boddie <paul@boddie.org.uk>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
--------
Opening Dialogue Boxes (Dialogs)
--------------------------------
To open a dialogue box (dialog) in the current desktop environment, relying on
the automatic detection of that environment, use the appropriate dialogue box
class:
question = desktop.dialog.Question("Are you sure?")
result = question.open()
To override the detected desktop, specify the desktop parameter to the open
function as follows:
question.open("KDE") # Insists on KDE
question.open("GNOME") # Insists on GNOME
The dialogue box options are documented in each class's docstring.
Available dialogue box classes are listed in the desktop.dialog.available
attribute.
Supported desktop environments are listed in the desktop.dialog.supported
attribute.
"""
from desktop import use_desktop, _run, _readfrom, _status
class _wrapper:
    """Base for callables that post-process a subprocess handler's output."""
    def __init__(self, handler):
        self.handler = handler
class _readvalue(_wrapper):
    """Return the handler's output with surrounding whitespace stripped."""
    def __call__(self, cmd, shell):
        return self.handler(cmd, shell).strip()
class _readinput(_wrapper):
    """Return the handler's output with only the final character removed."""
    def __call__(self, cmd, shell):
        return self.handler(cmd, shell)[:-1]
class _readvalues_kdialog(_wrapper):
    """Split kdialog output ('"a" "b" ...') into a list of values."""
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell).strip().strip('"')
        return output.split('" "') if output else []
class _readvalues_zenity(_wrapper):
    """Split zenity output ('a|b|...') into a list of values."""
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell).strip()
        return output.split("|") if output else []
class _readvalues_Xdialog(_wrapper):
    """Split Xdialog output ('a/b/...') into a list of values."""
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell).strip()
        return output.split("/") if output else []
# Dialogue parameter classes.
class String:
    "A generic parameter."

    def __init__(self, name):
        self.name = name

    def convert(self, value, program):
        # A falsy value becomes the empty string; the result is always a
        # single-element argument list.
        if value:
            return [value]
        return [""]
class Strings(String):
    "Multiple string parameters."

    def convert(self, value, program):
        # Pass the sequence through unchanged; a falsy value becomes [].
        if not value:
            return []
        return value
class StringPairs(String):
    "Multiple string parameters duplicated to make identifiers."

    def convert(self, value, program):
        # Each item appears twice in a row: once as identifier, once as text.
        return [item for v in value for item in (v, v)]
class StringKeyword:
    "A keyword parameter."

    def __init__(self, keyword, name):
        self.keyword = keyword
        self.name = name

    def convert(self, value, program):
        # Render as a single "keyword=value" argument; falsy values give
        # an empty right-hand side.
        text = value or ""
        return [self.keyword + "=" + text]
class StringKeywords:
    "Multiple keyword parameters."

    def __init__(self, keyword, name):
        self.keyword = keyword
        self.name = name

    def convert(self, value, program):
        # One "keyword=value" argument per item; None yields no arguments.
        return [self.keyword + "=" + v for v in (value or [])]
class Integer(String):
    "An integer parameter."

    # Fallback values used when no explicit value is supplied.
    defaults = {
        "width" : 40,
        "height" : 15,
        "list_height" : 10
        }

    # Multiplier applied when the dimension is expressed in pixels.
    scale = 8

    def __init__(self, name, pixels=0):
        String.__init__(self, name)
        self.factor = self.scale if pixels else 1

    def convert(self, value, program):
        if value is None:
            value = self.defaults[self.name]
        scaled = int(value) * self.factor
        return [str(scaled)]
class IntegerKeyword(Integer):
    "An integer keyword parameter."

    def __init__(self, keyword, name, pixels=0):
        Integer.__init__(self, name, pixels)
        self.keyword = keyword

    def convert(self, value, program):
        if value is None:
            value = self.defaults[self.name]
        scaled = int(value) * self.factor
        # Render as a single "keyword=value" argument.
        return [self.keyword + "=" + str(scaled)]
class Boolean(String):
    "A boolean parameter."

    # Per-program [false, true] spellings.
    values = {
        "kdialog" : ["off", "on"],
        "zenity" : ["FALSE", "TRUE"],
        "Xdialog" : ["off", "on"]
        }

    def convert(self, value, program):
        off_text, on_text = self.values[program]
        return [on_text] if value else [off_text]
class MenuItemList(String):
    "A menu item list parameter."

    def convert(self, value, program):
        # Flatten each item into its value followed by its display text.
        return [part for item in value for part in (item.value, item.text)]
class ListItemList(String):
    "A radiolist/checklist item list parameter."

    def __init__(self, name, status_first=0):
        String.__init__(self, name)
        self.status_first = status_first

    def convert(self, value, program):
        converted = []
        for item in value:
            # Render the item's status in the target program's dialect.
            status = Boolean(None).convert(item.status, program)
            if self.status_first:
                converted.extend(status)
            converted.append(item.value)
            converted.append(item.text)
            if not self.status_first:
                converted.extend(status)
        return converted
# Dialogue argument values.
class MenuItem:
    "A menu item which can also be used with radiolists and checklists."

    def __init__(self, value, text, status=0):
        # value: identifier returned on selection; text: display label;
        # status: initial on/off state for radiolists and checklists.
        self.value, self.text, self.status = value, text, status
# Dialogue classes.
class Dialogue:
    """Base class assembling and running a desktop dialogue command line."""

    # Map of desktop environment name to dialogue helper program.
    commands = {
        "KDE" : "kdialog",
        "GNOME" : "zenity",
        "XFCE" : "zenity", # NOTE: Based on observations with Xubuntu.
        "X11" : "Xdialog"
        }

    def open(self, desktop=None):

        """
        Open a dialogue box (dialog) using a program appropriate to the desktop
        environment in use.

        If the optional 'desktop' parameter is specified then attempt to use
        that particular desktop environment's mechanisms to open the dialog
        instead of guessing or detecting which environment is being used.

        Suggested values for 'desktop' are "standard", "KDE", "GNOME",
        "Mac OS X", "Windows".

        The result of the dialogue interaction may be a string indicating user
        input (for Input, Password, Menu, Pulldown), a list of strings
        indicating selections of one or more items (for RadioList, CheckList),
        or a value indicating true or false (for Question, Warning, Message,
        Error).

        Where a string value may be expected but no choice is made, an empty
        string may be returned. Similarly, where a list of values is expected
        but no choice is made, an empty list may be returned.
        """

        # Decide on the desktop environment in use.

        desktop_in_use = use_desktop(desktop)

        # Get the program.

        try:
            program = self.commands[desktop_in_use]
        except KeyError:
            raise OSError("Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use)

        # The handler is one of the functions communicating with the subprocess.
        # Some handlers return boolean values, others strings.
        # NOTE(review): self.info appears to be supplied by each concrete
        # dialogue subclass — confirm against the subclass definitions.

        handler, options = self.info[program]

        # Build the argument list: literal strings pass through unchanged,
        # parameter objects convert the corresponding attribute's value.
        cmd = [program]
        for option in options:
            if isinstance(option, str):
                cmd.append(option)
            else:
                value = getattr(self, option.name, None)
                cmd += option.convert(value, program)

        return handler(cmd, 0)
class Simple(Dialogue):
    """A dialogue carrying a text message with optional width and height."""
    def __init__(self, text, width=None, height=None):
        self.text = text
        self.width = width
        self.height = height
class Question(Simp |
RowleyGroup/pbs-generator | pyqueue/enums.py | Python | gpl-3.0 | 1,317 | 0.001519 | """
Enums.
"""
try:
from enum import Enum
except ImportError:
Enum = object
class MailTypes(Enum):
    """
    MailTypes enum

    Events for which notification mail can be requested.
    NOTE(review): names presumably mirror scheduler --mail-type options —
    confirm against the code that serializes these values.
    """
    ALL = 0
    BEGIN = 1
    END = 2
    FAIL = 3
    REQUEUE = 4
class DependencyTypes(Enum):
    """
    DependencyTypes enum

    Job dependency conditions understood by the supported schedulers.
    """

    # This job can begin execution after the specified jobs have begun
    # execution.
    # SUPPORT: Slurm
    AFTER = 0

    # This job can begin execution after the specified jobs have terminated.
    # SUPPORT: Slurm, PBS
    AFTER_ANY = 1

    # A task of this job array can begin execution after the corresponding
    # task ID in the specified job has completed successfully
    # (ran to completion with an exit code of zero).
    # SUPPORT: Slurm
    AFTER_CORR = 2

    # This job can begin execution after the specified jobs have successfully executed
    # (ran to completion with an exit code of zero).
    # SUPPORT: Slurm, PBS
    AFTER_OK = 3

    # This job can begin execution after the specified jobs have terminated in some failed state
    # (non-zero exit code, node failure, timed out, etc).
    # SUPPORT: Slurm, PBS
    AFTER_NOT_OK = 4

    # This job can begin execution after any previously launched jobs
    # sharing the same job name and user have terminated.
    # SUPPORT: Slurm
    # BUG FIX: this was previously 4, the same value as AFTER_NOT_OK, which
    # under enum.Enum silently made SINGLETON an *alias* of AFTER_NOT_OK.
    SINGLETON = 5
|
XuezheMax/LasagneNLP | bi_lstm_cnn_crf.py | Python | apache-2.0 | 15,909 | 0.005091 | __author__ = 'max'
import time
import sys
import argparse
from lasagne_nlp.utils import utils
import lasagne_nlp.utils.data_processor as data_processor
from lasagne_nlp.utils.objectives import crf_loss, crf_accuracy
import lasagne
import theano
import theano.tensor as T
from lasagne_nlp.networks.networks import build_BiLSTM_CNN_CRF
import numpy as np
def main():
parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-CNN-CRF')
parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna', 'random'], help='Embedding for words',
required=True)
parser.add_argument('--embedding_dict', default=None, help='path for embedding dict')
parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov', 'adadelta'], help='update algorithm',
default='sgd')
parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')
parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
parser.add_argument('--train') # "data/POS-penn/wsj/split1/wsj1.train.original"
parser.add_argument('--dev') # "data/POS-penn/wsj/split1/wsj1.dev.original"
parser.add_argument('--test') # "data/POS-penn/wsj/split1/wsj1.test.original"
args = parser.parse_args()
def construct_input_layer():
if fine_tune:
layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
output_size=embedd_dim,
W=embedd_table, name='embedding')
return layer_embedding
else:
layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
name='input')
return layer_input
def construct_char_input_layer():
layer_char_input = lasagne.layers.InputLayer(shape=(None, max_sent_length, max_char_length),
input_var=char_input_var, name='char-input')
layer_char_input = lasagne.layers.reshape(layer_char_input, (-1, [2]))
layer_char_embedding = lasagne.layers.EmbeddingLayer(layer_char_input, input_size=char_alphabet_size,
output_size=char_embedd_dim, W=char_embedd_table,
name='char_embedding')
layer_char_input = lasagne.layers.DimshuffleLayer(layer_char_embedding, pattern=(0, 2, 1))
return layer_char_input
logger = utils.get_logger("BiLSTM-CNN-CRF")
fine_tune = args.fine_tune
oov = args.oov
regular = args.regular
embedding = args.embedding
embedding_path = args.embedding_dict
train_path = args.train
dev_path = args.dev
test_path = args.test
update_algo = args.update
grad_clipping = args.grad_clipping
peepholes = args.peepholes
num_filters = args.num_filters
gamma = args.gamma
output_predict = args.output_prediction
dropout = args.dropout
X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
embedd_table, label_alphabet, \
C_train, C_dev, C_test, char_embedd_table = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
test_path, oov=oov,
fine_tune=fine_tune,
embedding=embedding,
embedding_path=embedding_path,
use_character=True)
num_labels = label_alphabet.size() - 1
logger.info("constructing network...")
# create variables
target_var = T.imatrix(name='targets')
mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
if fine_tune:
input_var = T.imatrix(name='inputs')
num_data, max_length = X_train.shape
alphabet_size, embedd_dim = embedd_table.shape
else:
input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
num_data, max_length, embedd_dim = X_train.shape
char_input_var = T.itensor3(name='char-inputs')
num_data_char, max_sent_length, max_char_length = C_train.shape
char_alphabet_size, char_embedd_dim = char_embedd_table.shape
assert (max_length == max_sent_length)
assert (num_data == num_data_char)
# construct input and mask layers
layer_incoming1 = construct_char_input_layer()
layer_incoming2 = construct_input_layer()
layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
# construct bi-rnn-cnn
num_units = args.num_units
bi_lstm_cnn_crf = build_BiLSTM_CNN_CRF(layer_incoming1, layer_incoming2, num_units, num_labels, mask=layer_mask,
grad_clipping=grad_clipping, peepholes=peepholes, num_filters=num_filters,
dropout=dropout)
logger.info("Network structure: hidden=%d, filter=%d" % (num_units, num_filters))
# compute loss
num_tokens = mask_var.sum(dtype=theano.config.floatX)
# get outpout of bi-lstm-cnn-crf shape [batch, length, num_labels, num_labels]
energies_train = lasagne.layers.get_output(bi_lstm_cnn_crf)
energies_eval = lasagne.layers.get_output(bi_lstm_cnn_crf, deterministic=True)
loss_train = crf_loss(energies_train, target_var, mask_var).mean()
loss_eval = crf_loss(energies_eval, target_var, mask_var).mean()
# l2 regularization?
if regular == 'l2':
l2_penalty = lasagne.regularization.regularize_network_params(bi_lstm_cnn_crf, lasagne.regularization.l2)
loss_train = loss_train + gamma * l2_penalty
_, corr_train = crf_accuracy(energies_train, target_var)
corr_train = (corr_train * mask_var).sum(dtype=theano.config.floatX)
prediction_eval, corr_eval = crf_accurac | y(energies_eval, target_var)
corr_eval = (corr_eval * mask_var).sum(dtype=theano.config.floatX)
# Create update expressions for training.
# hyper parameters to tune: learning rate, momentum, regularization.
batch_size = args.batch_size
learning_rate = 1.0 if update_algo == 'adadelta' else args.learning_rate
decay_rate = args.decay_rate
momentum = 0.9
params = lasagne.layer | s.get_all_params(bi_lstm_cnn_crf, trainable=True)
updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
|
erseco/ugr_desarrollo_aplicaciones_internet | Practica_01/ejercicio_01.py | Python | gpl-3.0 | 1,395 | 0.017204 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
#-------------------------------------------------------------------------------
#
# DAI - Desarrollo de Aplicaciones para Internet
#
# 2014 Ernesto Serrano <erseco@correo.ugr.es>
#
#-------------------------------------------------------------------------------
Programe un mini-juego de "adivinar" un numero (entre 1 y 100) que el ordenador establezca al azar.
El usuario puede ir introduciendo numeros y el ordenador le responderia con mensajes del estilo
"El numero buscado el mayor / menor".
El programa debe finalizar cuando el usuario adivine el n | umero (con su correspondiente mensaje de felicitacion)
o bien cuando el usuario haya realizado 10 intentos incorrectos de adivinacion.
#-------------------------------------------------------------------------------
'''
from random import randint

# Pick the secret number and track whether the user has found it.
rnd = randint(1,100)
encontrado = False

# BUG FIX: xrange(1, 10) only allowed 9 guesses although the program
# promises (and reports) 10 attempts; xrange(1, 11) gives exactly 10.
for x in xrange(1,11):
    valor = input ("Introduzca un numero del 1 al 100: ")
    if valor == rnd:
        encontrado = True
        print("Valor encontrado")
        break # Leave the loop as soon as the number is guessed.
    elif valor > 100 or valor < 1:
        print("El numero tiene que estar entre el rango [1-100]")
    elif valor > rnd:
        print("El numero es menor que el introducido")
    elif valor < rnd:
        print("El numero es mayor que el introducido")

if not encontrado:
    # BUG FIX: corrected user-facing typo "itentos" -> "intentos".
    print("El valor no se ha encontrado tras 10 intentos.")
cloudysunny14/lakshmi | test/testlib/mox.py | Python | apache-2.0 | 60,051 | 0.006461 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occured. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
WARNING! Mock objects created by Mox are not thread-safe. If you are
call a mock in multiple threads, it should be guarded by a mutex.
TODO(user): Add the option to make mocks thread-safe!
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import difflib
import inspect
import re
import types
import unittest
import stubout
class Error(AssertionError):
  """Base exception for this module.

  NOTE(review): subclassing AssertionError presumably makes mox failures
  surface as ordinary test failures in unittest runners — confirm.
  """

  pass
class ExpectedMethodCallsError(Error):
  """Raised when Verify() is called before all expected methods have been called
  """

  def __init__(self, expected_methods):
    """Init exception.

    Args:
      # expected_methods: A sequence of MockMethod objects that should have been
      #   called.
      expected_methods: [MockMethod]

    Raises:
      ValueError: if expected_methods contains no methods.
    """

    if not expected_methods:
      raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_methods = expected_methods

  def __str__(self):
    # One numbered line per uncalled method.
    lines = []
    for index, method in enumerate(self._expected_methods):
      lines.append("%3d.  %s" % (index, method))
    return "Verify: Expected methods never called:\n%s" % ("\n".join(lines),)
class UnexpectedMethodCallError(Error):
  """Raised when an unexpected method is called.

  This can occur if a method is called with incorrect parameters, or out of the
  specified order.
  """

  def __init__(self, unexpected_method, expected):
    """Init exception.

    Args:
      # unexpected_method: MockMethod that was called but was not at the head of
      #   the expected_method queue.
      # expected: MockMethod or UnorderedGroup the method should have
      #   been in.
      unexpected_method: MockMethod
      expected: MockMethod or UnorderedGroup
    """

    Error.__init__(self)
    if expected is None:
      self._str = "Unexpected method call %s" % (unexpected_method,)
    else:
      # Build a unified-style diff so the mismatch between the actual and
      # expected call is visible line by line in the message.
      differ = difflib.Differ()
      diff = differ.compare(str(unexpected_method).splitlines(True),
                            str(expected).splitlines(True))
      self._str = ("Unexpected method call.  unexpected:-  expected:+\n%s"
                   % ("\n".join(line.rstrip() for line in diff),))

  def __str__(self):
    return self._str
class UnknownMethodCallError(Error):
  """Raised if an unknown method is requested of the mock object."""

  def __init__(self, unknown_method_name):
    """Init exception.

    Args:
      # unknown_method_name: Method call that is not part of the mocked class's
      #   public interface.
      unknown_method_name: str
    """

    Error.__init__(self)
    self._unknown_method_name = unknown_method_name

  def __str__(self):
    return ("Method called is not a member of the object: %s"
            % self._unknown_method_name)
class PrivateAttributeError(Error):
  """
  Raised if a MockObject is passed a private additional attribute name.
  """

  def __init__(self, attr):
    Error.__init__(self)
    # Name of the private attribute that was (wrongly) requested.
    self._attr = attr

  def __str__(self):
    message = ("Attribute '%s' is private and should not be available in a "
               "mock object." % self._attr)
    return message
class ExpectedMockCreationError(Error):
  """Raised if mocks should have been created by StubOutClassWithMocks."""

  def __init__(self, expected_mocks):
    """Init exception.

    Args:
      # expected_mocks: A sequence of MockObjects that should have been
      #   created

    Raises:
      ValueError: if expected_mocks contains no methods.
    """

    if not expected_mocks:
      raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_mocks = expected_mocks

  def __str__(self):
    # One numbered line per mock that was never created.
    lines = []
    for index, mock in enumerate(self._expected_mocks):
      lines.append("%3d.  %s" % (index, mock))
    return "Verify: Expected mocks never created:\n%s" % ("\n".join(lines),)
class UnexpectedMockCreationError(Error):
  """Raised if too many mocks were created by StubOutClassWithMocks."""

  def __init__(self, instance, *params, **named_params):
    """Init exception.

    Args:
      # instance: the type of object that was created
      # params: parameters given during instantiation
      # named_params: named parameters given during instantiation
    """

    Error.__init__(self)
    self._instance = instance
    self._params = params
    self._named_params = named_params

  def __str__(self):
    # Render the unexpected constructor call, positional args first.
    positional = ", ".join(["%s" % param for param in self._params])
    message = "Unexpected mock creation: %s(%s" % (self._instance, positional)

    if self._named_params:
      keywords = ["%s=%s" % (k, v) for k, v in self._named_params.iteritems()]
      message += ", " + ", ".join(keywords)

    return message + ")"
class Mox(object):
"""Mox: a factory for creati | ng mock objects."""
# A list of types that should be | stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.FunctionType, types.InstanceType,
types.ModuleType, types.ObjectType, types.TypeType,
types.MethodType, types.UnboundMethodType,
]
# A list of types that may be stubbed out with a MockObjectFactory.
_USE_MOCK_FACTORY = [types.ClassType, types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock, attrs=None):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
attrs: dict of attribute names to values that will be set on the mock
object. Only public attributes may be set.
Returns:
MockObject that can be used as the class_to_mock would be.
"""
if attrs is None:
attrs = {}
new_mock = MockObject(class_to_mock, attrs=attrs)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self, description=None):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
Args:
description: str. Optionally, a descriptive name for the mock object being
created, for debugging output purposes.
|
irvs/ros_tms | tms_ts/tms_ts_smach/setup.py | Python | bsd-3-clause | 309 | 0 | # ! DO NOT MANUALLY INVOKE THI | S setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['tms_ts_smach'],
packa | ge_dir={'': 'src'})
setup(**setup_args)
|
andrewyoung1991/supriya | supriya/tools/ugentools/IFFT.py | Python | mit | 4,524 | 0.001768 | # -*- encoding: utf-8 -*-
from supriya.tools.ugentools.WidthFirstUGen import WidthFirstUGen
class IFFT(WidthFirstUGen):
r'''An inverse fast Fourier transform.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.IFFT.ar(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft
IFFT.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'FFT UGens'
__slots__ = ()
_ordered_input_names = (
'pv_chain',
'window_type',
'window_size',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
pv_chain=None,
calculation_rate=None,
window_size=0,
window_type=0,
):
WidthFirstUGen.__init__(
self,
calculation_rate=calculation_rate,
pv_chain=pv_chain,
window_si | ze=window_size,
window_type=window_type,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
pv_chain=None,
window_size=0,
window_type=0,
):
r'''Constructs an audio-rate IFFT.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.I | FFT.ar(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft
IFFT.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
pv_chain=pv_chain,
window_size=window_size,
window_type=window_type,
)
return ugen
@classmethod
def kr(
cls,
pv_chain=None,
window_size=0,
window_type=0,
):
r'''Constructs a control-rate IFFT.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.IFFT.kr(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft
IFFT.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
pv_chain=pv_chain,
window_size=window_size,
window_type=window_type,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def pv_chain(self):
r'''Gets `pv_chain` input of IFFT.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.IFFT.ar(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft.pv_chain
OutputProxy(
source=LocalBuf(
frame_count=2048.0,
channel_count=1.0,
calculation_rate=CalculationRate.SCALAR
),
output_index=0
)
Returns ugen input.
'''
index = self._ordered_input_names.index('pv_chain')
return self._inputs[index]
@property
def window_size(self):
r'''Gets `window_size` input of IFFT.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.IFFT.ar(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft.window_size
0.0
Returns ugen input.
'''
index = self._ordered_input_names.index('window_size')
return self._inputs[index]
@property
def window_type(self):
r'''Gets `window_type` input of IFFT.
::
>>> pv_chain = ugentools.LocalBuf(2048)
>>> ifft = ugentools.IFFT.ar(
... pv_chain=pv_chain,
... window_size=0,
... window_type=0,
... )
>>> ifft.window_type
0.0
Returns ugen input.
'''
index = self._ordered_input_names.index('window_type')
return self._inputs[index] |
anatol/namcap | Namcap/tests/package/test_infodirectory.py | Python | gpl-2.0 | 2,400 | 0.018341 | # -*- coding: utf-8 -*-
#
# namcap tests - infodirectory
# Copyright (C) 2011 Rémy Oudompheng <remy@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import os
from Namcap.tests.makepkg import MakepkgTest
import Namcap.rules.infodirectory
class InfoDirectoryTest(MakepkgTest):
pkgbuild = """
pkgname=__namcap_test_infodirectory
pkgver=1.0
pkgrel=1
pkgdesc="A package"
arch=('i686' 'x86_64')
url="http://www.example.com/"
license=('GPL')
depends=('glibc')
source=()
options=(!purge !zipman)
build() {
true
}
package() {
mkdir -p "${pkgdir}/usr/share/info"
touch "${pkgdir}/usr/share/info/dir"
}
"""
def test_info_slash_dir_exists(self):
"Package with a file /usr/share/info/dir left"
pkgfile = "__namcap_test_infodirectory-1.0-1-%(arch)s.pkg.tar" % { "arch": self.arch }
with open(os.path.join(self.tmpdir, "PKGBUILD"), "w") as f:
f.write(self.pkgbuild)
self.run_makepkg()
pkg, r = self.run_rule_on_tarball(
os.path.join(self.tmpdir, pkgfile),
Namcap.rules.infodirectory.InfodirRule
)
self.assertEqual(r.errors, [
("info-dir-file-present %s", "usr/share/info/dir")
])
self.assertEqual(r.warnings, [])
self.assertEqual(r.infos, [])
d | ef test_info_dir_updated(self):
pkgfile = "__namcap_test_infodirectory-1.0-1-%(arch)s.pkg.tar" % { "arch": self.arch }
with open(os.path.join(self.tmpdir, "PKGBUILD"), "w") as f:
f.write(self.pkgbuild)
self.run_makepkg()
pkg, r = self.run_rule_on_tarball(
os.path.join(self.tmpdir, pkgfile),
Namcap.rules.infodirectory.InfoInstallRule
)
self.assertEqual(r.errors, [
("info-dir-not-updated", ())
])
self.assertEqual(r.warnings, [])
self.assertEqual(r.infos, [])
# vim: set ts=4 | sw=4 noet:
|
pombredanne/kunai-1 | kunai/dnsquery.py | Python | mit | 3,161 | 0.006643 | import re
import socket
pattern = r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\[]?(\.|dot)[ )\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})"
ipv4pattern = re.compile(pattern)
class DNSQuery:
def __init__(self, data):
self.data = data
self.domain = ''
t = (ord(data[2]) >> 3) & 15 # Opcode bits
if t == 0: # Standard query
ini = 12
lon = ord(data[ini])
while lon != 0:
self.domain += data[ini+1 : ini+lon+1]+'.'
ini += lon + 1
lon = ord(data[ini])
def _get_size_hex(self, nb):
nb = min(nb, 256*256)
d,r = divmod(nb, 256)
s = chr(d)+chr(r)
return s
# We look in the nodes for the good tag
def lookup_for_nodes(self, nodes, dom):
print "DNS LOOKING ", self.domain, "inside domain", dom
if not self.domain.endswith(dom):
return []
search = self.domain[:-len(dom)]
# split into sname.service.datacenter
print "DNS lookup for search", search
elts = search.split('.', 2)
if len(elts) != 3:
print "DNS bad query", search
return []
dc = elts[2]
_type = elts[1]
tag = elts[0]
r = []
for n in nodes.values():
# skip non alive nodes
if n['state'] != 'alive':
continue
if tag in n['tags']:
services = n.get('services', {})
state_id = 0
if tag in services:
service = services[tag]
state_id = service.get('state_id')
print "DNS state_id", state_id
if state_id == 0:
addr = n['addr']
# If already an ip, add it
if ipv4pattern.match(addr):
r.append(addr)
else: # else try to resolv it first
try:
addr = socket.gethostbyname(addr)
r.append(addr)
| except socket.gaierror: # not found
print 'DNS cannot find the hotname ip', addr
# skip this node
print "DNS R:", r
return r
def response(self, r):
packet = ''
print "DNS DOM", self.domain
nb = len(r)
if self.domain:
packet += self.data[:2] + "\x81\x80"
packet += self.data[4:6] + self._get_size_hex(nb) + '\x00\x00\x00\x00' # Questions and Answers | Counts
packet += self.data[12:] # Original Domain Name Question
for ip in r:
packet += '\xc0\x0c' # Pointer to domain name
packet += '\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP
print "DNS RETURNing", len(packet), len(r)
return packet
|
datafiniti/Diamond | src/collectors/snmpraw/snmpraw.py | Python | mit | 6,082 | 0.000658 | # coding=utf-8
"""
The SNMPRawCollector is designed for collecting data from SNMP-enables devices,
using a set of specified OIDs
#### Configuration
Below is an example configuration for the SNMPRawCollector. The collector
can collect data any number of devices by adding configuration sections
under the *devices* header. By default the collector will collect every 60
seconds. This might be a bit excessive and put unnecessary load on the
devices being polled. You may wish to change this to every 300 seconds. However
you need modify your graphite data retentions to handle this properly.
```
# Options for SNMPRawCollector
enabled = True
interval = 60
[devices]
# Start the device configuration
# Note: this name will be used in the metric path.
[[my-identification-for-this-host]]
host = localhost
port = 161
community = public
# Start the OID list for this device
# Note: the value part will be used in the metric path.
[[[oids]]]
1.3.6.1.4.1.2021.10.1.3.1 = cpu.load.1min
1.3.6.1.4.1.2021.10.1.3.2 = cpu.load.5min
1.3.6.1.4.1.2021.10.1.3.3 = cpu.load.15min
# If you want another host, you can. But you probably won't need it.
[[another-identification]]
host = router1.example.com
port = 161
community = public
[[[oids]]]
oid = metric.path
oid = metric.path
```
Note: If you modify the SNMPRawCollector configuration, you will need to
restart diamond.
#### Dependencies
* pysmnp (which depends on pyasn1 0.1.7 and pycrypto)
"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
def __init__(self, *args, **kwargs):
super(SNMPRawCollector, self).__init__(*args, **kwargs)
# list to save non-existing oid's per device, to avoid repetition of
# errors in logging. restart diamond/collector to flush this
self.skip_list = []
def get_default_config(self):
"""
Override SNMPCollector.get_default_config method to provide
default_config for the SNMPInterfaceCollector
"""
default_config = super(SNMPRawCollector,
self).get_default_config()
default_config.update({
'oids': {},
'path_prefix': 'servers',
'path_suffix': 'snmp',
})
return default_config
def _precision(self, value):
"""
Return the precision of the number
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
def _skip(self, device, oid, reason=None):
self.skip_list.append((device, oid))
if reason is not None:
self.log.warn('Muted \'{0}\' on \'{1}\', because: {2}'.format(
oid, device, reason))
| def _get_value_walk(self, device, oid, host, port, community):
| data = self.walk(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#2)')
return
self.log.debug('Data received from WALK \'{0}\': [{1}]'.format(
device, data))
if len(data) != 1:
self._skip(device, oid,
'unexpected response, data has {0} entries'.format(
len(data)))
return
# because we only allow 1-key dicts, we can pick with absolute index
value = data.items()[0][1]
return value
def _get_value(self, device, oid, host, port, community):
data = self.get(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#1)')
return
self.log.debug('Data received from GET \'{0}\': [{1}]'.format(
device, data))
if len(data) == 0:
self._skip(device, oid, 'empty response, device down?')
return
if oid not in data:
# oid is not even in hierarchy, happens when using 9.9.9.9
# but not when using 1.9.9.9
self._skip(device, oid, 'no object at OID (#1)')
return
value = data[oid]
if value == 'No Such Object currently exists at this OID':
self._skip(device, oid, 'no object at OID (#2)')
return
if value == 'No Such Instance currently exists at this OID':
return self._get_value_walk(device, oid, host, port, community)
return value
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
self.log.debug(
'Collecting raw SNMP statistics from device \'{0}\''.format(device))
for device in self.config['devices']:
dev_config = self.config['devices'][device]
if not 'oids' in dev_config:
continue
for oid, metricName in dev_config['oids'].items():
if (device, oid) in self.skip_list:
self.log.debug(
'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
oid, metricName, device))
continue
timestamp = time.time()
value = self._get_value(device, oid, host, port, community)
if value is None:
continue
self.log.debug(
'\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
oid, metricName, device, value))
path = '.'.join([self.config['path_prefix'], device,
self.config['path_suffix'], metricName])
metric = Metric(path, value, timestamp, self._precision(value),
None, 'GAUGE')
self.publish_metric(metric)
|
r3alityc0d3r/pyisac-core | src/Pyisac/infrastructure/profile.py | Python | gpl-2.0 | 83 | 0.012048 | class Profile(object):
| def __init__(self, name):
| self.name = name
|
onfido/dependencies-resolver | tests/utils/test_md5_checksum.py | Python | mit | 1,242 | 0.000805 | import re
import tempfile
from dependencies_resolver.config.configuration import \
REGEX_MULTIPART_UPLOAD_PATTERN
from dependencies_resolver.utils.md5_checksum import get_aws_like_md5_checksum
from tests.utils.test_s3_utils import MOCKED_MD5_CHECKSUM
def test_get_md5_checksum_no_multipart_upload():
"""A test to check we get the desired md5 checksum for a file that has
not uploaded using multipart upload.
:return: True, unless the function is not w | orking as we expected.
"""
with tempfile.NamedTemporaryFile() as f:
md5_checksum = ge | t_aws_like_md5_checksum(f.name, None)
assert md5_checksum == MOCKED_MD5_CHECKSUM[1:-1].split('-')[0]
def test_get_md5_checksum_multipart_upload():
"""A test to check we get the desired md5 checksum for a file that has
uploaded using multipart upload.
:return: True, unless the function is not working as we expected.
"""
with tempfile.NamedTemporaryFile() as f:
multipart_regex_result = re.search(REGEX_MULTIPART_UPLOAD_PATTERN,
MOCKED_MD5_CHECKSUM)
md5_checksum = get_aws_like_md5_checksum(f.name, multipart_regex_result)
assert md5_checksum == MOCKED_MD5_CHECKSUM[1:-1]
|
GlobalFishingWatch/vessel-classification | classification/metadata.py | Python | apache-2.0 | 16,174 | 0.001608 | # Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, namedtuple
import csv
import datetime
import dateutil.parser
import pytz
import logging
import os
import sys
import tensorflow as tf
import yaml
import numpy as np
import hashlib
import six
from .feature_generation.file_iterator import GCSFile
""" The main column for vessel classification. """
PRIMARY_VESSEL_CLASS_COLUMN = 'label'
#TODO: (bitsofbits) think about extracting to config file
# The 'real' categories for multihotness are the fine categories, which 'coarse' and 'fishing'
# are defined in terms of. Any number of coarse categories, even with overlapping values can
# be defined in principle, although at present the interaction between the mulithot and non multihot
# versions makes that more complicated.
try:
yaml_load = yaml.safe_load
except:
yaml_load = yaml.load
raw_schema = '''
unknown:
non_fishing:
passenger:
gear:
fish_factory:
cargo_or_tanker:
bunker_or_tanker:
bunker:
tanker:
cargo_or_reefer:
cargo:
reefer:
specialized_reefer:
container_reefer:
fish_tender:
well_boat:
patrol_vessel:
research:
dive_vessel:
submarine:
dredge_non_fishing:
supply_vessel:
tug:
seismic_vessel:
helicopter:
other_not_fishing:
fishing:
squid_jigger:
drifting_longlines:
pole_and_line:
other_fishing:
trollers:
fixed_gear:
pots_and_traps:
set_longlines:
set_gillnets:
trawlers:
dredge_fishing:
seiners:
purse_seines:
tuna_purse_seines:
other_purse_seines:
other_seines:
driftnets:
'''
schema = yaml.safe_load(raw_schema)
def atomic(obj):
for k, v in obj.items():
if v is None or isinstance(v, str):
yield k
else:
for x in atomic(v):
yield x
def categories(obj, include_atomic=True):
for k, v in obj.items():
if v is None or isinstance(v, str):
if include_atomic:
yield k, [k]
else:
yield (k, list(atomic(v)))
for x in categories(v, include_atomic=include_atomic):
yield x
VESSEL_CLASS_DETAILED_NAMES = sorted(atomic(schema))
VESSEL_CATEGORIES = sorted(categories(schema))
TRAINING_SPLIT = 'Training'
TEST_SPLIT = 'Test'
FishingRange = namedtuple('FishingRange',
['start_time', 'end_time', 'is_fishing'])
def stable_hash(x):
x = six.ensure_binary(x)
digest = hashlib.blake2b(six.ensure_binary(x)).hexdigest()[-8:]
return int(digest, 16)
class VesselMetadata(object):
def __init__(self,
metadata_dict,
fishing_ranges_map):
self.metadata_by_split = metadata_dict
self.metadata_by_id = {}
self.fishing_ranges_map = fishing_ranges_map
self.id_map_int2bytes = {}
for split, vessels in metadata_dict.items():
for id_, data in vessels.items():
id_ = six.ensure_binary(id_)
self.metadata_by_id[id_] = data
idhash = stable_hash(id_)
self.id_map_int2bytes[idhash] = id_
intersection_ids = set(self.metadata_by_id.keys()).intersection(
set(fishing_ranges_map.keys()))
logging.info("Metadata for %d ids.", len(self.metadata_by_id))
logging.info("Fishing ranges for %d ids.", len(fishing_ranges_map))
logging.info("Vessels with both types of data: %d",
len(intersection_ids))
def vessel_weight(self, id_):
| return self.metadata_by_id[id_][1]
def vessel_label(self, label_name, id_):
return self.metadata_by_id[id_][0][label_name]
def ids_for_split(self, split):
assert split in (TRAINING_SPLIT, TEST_SPLIT)
# Check to make sure we don't have leakage
if (set(self.metadata_by_split[TRAINING_SPLIT].keys()) &
set(self.metadata_by_split[TEST_SPLIT].keys())):
| logging.warning('id in both training and test split')
return self.metadata_by_split[split].keys()
def weighted_training_list(self,
random_state,
split,
max_replication_factor,
row_filter=lambda row: True,
boundary=1):
replicated_ids = []
logging.info("Training ids: %d", len(self.ids_for_split(split)))
fishing_ranges_ids = []
for id_, (row, weight) in self.metadata_by_split[split].items():
if row_filter(row):
if id_ in self.fishing_ranges_map:
fishing_ranges_ids.append(id_)
weight = min(weight, max_replication_factor)
int_n = int(weight)
replicated_ids += ([id_] * int_n)
frac_n = weight - float(int_n)
if (random_state.uniform(0.0, 1.0) <= frac_n):
replicated_ids.append(id_)
missing = (-len(replicated_ids)) % boundary
if missing:
replicated_ids = np.concatenate(
[replicated_ids,
np.random.choice(replicated_ids, missing)])
random_state.shuffle(replicated_ids)
logging.info("Replicated training ids: %d", len(replicated_ids))
logging.info("Fishing range ids: %d", len(fishing_ranges_ids))
return replicated_ids
def fishing_range_only_list(self, random_state, split):
replicated_ids = []
fishing_id_set = set(
[k for (k, v) in self.fishing_ranges_map.items() if v])
fishing_range_only_ids = [id_
for id_ in self.ids_for_split(split)
if id_ in fishing_id_set]
logging.info("Fishing range training ids: %d / %d",
len(fishing_range_only_ids),
len(self.ids_for_split(split)))
return fishing_range_only_ids
def read_vessel_time_weighted_metadata_lines(available_ids, lines,
fishing_range_dict, split):
""" For a set of vessels, read metadata; use flat weights
Args:
available_ids: a set of all ids for which we have feature data.
lines: a list of comma-separated vessel metadata lines. Columns are
the id and a set of vessel type columns, containing at least one
called 'label' being the primary/coarse type of the vessel e.g.
(Longliner/Passenger etc.).
fishing_range_dict: dictionary of mapping id to lists of fishing ranges
Returns:
A VesselMetadata object with weights and labels for each vessel.
"""
metadata_dict = {TRAINING_SPLIT : {}, TEST_SPLIT : {}}
min_time_per_id = np.inf
for row in lines:
id_ = six.ensure_binary(row['id'].strip())
if id_ in available_ids:
if id_ not in fishing_range_dict:
continue
# Is this id included only to supress false positives
# Symptoms; fishing score for this id never different from 0
item_split = raw_item_split = row['split']
if raw_item_split in '0123456789':
if int(raw_item_split) == split:
item_split = TEST_SPLIT
else:
item_split = TRAINING_SPLIT
if item_split not in (TRAINING_SPLIT, TEST_SPLIT):
logging.warning(
'id %s has n |
codeinthehole/django-async-messages | tests/urls.py | Python | mit | 119 | 0.008403 | from | django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^$', 'tests.view | s.index'),
)
|
F5Networks/f5-common-python | f5/bigip/tm/gtm/test/unit/test_server.py | Python | apache-2.0 | 2,388 | 0 | # Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in co | mpliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from | f5.bigip import ManagementRoot
from f5.bigip.tm.gtm.server import Server
from f5.bigip.tm.gtm.server import Virtual_Server
from f5.sdk_exception import MissingRequiredCreationParameter
from six import iterkeys
@pytest.fixture
def FakeServer():
fake_servers = mock.MagicMock()
fake_server = Server(fake_servers)
return fake_server
@pytest.fixture
def FakeVS():
fake_server = mock.MagicMock()
fake_vs = Virtual_Server(fake_server)
return fake_vs
class TestCreate(object):
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
s1 = b.tm.gtm.servers.server
s2 = b.tm.gtm.servers.server
assert s1 is not s2
def test_create_no_args(self, FakeServer):
with pytest.raises(MissingRequiredCreationParameter):
FakeServer.create()
def test_create_no_datacenter(self, FakeServer):
with pytest.raises(MissingRequiredCreationParameter):
FakeServer.create(name='fakeserver',
addresses=[{'name': '1.1.1.1'}])
def test_create_no_address(self, FakeServer):
with pytest.raises(MissingRequiredCreationParameter):
FakeServer.create(name='fakeserver', datacenter='fakedc')
class Test_VS_Subcoll(object):
def test_vs_attr_exists(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
s = b.tm.gtm.servers.server
test_meta = s._meta_data['attribute_registry']
kind = 'tm:gtm:server:virtual-servers:virtual-serverscollectionstate'
assert kind in list(iterkeys(test_meta))
def test_create_no_args(self, FakeVS):
with pytest.raises(MissingRequiredCreationParameter):
FakeVS.create()
|
ubaumgar/OKR | docs/conf.py | Python | bsd-2-clause | 7,670 | 0.007562 | # -*- coding: utf-8 -*-
#
# sample documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 16 21:22:43 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'okr'
copyright = u'2017, siroop'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v1.0.0'
# The full version, including alpha/beta/rc tags.
release = 'v1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
# (Repaired: a stray extraction artifact had split this assignment in two.)
htmlhelp_basename = 'sampledoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'okr.tex', u'okr Documentation',
     u'siroop', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'okr', u'okr Documentation',
     [u'siroop'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'okr', u'okr Documentation',
     u'siroop', 'okr', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mojaves/convirt | convirt/events.py | Python | lgpl-2.1 | 3,067 | 0 | #
# Copyright 2015-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import collections
import logging
import threading
# A registered event callback: the connection and domain it was registered
# against, the callable to invoke, and an opaque user-supplied argument.
Callback = collections.namedtuple(
    'Callback', ('conn', 'dom', 'body', 'opaque'))


def _null_cb(*args, **kwargs):
    """No-op callback used when nothing is registered for an event."""


# Sentinel callback returned by Handler.get_callbacks() for unknown events.
_NULL = Callback(conn=None, dom=None, body=_null_cb, opaque=tuple())
class Handler(object):
    """Registry mapping event ids to lists of Callback tuples.

    A Handler may be chained to a parent; lookups for event ids it does
    not know are delegated up the chain.  Registry access is guarded by
    a per-instance lock.
    """

    _log = logging.getLogger('convirt.event')

    # Fallback returned when no callback is registered anywhere in the
    # chain: a single no-op callback (see _NULL above).
    _null = [_NULL]

    def __init__(self, name=None, parent=None):
        # Fall back to the instance id as a name, for log readability.
        self._name = id(self) if name is None else name
        self._parent = parent
        self._lock = threading.Lock()
        # event_id -> list of Callback namedtuples.
        self.events = collections.defaultdict(list)

    def register(self, event_id, conn, dom, func, opaque=None):
        """Register callable *func* (with its conn/dom/opaque context)
        to be invoked when *event_id* fires."""
        with self._lock:
            # TODO: weakrefs?
            cb = Callback(conn, dom, func, opaque)
            # TODO: debug?
            self._log.info('[%s] %i -> %s', self._name, event_id, cb)
            self.events[event_id].append(cb)

    def fire(self, event_id, dom, *args):
        """Invoke the callback registered for *event_id* and return its
        result.

        A non-None *dom* overrides the domain the callback was registered
        with; a non-None opaque value is appended to *args*.

        NOTE(review): the ``return`` sits inside the loop, so only the
        FIRST registered callback ever runs -- confirm this is intended
        before relying on multi-callback dispatch.
        """
        for cb in self.get_callbacks(event_id):
            arguments = list(args)
            if cb.opaque is not None:
                arguments.append(cb.opaque)
            domain = cb.dom
            if dom is not None:
                domain = dom
            self._log.debug('firing: %s(%s, %s, %s)',
                            cb.body, cb.conn, domain, arguments)
            return cb.body(cb.conn, domain, *arguments)

    def get_callbacks(self, event_id):
        """Return the callback list for *event_id*, consulting the parent
        chain; falls back to the no-op ``_null`` list when unhandled."""
        with self._lock:
            callback = self.events.get(event_id, None)
            if callback is not None:
                return callback
            if self._parent is not None:
                self._log.warning('[%s] unknown event %r',
                                  self._name, event_id)
                # Delegate upwards; the parent takes its own lock.
                return self._parent.get_callbacks(event_id)
            # TODO: debug?
            self._log.warning('[%s] unhandled event %r', self._name, event_id)
            return self._null

    @property
    def registered(self):
        """Tuple of event ids that currently have registered callbacks."""
        with self._lock:
            return tuple(self.events.keys())

    # for testing purposes
    def clear(self):
        """Drop every registered callback."""
        with self._lock:
            self.events.clear()
# Module-wide default handler; the module-level fire() delegates to it.
root = Handler(name='root')


def fire(event_id, dom, *args):
    """Fire *event_id* on the module-wide ``root`` handler.

    *dom* and *args* are forwarded unchanged (see Handler.fire).
    The former ``global root`` declaration was dead code: the name is
    only read here, never rebound, so no global statement is needed.
    """
    root.fire(event_id, dom, *args)
|
PierreBdR/point_tracker | point_tracker/normcross.py | Python | gpl-2.0 | 3,343 | 0.010769 | from __future__ import print_function, division, absolute_import
__author__ = "Pierre Barbier de Reuille <pierre@barbierdereuille.net>"
__docformat__ = "restructuredtext"
import scipy
from scipy import rot90, zeros, cumsum, sqrt, maximum, std, absolute, array, real
from scipy.signal.signaltools import correlate2d, fftconvolve
try:
from scipy.signal import fft2, ifft2
except ImportError:
from numpy.fft import fft2, ifft2
from .utils import centered, eps, padding
# Turns out fourrier is almost always faster ... no need to test!
#import convolution_timing
def normcross2d(template, A, mode="full"):
    """
    Compute the normalized cross-correlation of A and the template.

    The normalized cross-correlation is decribed in Lewis, J. P., "Fast
    Normalized Cross-Correlation," Industrial Light & Magic.
    (http://www.idiom.com/~zilla/Papers/nvisionInterface/nip.html)

    :Parameters:
        template
            template to use for the cross-correlation
        A
            Array containing the 2D data
        mode
            'full' to get the full correlation matrix, 'same' to get the
            matrix with the same dimensions as `A`, 'valid' to get only the parts
            strictly valid.
    """
    # Remember whether either input is complex so a purely real input pair
    # gets a real result (the FFT round-trip otherwise leaves tiny
    # imaginary residues).
    cmplx = False
    if (template.dtype.char in ['D','F']) or (A.dtype.char in ['D', 'F']):
        cmplx = True
    # Raw (unnormalized) correlation, computed in the frequency domain.
    corr_TA = fftcorrelate2d(template, A)
    m,n = template.shape
    mn = m*n
    # Template-window sums of A and A^2: the local mean/variance terms of
    # the normalization denominator.
    local_sum_A = local_sum(A, m, n)
    local_sum_A2 = local_sum(A*A,m,n)
    diff_local_sums = (local_sum_A2 - (local_sum_A*local_sum_A)/mn)
    # Clamp at 0: numerical noise can make the variance estimate negative.
    denom_A = sqrt(maximum(diff_local_sums,0))
    denom_T = sqrt(mn-1)*unbiased_std(template.flat)
    denom = denom_T*denom_A
    numerator = corr_TA - local_sum_A*sum(template.flat)/mn
    C = zeros(numerator.shape, dtype=numerator.dtype)
    # Normalize only where the denominator is meaningfully non-zero; flat
    # (zero-variance) regions keep a correlation of 0.
    tol = 1000*eps(max(absolute(denom.flat)))
    i_nonzero = denom > tol
    C[i_nonzero] = numerator[i_nonzero] / denom[i_nonzero]
    if not cmplx:
        C = real(C)
    if mode == 'full':
        return C
    elif mode == 'same':
        return centered(C,A.shape)
    elif mode == 'valid':
        return centered(C, array(A.shape)-array(template.shape)+1)
    # NOTE(review): any other mode value silently returns None -- confirm
    # callers only ever pass 'full', 'same' or 'valid'.
def unbiased_std(vector):
    """Return the unbiased (Bessel-corrected) standard deviation of *vector*.

    The original implementation rescaled the biased (population) standard
    deviation by sqrt(l/(l-1)) by hand; ``std(..., ddof=1)`` computes the
    same quantity directly, and additionally works for inputs that do not
    support ``len()`` (e.g. numpy flat iterators such as ``template.flat``
    passed by normcross2d).
    """
    return std(vector, ddof=1)
def local_sum(A, m, n):
    """Running m-by-n window sums of *A*, computed on a zero-padded copy.

    Equivalent to correlating A with an m-by-n box kernel, implemented
    with one cumulative-sum pass per axis followed by shifted differences.
    """
    padded = padding(A, (m, n))
    col_acc = cumsum(padded, 0)
    row_windows = col_acc[m:-1] - col_acc[:-m - 1]
    row_acc = cumsum(row_windows, 1)
    return row_acc[:, n:-1] - row_acc[:, :-n - 1]
def fftconvolve2d(in1, in2):
    """Convolve two 2-dimensional arrays using FFT (full mode).

    Specialization of scipy's fftconvolve for the 2-D case.

    BUG FIX: the original tested ``s1.dtype``/``s2.dtype`` -- the dtypes of
    the *shape* arrays, which are always integer -- so complex inputs were
    never detected and their imaginary part was silently discarded by the
    final real() cast.  We now inspect the dtypes of the inputs themselves.
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    cmplx = (in1.dtype.char in ['D', 'F']) or (in2.dtype.char in ['D', 'F'])
    # Zero-pad both inputs to the full linear-convolution size.
    size = s1 + s2 - 1
    IN1 = fft2(in1, size)
    IN1 *= fft2(in2, size)
    ret = ifft2(IN1)
    del IN1
    if not cmplx:
        # Real inputs: strip the tiny imaginary FFT round-trip residue.
        ret = real(ret)
    return ret
def fftcorrelate2d(template, A):
    """Correlate *template* with *A* in the frequency domain.

    Correlation is convolution with a 180-degree-rotated kernel, so the
    template is flipped and handed to fftconvolve2d.
    """
    flipped = rot90(template, 2)
    return fftconvolve2d(flipped, A)
def fftcorrelatend(template, A):
    """Correlate *template* with *A* via scipy.signal.fftconvolve.

    Same flip-and-convolve trick as fftcorrelate2d, but delegating to the
    generic n-dimensional scipy implementation.
    """
    flipped = rot90(template, 2)
    return fftconvolve(flipped, A)
# Dispatch table: strategy name -> correlation implementation.
# 'fft2' is the specialized 2-D FFT path, 'fftn' the generic scipy
# n-dimensional FFT path, 'domain' the direct spatial-domain correlate2d.
correlation_functions = {
    'fft2': fftcorrelate2d,
    'fftn': fftcorrelatend,
    'domain': correlate2d }
|
ulikoehler/UliEngineering | tests/Electronics/TestLED.py | Python | apache-2.0 | 1,086 | 0.004604 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from numpy.testing import assert_approx_equal, assert_allclose
from UliEngineering.Electronics.LED import *
from UliEngineering.Exceptions import OperationImpossibleException
from UliEngineering.EngineerIO import auto_format
import unittest
class TestLEDSeriesResistors(unittest.TestCase):
    """Unit tests for led_series_resistor() from UliEngineering.Electronics.LED."""

    def test_led_series_resistor(self):
        # Example verified at http://www.elektronik-kompendium.de/sites/bau/1109111.htm
        # Also verified at https://www.digikey.com/en/resources/conversion-calculators/conversion-calculator-led-series-resistor
        # R = (V_supply - V_forward) / I  =>  (12 - 1.6) / 0.02 = 520 ohms
        assert_approx_equal(led_series_resistor(12.0, 20e-3, 1.6), 520.)
        # String quantities with engineering units are accepted as well.
        assert_approx_equal(led_series_resistor("12V", "20 mA", "1.6V"), 520.)
        # Named forward-voltage constants behave like plain numbers.
        assert_approx_equal(led_series_resistor(12.0, 20e-3, LEDForwardVoltages.Red), 520.)

    def test_led_series_resistor_invalid(self):
        # Forward voltage too high for supply voltage
        with self.assertRaises(OperationImpossibleException):
            assert_approx_equal(led_series_resistor("1V", "20 mA", "1.6V"), 520.)
|
bpsinc-native/src_third_party_chromite | scripts/cros_check_patches.py | Python | bsd-3-clause | 8,385 | 0.009064 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command to list patches applies to a repository."""
import functools
import json
import os
import parallel_emerge
import portage # pylint: disable=F0401
import re
import shutil
import sys
import tempfile
from chromite.lib import cros_build_lib
from chromite.lib import osutils
class PatchReporter(object):
  """PatchReporter helps discover patches being applied by ebuilds,
  and compare them to a set of expected patches. This set of expected
  patches can be sorted into categories like 'needs_upstreaming', etc.
  Use of this can help ensure that critical (e.g. security) patches
  are not inadvertently dropped, and help surface forgotten-about
  patches that are yet-to-be upstreamed.
  """

  PATCH_TYPES = ('upstreamed', 'needs_upstreaming', 'not_for_upstream',
                 'uncategorized')

  def __init__(self, config, overlay_dir, ebuild_cmd, equery_cmd, sudo=False):
    """The 'config' dictionary should look like this:
    {
      "ignored_packages": ["chromeos-base/chromeos-chrome"],
      "upstreamed": [],
      "needs_upstreaming": [],
      "not_for_upstream": [],
      "uncategorized": [
        "net-misc/htpdate htpdate-1.0.4-checkagainstbuildtime.patch",
        "net-misc/htpdate htpdate-1.0.4-errorcheckhttpresp.patch"
      ]
    }
    """
    self.overlay_dir = os.path.realpath(overlay_dir)
    self.ebuild_cmd = ebuild_cmd
    self.equery_cmd = equery_cmd
    self._invoke_command = cros_build_lib.RunCommand
    if sudo:
      # Host builds need root for ebuild/equery; run them through sudo.
      self._invoke_command = functools.partial(cros_build_lib.SudoRunCommand,
                                               strict=False)
    self.ignored_packages = config['ignored_packages']
    self.package_count = 0
    # The config format is stored as category: [ list of patches ]
    # for ease of maintenance. But it's actually more useful to us
    # in the code if kept as a map of patch:patch_type.
    self.patches = {}
    for cat in self.PATCH_TYPES:
      for patch in config[cat]:
        self.patches[patch] = cat

  def Ignored(self, package_name):
    """Given a package name (e.g. 'chromeos-base/chromeos-chrome'), return
    True if this package should be skipped in the analysis. False otherwise.
    """
    return package_name in self.ignored_packages

  def ObservePatches(self, deps_map):
    """Given a deps_map of packages to analyze, observe the ebuild
    process for each and return a list of patches being applied.
    """
    # Portage writes per-package build logs under PORT_LOGDIR; point it
    # at a throwaway directory so the logs can be harvested and dropped.
    original = os.environ.get('PORT_LOGDIR', None)
    temp_space = None
    try:
      temp_space = tempfile.mkdtemp(prefix='check_patches')
      os.environ['PORT_LOGDIR'] = temp_space
      return self._ObservePatches(temp_space, deps_map)
    finally:
      # Always clean up the temp dir and restore the caller's PORT_LOGDIR.
      if temp_space:
        shutil.rmtree(os.environ['PORT_LOGDIR'])
      if original:
        os.environ['PORT_LOGDIR'] = original
      else:
        os.environ.pop('PORT_LOGDIR')

  def _ObservePatches(self, temp_space, deps_map):
    """Run 'ebuild ... prepare' for every relevant package in deps_map and
    scrape the 'Applying <patch> ...' lines from the resulting logs."""
    for cpv in deps_map:
      cat, name, _, _ = portage.versions.catpkgsplit(cpv)
      if self.Ignored("%s/%s" % (cat, name)):
        continue
      cmd = self.equery_cmd[:]
      cmd.extend(['which', cpv])
      ebuild_path = self._invoke_command(cmd, print_cmd=False,
                                         redirect_stdout=True).output.rstrip()
      # Some of these packages will be from other portdirs. Since we are
      # only interested in extracting the patches from one particular
      # overlay, we skip ebuilds not from that overlay.
      if self.overlay_dir != os.path.commonprefix([self.overlay_dir,
                                                   ebuild_path]):
        continue
      # By running 'ebuild blah.ebuild prepare', we get logs in PORT_LOGDIR
      # of what patches were applied. We clean first, to ensure we get a
      # complete log, and clean again afterwards to avoid leaving a mess.
      cmd = self.ebuild_cmd[:]
      cmd.extend([ebuild_path, 'clean', 'prepare', 'clean'])
      self._invoke_command(cmd, print_cmd=False, redirect_stdout=True)
      self.package_count += 1
    # Done with ebuild. Now just harvest the logs and we're finished.
    # This regex is tuned intentionally to ignore a few unhelpful cases.
    # E.g. elibtoolize repetitively applies a set of sed/portage related
    # patches. And media-libs/jpeg says it is applying
    # "various patches (bugfixes/updates)", which isn't very useful for us.
    # So, if you noticed these omissions, it was intentional, not a bug. :-)
    patch_regex = r'^ [*] Applying ([^ ]*) [.][.][.].*'
    output = cros_build_lib.RunCommand(
        ['egrep', '-r', patch_regex, temp_space], print_cmd=False,
        redirect_stdout=True).output
    lines = output.splitlines()
    patches = []
    patch_regex = re.compile(patch_regex)
    for line in lines:
      # NOTE(review): assumes each egrep line contains exactly three
      # colons ("path:pkg-log:...:Applying msg") -- confirm against real
      # portage log naming before relying on this split.
      cat, pkg, _, patchmsg = line.split(':')
      # The matched file's directory basename is the package category.
      cat = os.path.basename(cat)
      _, pkg, _, _ = portage.versions.catpkgsplit('x-x/%s' % pkg)
      patch_name = re.sub(patch_regex, r'\1', patchmsg)
      patches.append("%s/%s %s" % (cat, pkg, patch_name))
    return patches

  def ReportDiffs(self, observed_patches):
    """Prints a report on any differences to stdout. Returns an int
    representing the total number of discrepancies found.
    """
    expected_patches = set(self.patches.keys())
    observed_patches = set(observed_patches)
    missing_patches = sorted(list(expected_patches - observed_patches))
    unexpected_patches = sorted(list(observed_patches - expected_patches))
    if missing_patches:
      print "Missing Patches:"
      for p in missing_patches:
        print "%s (%s)" % (p, self.patches[p])
    if unexpected_patches:
      print "Unexpected Patches:"
      for p in unexpected_patches:
        print p
    return len(missing_patches) + len(unexpected_patches)
def Usage():
  """Print usage."""
  # NOTE: the text below doubles as the de-facto documentation of the JSON
  # config schema consumed by PatchReporter.
  print """Usage:
  cros_check_patches [--board=BOARD] [emerge args] package overlay-dir config.json
Given a package name (e.g. 'virtual/target-os') and an overlay directory
(e.g. /usr/local/portage/chromiumos), outputs a list of patches
applied by that overlay, in the course of building the specified
package and all its dependencies. Additional configuration options are
specified in the JSON-format config file named on the command line.
First run? Try this for a starter config:
{
  "ignored_packages": ["chromeos-base/chromeos-chrome"],
  "upstreamed": [],
  "needs_upstreaming": [],
  "not_for_upstream": [],
  "uncategorized": []
}
"""
def main(argv):
if len(argv) < 4:
Usage()
sys.exit(1)
# Avoid parsing most of argv because most of | it is destined for
# DepGraphGenerator/emerge rather than us. Extract what we need
# without disturbing the rest.
config_path = argv.pop()
config = json.loads(osutils.ReadFile(config_path))
overlay_dir = argv.pop()
board = [x.split('=')[1] for x in argv if x.find('--board=') != -1]
if board:
ebuild_cmd = ['ebuild-%s' % board[0]]
equery_cmd = ['equery-%s' % board[0]]
else:
ebuild_cmd = ['ebuild']
equery_cmd = ['equery']
use_sudo = not board
# We want the toolchain to be quiet to avoid interfering with our output.
depgraph_argv = ['--quiet', '--pretend', '--emptytree']
# Defaults to rdeps, but allow command-line override.
default_rootdeps_arg = ['--root-deps=rdeps']
for arg in argv:
if arg.startswith('--root-deps'):
default_rootdeps_arg = []
# Now, assemble the overall argv as the concatenation of the
# default list + possible rootdeps-default + actual command line.
depgraph_argv.extend(default_rootdeps_arg)
depgraph_argv.extend(argv)
deps = parallel_emerge.DepGraphGenerator()
deps.Initialize(depgraph_argv)
deps_tree, deps_info = deps.GenDependencyTree()
deps_map = deps.GenDependencyGraph(deps_tree, deps_info)
reporter = PatchReporter(config, overlay_dir, ebuild_cmd, equery_cmd,
sudo=use_sudo)
observed = reporter.ObservePatches(deps_map)
diff_count = reporter.ReportD |
adobe-type-tools/python-scripts | vintage/copyCFFCharstrings.py | Python | mit | 5,593 | 0.030753 | """
Copies the CFF charstrings and subrs from src to dst fonts.
"""
__help__ = """python copyCFFCharstrings.py -src <file1> -dst <file2>[,file3,...filen_n]
Copies the CFF charstrings and subrs from src to dst.
v1.002 Aug 27 2014
"""
import os
import sys
import traceback
from fontTools.ttLib import TTFont, getTableModule, TTLibError
class LocalError(TypeError):
    # Raised for every user-visible failure in this script (bad arguments,
    # unreadable fonts, mismatched font data); run()'s callers treat it as
    # "abort with the message already printed".
    pass
def getOpts():
    """Parse the -src and -dst command-line options.

    Returns (srcPath, dstPathList) where dstPathList is the comma-separated
    -dst value split into individual paths.  Prints a message and raises
    LocalError on any unknown flag or missing/nonexistent path.
    """
    srcPath = None
    dstPath = None
    i = 0
    args = sys.argv[1:]
    while i < len(args):
        arg = args[i]
        i += 1
        if arg == "-dst":
            dstPath = args[i]
            i += 1
        elif arg == "-src":
            srcPath = args[i]
            i += 1
        else:
            print "Did not recognize argument: ", arg
            print __help__
            raise LocalError()
    if not (srcPath and dstPath):
        print "You must supply source and destination font paths."
        print __help__
        raise LocalError()
    if not os.path.exists(srcPath):
        print "Source path does not exist:", srcPath
        raise LocalError()
    # -dst may name several fonts separated by commas; all must exist.
    dstPathList = dstPath.split(",")
    for dstPath in dstPathList:
        if not os.path.exists(dstPath):
            print "Destination path does not exist:", dstPath
            raise LocalError()
    return srcPath, dstPathList
def makeTempOTF(srcPath):
ff = file(srcPath, "rb")
data = ff.read()
ff.close()
try:
ttFont = TTFont()
cffModule = getTableModule('CFF ')
cffTable = cffModule.table_C_F_F_('CFF ')
ttFont['CFF '] = cffTable
cffTable.decompile(data, ttFont)
except:
print "\t%s" %(traceback.format_exception_only(sys.exc_type, sys.exc_value)[-1])
print "Attempted to read font %s as CFF." % filePath
raise LocalError("Er | ror parsing font file <%s>." % filePath)
return ttFont
def getTTFontCFF(filePath):
    """Open *filePath* as a font and return (ttFont, cff, isOTF).

    Tries to parse the file as an sfnt-wrapped (OTF) font first; when
    fontTools rejects it with TTLibError, falls back to treating the file
    as bare CFF data via makeTempOTF() and reports isOTF=False.  Raises
    LocalError if the file cannot be read or has no 'CFF ' table.
    """
    isOTF = True
    try:
        ttFont = TTFont(filePath)
    except (IOError, OSError):
        raise LocalError("Error opening or reading from font file <%s>." % filePath)
    except TTLibError:
        # Maybe it is a CFF. Make a dummy TTF font for fontTools to work with.
        ttFont = makeTempOTF(filePath)
        isOTF = False
    try:
        cffTable = ttFont['CFF ']
    except KeyError:
        raise LocalError("Error: font is not a CFF font <%s>." % filePath)
    return ttFont, cffTable.cff, isOTF
def validatePD(srcPD, dstPD):
# raise LocalError if the hints differ.
for key in ["BlueScale", "BlueShift", "BlueFuzz", "BlueValues", "OtherBlues", "FamilyBlues", "FamilyOtherBlues", "StemSnapH", "StemSnapV", "StdHW", "StdVW", "ForceBold", "LanguageGroup"]:
err = 0
if dstPD.rawDict.has_key(key):
if not srcPD.rawDict.has_key(key):
err = 1
else:
srcVal = eval("srcPD.%s" % (key))
dstVal = eval("dstPD.%s" % (key))
if (srcVal != dstVal):
err = 1
elif srcPD.rawDict.has_key(key):
err = 1
if err:
break
if err:
print "Quitting. FDArray Private hint info does not match for FD[%s]." % (i)
raise LocalError()
return
def copyData(srcPath, dstPath):
    """Merge the CFF charstrings from *srcPath* into the font at *dstPath*.

    Structural compatibility (ROS, CIDCount, charset, FDArray size and
    per-FD hinting) is checked first; the destination's font names and
    top-dict metadata are then copied onto the source CFF, so only the
    outlines change.  The result is written as "<dstPath>.new", keeping
    the destination's container flavour (OTF or bare CFF).
    """
    srcTTFont, srcCFFTable, srcIsOTF = getTTFontCFF(srcPath)
    srcTopDict = srcCFFTable.topDictIndex[0]
    dstTTFont, dstCFFTable, dstIsOTF = getTTFontCFF(dstPath)
    dstTopDict = dstCFFTable.topDictIndex[0]
    # Check that ROS, charset, and hinting parameters all match.
    if srcTopDict.ROS != dstTopDict.ROS:
        print "Quitting. ROS does not match. src: %s dst: %s." % (srcTopDict.ROS, dstTopDict.ROS)
        return
    if srcTopDict.CIDCount != dstTopDict.CIDCount:
        print "Quitting. CIDCount does not match. src: %s dst: %s." % (srcTopDict.CIDCount, dstTopDict.CIDCount)
        return
    if srcTopDict.charset != dstTopDict.charset:
        print "Quitting. charset does not match.."
        return
    numFD = len(srcTopDict.FDArray)
    if numFD != len(dstTopDict.FDArray):
        print "Quitting. FDArray count does not match. src: %s dst: %s." % (srcTopDict.FDArray.count, dstTopDict.FDArray.count)
        return
    # Validate every FD's hinting before mutating anything.
    for i in range(numFD):
        srcFD = srcTopDict.FDArray[i]
        # srcFD.FontName
        srcPD = srcFD.Private
        dstFD = dstTopDict.FDArray[i]
        dstPD = dstFD.Private
        validatePD(srcPD, dstPD) # raises LocalError if the hints differ.
    # All is OK. Update the font names.
    for i in range(numFD):
        srcFD = srcTopDict.FDArray[i]
        dstFD = dstTopDict.FDArray[i]
        srcFD.FontName = dstFD.FontName
    # Update the CID name.
    # Only keys present in the destination's raw dict are copied over.
    if dstTopDict.rawDict.has_key("version"):
        srcTopDict.version = dstTopDict.version
    if dstTopDict.rawDict.has_key("Notice"):
        srcTopDict.Notice = dstTopDict.Notice
    if dstTopDict.rawDict.has_key("Copyright"):
        srcTopDict.Copyright = dstTopDict.Copyright
    if dstTopDict.rawDict.has_key("FullName"):
        srcTopDict.FullName = dstTopDict.FullName
    if dstTopDict.rawDict.has_key("FamilyName"):
        srcTopDict.FamilyName = dstTopDict.FamilyName
    if dstTopDict.rawDict.has_key("Weight"):
        srcTopDict.Weight = dstTopDict.Weight
    if dstTopDict.rawDict.has_key("UniqueID"):
        srcTopDict.UniqueID = dstTopDict.UniqueID
    if dstTopDict.rawDict.has_key("XUID"):
        srcTopDict.XUID = dstTopDict.XUID
    if dstTopDict.rawDict.has_key("CIDFontVersion"):
        srcTopDict.CIDFontVersion = dstTopDict.CIDFontVersion
    if dstTopDict.rawDict.has_key("CIDFontRevision"):
        srcTopDict.CIDFontRevision = dstTopDict.CIDFontRevision
    for i in range(len(srcCFFTable.fontNames)):
        srcCFFTable.fontNames[i] = dstCFFTable.fontNames[i]
    cffTable = srcTTFont['CFF ']
    outputFile = dstPath + ".new"
    if dstIsOTF:
        # Graft the renamed source CFF into the destination-flavoured OTF.
        dstTTFont['CFF '] = cffTable
        dstTTFont.save(outputFile)
        print "Wrote new OTF file:", outputFile
    else:
        # Bare CFF destination: serialize just the table data.
        data = cffTable.compile(dstTTFont)
        tf = file(outputFile, "wb")
        tf.write(data)
        tf.close()
        print "Wrote new CFF file:", outputFile
    srcTTFont.close()
    dstTTFont.close()
def run():
    """Entry point: parse the command line and copy the source font's
    charstrings into every destination font in turn."""
    src, destinations = getOpts()
    for dest in destinations:
        copyData(src, dest)

if __name__ == "__main__":
    run()
|
mikeek/FIT | IPP/proj_2/Macro.py | Python | mit | 4,768 | 0.066485 | #!/usr/bin/python3
#JMP:xkozub03
import sys
import re
import Config
from Config import exit_err
# Global registry of all known macros, keyed by name (e.g. "@def").
macro_list = {}


def init_list(redef_opt):
    """Register the built-in macros (@def/@set/@let/@null and their
    protected @__x__ twins) into the global macro_list.

    *redef_opt* is the -r flag: when true, @def/@__def__ refuse to
    redefine an existing macro.
    """
    builtin = {name: Macro(name)
               for name in ("@def", "@set", "@let", "@null",
                            "@__def__", "@__set__", "@__let__")}

    # @def variants take (name, argument list, body) and honour -r.
    for name in ("@def", "@__def__"):
        builtin[name].is_def = True
        builtin[name].set_redef(redef_opt)
        for arg in ("$frst", "$scnd", "$thrd"):
            builtin[name].add_arg(arg)

    # @set variants take a single mode argument.
    for name in ("@set", "@__set__"):
        builtin[name].is_set = True
        builtin[name].add_arg("$frst")

    # @let variants take (target name, source name).
    for name in ("@let", "@__let__"):
        builtin[name].is_let = True
        for arg in ("$frst", "$scnd"):
            builtin[name].add_arg(arg)

    # @null takes no arguments and always expands to "".
    builtin["@null"].is_null = True

    macro_list.update(builtin)
class Macro:
    # Class-level defaults; every one of these is shadowed by the instance
    # attributes assigned in __init__, so they only document the layout.
    name = "";
    body = "";
    args = {};              # argument name -> currently bound value
    args_ord_name = [];     # argument names in declaration order
    args_cnt = 0;           # number of declared arguments
    args_order = 0;         # index of the next argument slot to fill
    is_def = False;         # builtin-dispatch flags (at most one is True)
    is_set = False;
    is_let = False;
    is_null = False;
    redef = True;           # redefinition-forbidden flag (-r), @def only

    def __init__(self, name):
        """Create an empty macro called *name* (e.g. "@foo")."""
        self.name = name;
        self.body = "";
        self.args = {};
        self.args_ord_name = [];
        self.args_cnt = 0;
        self.args_order = 0;
        self.is_def = False;
        self.is_set = False;
        self.is_let = False;
        self.is_null = False;
        self.redef = True;
        return;

    def set_redef(self, redef):
        self.redef = redef;

    def get_name(self):
        return self.name;

    def get_num_of_args(self):
        return self.args_cnt;

    def add_arg(self, name):
        """Declare formal argument *name* (e.g. "$frst") for this macro."""
        if name in self.args.keys():
            # argument redefined within one macro definition
            exit_err("Semantic error (argument redefinition - '" + name + "')", 56);
        self.args[name] = '';
        self.args_ord_name.append(name);
        self.args_cnt += 1;

    def set_next_arg(self, value):
        """Bind *value* to the next unfilled argument slot.

        For @def the first argument must be a macro name (start with '@').
        """
        if self.args_order == self.args_cnt:
            # too many arguments supplied for this macro
            sys.stderr.write("Syntax error\n");
            sys.exit(56);
        if self.is_def and self.args_order == 0 and value[0] != '@':
            exit_err("Macro name expected ('" + value + "' given)", 57);
        self.args[self.args_ord_name[self.args_order]] = value;
        self.args_order += 1;

    def set_body(self, body):
        self.body = body;

    def expand(self):
        """Expand this macro; dispatches to the module-level helpers."""
        return _expand(self);

    def expand_def(self):
        return _expand_def(self);

    def expand_set(self):
        return _expand_set(self);

    def expand_let(self):
        return _expand_let(self);

    def expand_null(self):
        return _expand_null(self);
def _expand(self):
    """Generic expansion shared by all Macro instances.

    Verifies every declared argument received a value, resets the fill
    counter for the next use, dispatches the builtins (@def/@set/@null/
    @let) to their helpers, and otherwise substitutes $-arguments inside
    the macro body.
    """
    if self.args_order != self.args_cnt:
        # Not every argument was supplied.
        sys.stderr.write("Syntax error\n");
        # NOTE(review): bare exit() here, while sys.exit() is used
        # elsewhere -- works, but inconsistent.
        exit(56);
    self.args_order = 0;
    if self.is_def:
        return self.expand_def();
    if self.is_set:
        return self.expand_set();
    if self.is_null:
        return self.expand_null();
    if self.is_let:
        return self.expand_let();
    exp_body = self.body;
    # Match groups: [0] whole span, [1] text before the argument,
    # [2] the $argument name itself, [3] the delimiter that ended it.
    m = re.findall("((^|[^\$]*?)(\$[a-zA-Z_][a-zA-Z_0-9]*)(\s|\$|$|[^a-zA-Z_0-9]))", exp_body);
    for rex in m:
        if rex[2] in self.args.keys():
            # Replace the matched span with its expansion; note that
            # str.replace substitutes every occurrence of that span.
            exp_body = exp_body.replace(rex[0], rex[1] + self.args[rex[2]] + rex[3]);
    return exp_body;
def _expand_def(self):
    """Implementation of @def: define a new macro.

    Arguments: $frst = new macro name, $scnd = formal argument list,
    $thrd = macro body.  Always expands to the empty string.
    """
    name = self.args[self.args_ord_name[0]];
    arg_list = self.args[self.args_ord_name[1]];
    def_body = self.args[self.args_ord_name[2]];
    new_macro = Macro(name);
    if name == "@__def__" or name == "@__let__" or name == "@__set__":
        exit_err("Redef __macro__ error", 57);
    if name == "@null":
        # @null can never be redefined; the definition is silently dropped.
        return "";
    if self.redef and name in macro_list:
        # With -r, redefining any existing macro is a semantic error.
        exit_err("Redef -r macro error", 57);
    # Collect every $identifier in the declaration as a formal argument.
    m = re.findall("\$[a-zA-Z_][a-zA-Z_0-9]*", arg_list);
    for rex in m:
        new_macro.add_arg(rex);
    new_macro.set_body(def_body);
    macro_list[name] = new_macro;
    return "";
def _expand_set(self):
    """Implementation of @set: toggle input-whitespace handling.

    The only accepted values are "-INPUT_SPACES" / "+INPUT_SPACES", which
    clear/set Config.ignore_white; anything else aborts with code 56.
    Always expands to the empty string.

    The former ``global ignore_white`` declaration was dead code: the flag
    actually toggled lives in the Config module, not in this one, so the
    declaration has been removed.  The local previously named ``set``
    (shadowing the builtin) is renamed to ``mode``.
    """
    self.body = "";
    mode = self.args[self.args_ord_name[0]];
    if mode == "-INPUT_SPACES":
        Config.ignore_white = True;
    elif mode == "+INPUT_SPACES":
        Config.ignore_white = False;
    else:
        sys.stderr.write("Set error!\n");
        sys.exit(56);
    return self.body;
def _expand_let(self):
    """Implementation of @let: alias the first macro name to the second.

    Both arguments must be macro names (start with '@').  @null cannot be
    rebound; binding a macro *to* @null deletes it.  Always expands to the
    empty string.
    """
    self.body = "";
    first = self.args[self.args_ord_name[0]];
    second = self.args[self.args_ord_name[1]];
    if first[0] != '@' or second[0] != '@':
        exit_err("let macro requires macro names as both arguments", 57);
    if first == "@null":
        # @null cannot be rebound; silently ignore.
        return self.body;
    if first == "@__def__" or first == "@__let__" or first == "@__set__":
        exit_err("Redef __macro__ error", 57);
    if second == "@null":
        # Binding to @null removes the macro entirely.
        if first in macro_list:
            del macro_list[first];
        return self.body;
    macro_list[first] = macro_list[second];
    return self.body;
def _expand_null(self):
    """Implementation of @null: always expands to the empty string."""
    return "";
jwg4/flask-restless | flask_restless/views/function.py | Python | agpl-3.0 | 2,512 | 0 | # function.py - views for evaluating SQL functions on SQLAlchemy models
#
# Copyright 2011 Lincoln de Sousa <lincoln@comum.org>.
# Copyright 2012, 2013, 2014, 2015, 2016 Jeffrey Finkelstein
# <jeffrey.finkelstein@gmail.com> and contributors.
#
# This file is part of Flask-Restless.
#
# Flask-Restless is distributed under both the GNU Affero General Public
# License version 3 and under the 3-clause BSD license. For more
# information, see LICENSE.AGPL and LICENSE.BSD.
"""Views for evaluating functions on a SQLAlchemy model.
The main class in this module, :class:`FunctionAPI`, is a
:class:`~flask.MethodView` subclass that creates endpoints for fetching
the result of evaluating a SQL function on a SQLAlchemy model.
"""
from flask import json
from flask import request
from sqlalchemy.exc import OperationalError
from .base import error_response
from .base import ModelView
from .helpers import evaluate_functions
class FunctionAPI(ModelView):
    """Provides method-based dispatching for :http:method:`get` requests which
    wish to apply SQL functions to all instances of a model.

    .. versionadded:: 0.4

    """

    def get(self):
        """Returns the result of evaluating the SQL functions specified in the
        body of the request.

        For a description of the request and response formats, see
        :ref:`functionevaluation`.

        """
        if 'functions' not in request.args:
            detail = 'Must provide `functions` query parameter'
            return error_response(400, detail=detail)
        functions = request.args.get('functions')
        try:
            # An absent/empty JSON payload degrades to "no functions".
            data = json.loads(str(functions)) or []
        except (TypeError, ValueError, OverflowError) as exception:
            detail = 'Unable to decode JSON in `functions` query parameter'
            return error_response(400, cause=exception, detail=detail)
        try:
            result = evaluate_functions(self.session, self.model, data)
        except AttributeError as exception:
            # NOTE(review): assumes evaluate_functions() attaches a
            # `.field` attribute to the AttributeError it raises --
            # confirm in views.helpers.
            detail = 'No such field "{0}"'.format(exception.field)
            return error_response(400, cause=exception, detail=detail)
        except KeyError as exception:
            detail = str(exception)
            return error_response(400, cause=exception, detail=detail)
        except OperationalError as exception:
            # NOTE(review): likewise assumes a `.function` attribute is
            # set on the OperationalError by the helper layer.
            detail = 'No such function "{0}"'.format(exception.function)
            return error_response(400, cause=exception, detail=detail)
        return dict(data=result)
|
luca76/QGIS | python/plugins/processing/algs/lidar/lastools/hugeFileNormalize.py | Python | gpl-2.0 | 5,671 | 0.002645 | # -*- coding: utf-8 -*-
"""
***************************************************************************
hugeFileNormalize.py
---------------------
Date : May 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'May 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterNumber import ParameterNumber
class hugeFileNormalize(LAStoolsAlgorithm):
    """Four-stage LAStools pipeline that height-normalizes a huge point
    cloud: reversible tiling -> per-tile ground classification -> per-tile
    height normalization -> reverse tiling into a single output file.
    """

    # Parameter identifiers, plus the option lists backing the selections.
    TILE_SIZE = "TILE_SIZE"
    BUFFER = "BUFFER"
    AIRBORNE = "AIRBORNE"
    TERRAIN = "TERRAIN"
    TERRAINS = ["wilderness", "nature", "town", "city", "metro"]
    GRANULARITY = "GRANULARITY"
    GRANULARITIES = ["coarse", "default", "fine", "extra_fine", "ultra_fine"]

    def defineCharacteristics(self):
        """Declare the algorithm's name, group and user-facing parameters."""
        self.name = "hugeFileNormalize"
        self.group = "LAStools Pipelines"
        self.addParametersPointInputGUI()
        self.addParameter(ParameterNumber(hugeFileNormalize.TILE_SIZE, "tile size (side length of square tile)", 0, None, 1000.0))
        self.addParameter(ParameterNumber(hugeFileNormalize.BUFFER, "buffer around each tile (avoids edge artifacts)", 0, None, 25.0))
        self.addParameter(ParameterBoolean(hugeFileNormalize.AIRBORNE, "airborne LiDAR", True))
        self.addParameter(ParameterSelection(hugeFileNormalize.TERRAIN, "terrain type", hugeFileNormalize.TERRAINS, 1))
        self.addParameter(ParameterSelection(hugeFileNormalize.GRANULARITY, "preprocessing", hugeFileNormalize.GRANULARITIES, 1))
        self.addParametersTemporaryDirectoryGUI()
        self.addParametersPointOutputGUI()
        self.addParametersCoresGUI()
        self.addParametersVerboseGUI()

    def processAlgorithm(self, progress):
        """Run the four LAStools command-line invocations in sequence."""
        # first we tile the data with option '-reversible'
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lastile.exe")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        tile_size = self.getParameterValue(hugeFileNormalize.TILE_SIZE)
        commands.append("-tile_size")
        commands.append(str(tile_size))
        # NOTE(review): 'buffer' shadows the Python 2 builtin of the same
        # name; harmless here, but worth renaming eventually.
        buffer = self.getParameterValue(hugeFileNormalize.BUFFER)
        if buffer != 0.0:
            commands.append("-buffer")
            commands.append(str(buffer))
        commands.append("-reversible")
        self.addParametersTemporaryDirectoryAsOutputDirectoryCommands(commands)
        commands.append("-o")
        commands.append("hugeFileNormalize.laz")
        LAStoolsUtils.runLAStools(commands, progress)
        # then we ground classify the reversible tiles
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasground.exe")]
        self.addParametersVerboseCommands(commands)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, "hugeFileNormalize*.laz")
        airborne = self.getParameterValue(hugeFileNormalize.AIRBORNE)
        if airborne != True:
            commands.append("-not_airborne")
        method = self.getParameterValue(hugeFileNormalize.TERRAIN)
        if method != 1:
            # The default selection (index 1) emits no switch.
            commands.append("-" + hugeFileNormalize.TERRAINS[method])
        granularity = self.getParameterValue(hugeFileNormalize.GRANULARITY)
        if granularity != 1:
            commands.append("-" + hugeFileNormalize.GRANULARITIES[granularity])
        self.addParametersTemporaryDirectoryAsOutputDirectoryCommands(commands)
        commands.append("-odix")
        commands.append("_g")
        commands.append("-olaz")
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
        # then we height-normalize each points in the reversible tiles
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasheight.exe")]
        self.addParametersVerboseCommands(commands)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, "hugeFileNormalize*_g.laz")
        self.addParametersTemporaryDirectoryAsOutputDirectoryCommands(commands)
        commands.append("-replace_z")
        commands.append("-odix")
        commands.append("h")
        commands.append("-olaz")
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
        # then we reverse the tiling
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lastile.exe")]
        self.addParametersVerboseCommands(commands)
        self.addParametersTemporaryDirectoryAsInputFilesCommands(commands, "hugeFileNormalize*_gh.laz")
        commands.append("-reverse_tiling")
        self.addParametersPointOutputCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
|
practian-reapps/django-backend-utils | backend_utils/serializers.py | Python | bsd-3-clause | 380 | 0 | """
@copyright Copyright (c) 2016 Devhres Team
@author Angel Sullon (@asullom)
@package utils
Descripcion: serializers
"""
from rest_framework import serializers


class RecursiveSerializer(serializers.Serializer):
    """Serializer field for self-referential (tree-shaped) data.

    When declared inside another serializer, it re-instantiates the
    serializer class that (transitively) declared it and applies it to the
    nested value, so arbitrarily deep recursive structures are rendered.
    """
    # NOTE(review): the original import line contained a stray "|"
    # extraction artifact ("imp | ort"); reconstructed above.

    def to_representation(self, value):
        # NOTE(review): assumes two parent hops (this field -> parent/list
        # serializer -> declaring serializer class) - the typical DRF
        # recursive-nesting pattern; confirm against actual usage.
        serializer = self.parent.parent.__class__(value, context=self.context)
        return serializer.data
|
mbaldessari/sarstats | sar_grapher.py | Python | gpl-2.0 | 10,858 | 0.001105 | import hashlib
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import os
import shutil
import tempfile
from sar_parser import SarParser
# If the there are more than 50 plots in a graph we move the legend to the
# bottom
LEGEND_THRESHOLD = 50
def ascii_date(d):
    """Return *d* formatted as ``YYYY-MM-DD HH:MM``.

    :param d: a ``datetime``/``date``-like object exposing ``strftime``.
    :return: the formatted timestamp string.
    """
    # strftime() already returns a str; the old '"%s" % (...)' wrapper
    # was a no-op and has been dropped.
    return d.strftime("%Y-%m-%d %H:%M")
class SarGrapher(object):
def __init__(self, filenames, starttime=None, endtime=None):
"""Initializes the class, creates a SarParser class
given a list of files and also parsers the files"""
# Temporary dir where images are stored (one per graph)
# NB: This is done to keep the memory usage constant
# in spite of being a bit slower (before this change
# we could use > 12GB RAM for a simple sar file -
# matplotlib is simply inefficient in this area)
self._tempdir = tempfile.mkdtemp(prefix='sargrapher')
self.sar_parser = SarParser(filenames, starttime, endtime)
self.sar_parser.parse()
duplicate_timestamps = self.sar_parser._duplicate_timestamps
if duplicate_timestamps:
print("There are {0} lines with duplicate timestamps. First 10"
"line numbers at {1}".format(
len(duplicate_timestamps.keys()),
sorted(list(duplicate_timestamps.keys()))[:10]))
def _graph_filename(self, graph, extension='.png'):
"""Creates a unique constant file name given a graph or graph list"""
if isinstance(graph, list):
temp = "_".join(graph)
else:
temp = graph
temp = temp.replace('%', '_')
temp = temp.replace('/', '_')
digest = hashlib.sha1()
digest.update(temp.encode('utf-8'))
fname = os.path.join(self._tempdir, digest.hexdigest() + extension)
return fname
    def datasets(self):
        """Return the data types (datasets) available in the parsed sar data.

        Thin delegation to ``SarParser.available_data_types()``.
        """
        return self.sar_parser.available_data_types()
    def timestamps(self):
        """Return all available timestamps, sorted ascending.

        NOTE(review): the original docstring said "datasets" - a copy/paste
        slip from :meth:`datasets`; this method deals with timestamps.
        """
        return sorted(self.sar_parser.available_timestamps())
    def plot_datasets(self, data, fname, extra_labels, showreboots=False,
                      output='pdf'):
        """Plot time-series data and write the resulting figure to *fname*.

        :param data: ``([title, unit, axis_labels], datanames)`` where
            *datanames* is the list of dataset keys to plot.
        :param fname: output image path.
        :param extra_labels: list of ``(datetime, 'label')`` tuples to
            annotate on the graph (may be falsy).
        :param showreboots: when True and a sosreport with reboot data is
            available, mark reboot times with arrows.
        :param output: not used in this method; kept for interface
            compatibility with callers.
        """
        sar_parser = self.sar_parser
        title = data[0][0]
        unit = data[0][1]
        axis_labels = data[0][2]
        datanames = data[1]
        if not isinstance(datanames, list):
            raise Exception("plottimeseries expects a list of datanames: %s" %
                            data)
        fig = plt.figure(figsize=(10.5, 6.5))
        axes = fig.add_subplot(111)
        axes.set_title('{0} time series'.format(title), fontsize=12)
        axes.set_xlabel('Time')
        axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
        # Twenty minutes. Could probably make it a parameter
        axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
        fig.autofmt_xdate()
        ylabel = title
        if unit:
            ylabel += " - " + unit
        axes.set_ylabel(ylabel)
        # Plain (non-scientific, no-offset) y-axis tick labels.
        y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
        axes.yaxis.set_major_formatter(y_formatter)
        axes.yaxis.get_major_formatter().set_scientific(False)
        # One distinct color per dataset, spread across the 'Set1' colormap.
        color_norm = colors.Normalize(vmin=0, vmax=len(datanames) - 1)
        scalar_map = cm.ScalarMappable(norm=color_norm,
                                       cmap=plt.get_cmap('Set1'))
        timestamps = self.timestamps()
        counter = 0
        for i in datanames:
            try:
                dataset = [sar_parser._data[d][i] for d in timestamps]
            # NOTE(review): bare except - re-raises, but also intercepts
            # KeyboardInterrupt/SystemExit; narrowing to KeyError would be
            # safer.
            except:
                print("Key {0} does not exist in this graph".format(i))
                raise
            axes.plot(timestamps, dataset, 'o:', label=axis_labels[counter],
                      color=scalar_map.to_rgba(counter))
            counter += 1
        # Draw extra_labels
        if extra_labels:
            for extra in extra_labels:
                axes.annotate(extra[1], xy=(mdates.date2num(extra[0]),
                              sar_parser.find_max(extra[0], datanames)),
                              xycoords='data', xytext=(30, 30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->",
                                              connectionstyle="arc3,rad=.2"))
        # If we have a sosreport draw the reboots
        if showreboots and sar_parser.sosreport is not None and \
           sar_parser.sosreport.reboots is not None:
            reboots = sar_parser.sosreport.reboots
            for reboot in reboots.keys():
                reboot_date = reboots[reboot]['date']
                rboot_x = mdates.date2num(reboot_date)
                (xmin, xmax) = plt.xlim()
                (ymin, ymax) = plt.ylim()
                # Skip reboots that fall outside the plotted time window.
                if rboot_x < xmin or rboot_x > xmax:
                    continue
                axes.annotate('', xy=(mdates.date2num(reboot_date), ymin),
                              xycoords='data', xytext=(-30, -30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->", color='blue',
                                              connectionstyle="arc3,rad=-0.1"))
        # Show any data collection gaps in the graph
        gaps = sar_parser.find_data_gaps()
        if len(gaps) > 0:
            for i in gaps:
                (g1, g2) = i
                x1 = mdates.date2num(g1)
                x2 = mdates.date2num(g2)
                (ymin, ymax) = plt.ylim()
                axes.add_patch(Rectangle((x1, ymin), x2 - x1,
                                         ymax - ymin, facecolor="lightgrey"))
        # Add a grid to the graph to ease visualization
        axes.grid(True)
        lgd = None
        # Draw the legend only when needed
        if len(datanames) > 1 or \
           (len(datanames) == 1 and len(datanames[0].split('#')) > 1):
            # We want the legends box roughly square shaped
            # and not take up too much room
            props = matplotlib.font_manager.FontProperties(size='xx-small')
            if len(datanames) < LEGEND_THRESHOLD:
                cols = int((len(datanames) ** 0.5))
                lgd = axes.legend(loc=1, ncol=cols, shadow=True, prop=props)
            else:
                # Too many entries: move the legend below the plot.
                cols = int(len(datanames) ** 0.6)
                lgd = axes.legend(loc=9, ncol=cols,
                                  bbox_to_anchor=(0.5, -0.29),
                                  shadow=True, prop=props)
        if len(datanames) == 0:
            return None
        try:
            if lgd:
                plt.savefig(fname, bbox_extra_artists=(lgd,),
                            bbox_inches='tight')
            else:
                plt.savefig(fname, bbox_inches='tight')
        # NOTE(review): bare except that prints a traceback and kills the
        # whole process with sys.exit(-1) - heavy-handed for a plot failure.
        except:
            import traceback
            print(traceback.format_exc())
            import sys
            sys.exit(-1)
        # Release all figure state so memory stays flat across many graphs.
        plt.cla()
        plt.clf()
        plt.close('all')
def plot_svg(self, graphs, output, labels):
"""Given a list of graphs, output an svg file per graph.
Input is a list of strings. A graph with multiple datasets
is a string with datasets separated by comma"""
if output == 'out.pdf':
output = 'graph'
counter = 1
fnames = []
for i in graphs:
subgraphs = i.split(',')
fname = self._graph_filename(subgraphs, '.svg')
fnames.append(fname)
self.plot_datasets((['', None, subgraphs], subgraphs), fname,
|
jkyeung/XlsxWriter | xlsxwriter/test/worksheet/test_date_time_01.py | Python | bsd-2-clause | 7,088 | 0 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
import unittest
from datetime import datetime
from ...worksheet import Worksheet
class TestConvertDateTime(unittest.TestCase):
"""
Test the Worksheet _convert_date_time() method against dates extracted
from Excel.
"""
def setUp(self):
self.worksheet = Worksheet()
def test_convert_date_time(self):
"""Test the _convert_date_time() method."""
# Dates and corresponding numbers from an Excel file.
excel_dates = [
('1899-12-31T00:00:00.000', 0),
('1982-08-25T00:15:20.213', 30188.010650613425),
('2065-04-19T00:16:48.290', 60376.011670023145),
('2147-12-15T00:55:25.446', 90565.038488958337),
('2230-08-10T01:02:46.891', 120753.04359827546),
('2313-04-06T01:04:15.597', 150942.04462496529),
('2395-11-30T01:09:40.889', 181130.04838991899),
('2478-07-25T01:11:32.560', 211318.04968240741),
('2561-03-21T01:30:19.169', 241507.06272186342),
('2643-11-15T01:48:25.580', 271695.07529606484),
('2726-07-12T02:03:31.919', 301884.08578609955),
('2809-03-06T02:11:11.986', 332072.09111094906),
('2891-10-31T02:24:37.095', 362261.10042934027),
('2974-06-26T02:35:07.220', 392449.10772245371),
('3057-02-19T02:45:12.109', 422637.1147234838),
('3139-10-17T03:06:39.990', 452826.12962951389),
('3222-06-11T03:08:08.251', 483014.13065105322),
('3305-02-05T03:19:12.576', 513203.13834),
('3387-10-01T03:29:42.574', 543391.14563164348),
('3470-05-27T03:37:30.813', 573579.15105107636),
('3553-01-21T04:14:38.231', 603768.17683137732),
('3635-09-16T04:16:28.559', 633956.17810832174),
('3718-05-13T04:17:58.222', 664145.17914608796),
('3801-01-06T04:21:41.794', 694333.18173372687),
('3883-09-02T04:56:35.792', 724522.20596981479),
('3966-04-28T05:25:14.885', 754710.2258667245),
('4048-12-21T05:26:05.724', 784898.22645513888),
('4131-08-18T05:46:44.068', 815087.24078782403),
('4214-04-13T05:48:01.141', 845275.24167987274),
('4296-12-07T05:53:52.315', 875464.24574438657),
('4379-08-03T06:14:48.580', 905652.26028449077),
('4462-03-28T06:46:15.738', 935840.28212659725),
('4544-11-22T07:31:20.407', 966029.31343063654),
('4627-07-19T07:58:33.754', 996217.33233511576),
('4710-03-15T08:07:43.130', 1026406.3386936343),
('4792-11-07T08:29:11.091', 1056594.3536005903),
('4875-07-04T09:08:15.328', 1086783.3807329629),
('4958-02-27T09:30:41.781', 1116971.3963169097),
('5040-10-23T09:34:04.462', 1147159.3986627546),
('5123-06-20T09:37:23.945', 1177348.4009715857),
('5206-02-12T09:37:56.655', 1207536.4013501736),
('5288-10-08T09:45:12.230', 1237725.406391551),
('5371-06-04T09:54:14.782', 1267913.412671088),
('5454-01-28T09:54:22.108', 1298101.4127558796),
('5536-09-24T10:01:36.151', 1328290.4177795255),
('5619-05-20T12:09:48.602', 1358478.5068125231),
('5702-01-14T12:34:08.549', 1388667.5237100578),
('5784-09-08T12:56:06.495', 1418855.5389640625),
('5867-05-06T12:58:58.217', 1449044.5409515856),
('5949-12-30T12:59:54.263', 1479232.5416002662),
('6032-08-24T13:34:41.331', 1509420.5657561459),
('6115-04-21T13:58:28.601', 1539609.5822754744),
('6197-12-14T14:02:16.899', 1569797.5849178126),
('6280-08-10T14:36:17.444', 1599986.6085352316),
('6363-04-06T14:37:57.451', 1630174.60969272),
('6445-11-30T14:57:42.757', 1660363.6234115392),
('6528-07-26T15:10:48.307', 1690551.6325035533),
('6611-03-22T15:14:39.890', 1720739.635183912),
('6693-11-15T15:19:47.988', 1750928.6387498612),
('6776-07-11T16:04:24.344', 1781116.6697262037),
('6859-03-07T16:22:23.952', 1811305.6822216667),
('6941-10-31T16:29:55.999', 1841493.6874536921),
('7024-06-26T16:58:20.259', 1871681.7071789235),
('7107-02-21T17:04:02.415', 1901870.7111390624),
('7189-10-16T17:18:29.630', 1932058.7211762732),
('7272-06-11T17:47:21.323', 1962247.7412190163),
('7355-02-05T17:53:29.866', 1992435.7454845603),
('7437-10-02T17:53:41.076', 2022624.7456143056),
('7520-05-28T17:55:06.044', 2052812.7465977315),
('7603-01-21T18:14:49.151', 2083000.7602910995),
('7685-09-16T18:17:45.738', 2113189.7623349307),
('7768-05-12T18:29:59.700', 2143377.7708298611),
('7851-01-07T18:33:21.233', 2173566.773162419),
('7933-09-02T19:14:24.673', 2203754.8016744559),
('8016-04-27T19:17:12.816', 2233942.8036205554),
('8098-12-22T19:23:36.418', 2264131.8080603937),
('8181-08-17T19:46:25.908', 2294319.8239109721),
('8264-04-13T20:07:47.314', 2324508.8387420601),
('8346-12-08T20:31:37.603', 2354696.855296331),
('8429-08-03T20:39:57.770', 2384885.8610853008),
('8512-03-29T20:50:17.067', 2415073.8682530904),
('8594-11-22T21:02:57.827', 2445261.8770581828),
('8677-07-19T21:23:05.519', 2475450.8910360998),
('8760-03-14T21:34:49.572', 2505638.8991848612),
('8842-11-08T21:39:05.944', 2535827.9021521294),
('8925-07-04T21:39:18.426', 2566015.9022965971),
('9008-02-28T21:46:07.769', 2596203.9070343636),
('9090-10-24T21:57:55.662', 2626392.9152275696),
('9173-06-19T22:19:11.732', 2656580.92999 | 68979),
('9256-02-13T | 22:23:51.376', 2686769.9332335186),
('9338-10-09T22:27:58.771', 2716957.9360968866),
('9421-06-05T22:43:30.392', 2747146.9468795368),
('9504-01-30T22:48:25.834', 2777334.9502990046),
('9586-09-24T22:53:51.727', 2807522.9540709145),
('9669-05-20T23:12:56.536', 2837711.9673210187),
('9752-01-14T23:15:54.109', 2867899.9693762613),
('9834-09-10T23:17:12.632', 2898088.9702850925),
('9999-12-31T23:59:59.000', 2958465.999988426),
]
epoch = datetime(1899, 12, 31)
for excel_date in excel_dates:
date = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
got = self.worksheet._convert_date_time(date)
exp = excel_date[1]
self.assertEqual(got, exp)
# Also test time deltas.
delta = date - epoch
got = self.worksheet._convert_date_time(delta)
exp = excel_date[1]
self.assertEqual(got, exp)
|
arturosevilla/repoze.who-x509 | tests/test_identifier.py | Python | bsd-3-clause | 6,719 | 0.000595 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Ckluster Technologies
# All Rights Reserved.
#
# This software is subject to the provision stipulated in
# http://www.ckluster.com/OPEN_LICENSE.txt.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from zope.interface.verify import verifyClass, verifyObject
from repoze.who.interfaces import IIdentifier
from repoze.who.middleware import match_classification
from repoze.who.plugins.x509 import X509Identifier
from tests import TestX509Base
class TestX509Identifier(TestX509Base):
    """Unit tests for the ``X509Identifier`` repoze.who IIdentifier plugin."""
    # NOTE(review): two stray "|" extraction artifacts corrupted this class
    # ("IIdent | ifier" and a leading "| " before an assert); reconstructed.

    def test_object_conforms_to_IIdentifier(self):
        verifyObject(IIdentifier, X509Identifier('Test'))

    def test_class_conforms_to_IIdentifier(self):
        verifyClass(IIdentifier, X509Identifier)

    def test_remember_without_headers(self):
        # Certificate auth has nothing to persist: remember() is a no-op.
        identifier = X509Identifier('Test')
        assert identifier.remember({}, None) is None

    def test_forget_without_headers(self):
        identifier = X509Identifier('Test')
        assert identifier.forget({}, None) is None

    def test_match_default_classification(self):
        identifier = X509Identifier('Test')
        assert identifier in match_classification(
            IIdentifier,
            (identifier,),
            'browser'
        )

    def test_dont_match_default_classification(self):
        identifier = X509Identifier('Test')
        assert identifier not in match_classification(
            IIdentifier,
            (identifier,),
            'other'
        )

    def test_match_custom_classification(self):
        identifier = X509Identifier('Test', classifications=['ios', 'browser'])
        assert identifier in match_classification(
            IIdentifier,
            (identifier,),
            'ios'
        )

    def test_dont_match_custom_classification(self):
        identifier = X509Identifier('Test', classifications=['ios', 'browser'])
        assert identifier not in match_classification(
            IIdentifier,
            (identifier,),
            'other'
        )

    def test_identify_default_values(self):
        # Login is taken from the Email component of the subject DN.
        identifier = X509Identifier('SSL_CLIENT_S_DN')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@example.com', 'C': 'US'}
        )
        creds = identifier.identify(environ)
        assert creds is not None
        assert 'login' in creds
        assert 'subject' in creds
        self.assertEquals(creds['subject'], environ['SSL_CLIENT_S_DN'])
        self.assertEquals(creds['login'], 'email@example.com')

    def test_identify_default_values_server_variable(self):
        # A dedicated SSL_CLIENT_S_DN_Email server variable wins over the
        # Email embedded in the full DN string.
        identifier = X509Identifier('SSL_CLIENT_S_DN')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@domain.com', 'C': 'US'}
        )
        environ['SSL_CLIENT_S_DN_Email'] = 'email@example.com'
        creds = identifier.identify(environ)
        assert creds is not None
        assert 'login' in creds
        assert 'subject' in creds
        self.assertEquals(creds['subject'], environ['SSL_CLIENT_S_DN'])
        self.assertEquals(creds['login'], 'email@example.com')

    def test_invalid_subject_dn(self):
        identifier = X509Identifier('stuff')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@example.com', 'C': 'US'}
        )
        creds = identifier.identify(environ)
        assert creds is None

    def test_invalid_certificate(self):
        # An unverified certificate must never yield credentials.
        identifier = X509Identifier('stuff')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@example.com', 'C': 'US'},
            verified=False
        )
        creds = identifier.identify(environ)
        assert creds is None

    def test_without_field_in_dn(self):
        identifier = X509Identifier('SSL_CLIENT_S_DN', login_field='Lala')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@example.com', 'C': 'US'}
        )
        creds = identifier.identify(environ)
        assert creds is None

    def test_invalid_dn(self):
        identifier = X509Identifier('SSL_CLIENT_S_DN')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            ''
        )
        creds = identifier.identify(environ)
        assert creds is None

    def test_allow_multiple_values(self):
        identifier = X509Identifier('SSL_CLIENT_S_DN', multiple_values=True)
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            '/Email=email1@example.com/Email=email2@example.com/O=Org'
        )
        creds = identifier.identify(environ)
        assert 'email1@example.com' in creds['login']
        assert 'email2@example.com' in creds['login']

    def test_allow_multiple_values_server_variables(self):
        # Apache exposes repeated DN components as _0, _1, ... variables.
        identifier = X509Identifier('SSL_CLIENT_S_DN', multiple_values=True)
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            {'CN': 'Name', 'Email': 'email@domain.com', 'C': 'US'}
        )
        environ['SSL_CLIENT_S_DN_Email'] = ''
        environ['SSL_CLIENT_S_DN_Email_0'] = 'email1@example.com'
        environ['SSL_CLIENT_S_DN_Email_1'] = 'email2@example.com'
        creds = identifier.identify(environ)
        assert 'email1@example.com' in creds['login']
        assert 'email2@example.com' in creds['login']

    def test_multiple_values_but_disabled(self):
        identifier = X509Identifier('SSL_CLIENT_S_DN')
        environ = self.make_environ(
            {'CN': 'Issuer', 'C': 'US', 'O': 'Company'},
            '/Email=email1@example.com/Email=email2@example.com/O=Org'
        )
        creds = identifier.identify(environ)
        assert creds is None
|
joaofrancese/heavy-destruction | Panda/src/objects/__init__.py | Python | bsd-3-clause | 17 | 0.117647 | __author__="Joao"
KimBoWoon/Embeddad-System | nfc-server/nfcserver/controller/index.py | Python | mit | 1,224 | 0.000826 | # -*- coding: utf-8 -*-
from nfcserver.model.access import Access
from nfcserver.model.user import User
from flask import render_template, request, redirect, url_for
from nfcserver.db import dao
from nfcserver.blueprint import nfc
from exception import NoneUserName
import nxppy, time
@nfc.route('/access')
def accessPage():
    """Render the access log page listing all Access rows, oldest first."""
    entries = dao.query(Access).order_by(Access.date.asc()).all()
    # The template keyword must stay 'users' - index.html expects it.
    return render_template('index.html', users=entries)
@nfc.route('/')
def indexPage():
    """Poll the NFC reader forever; record each recognized tag as an Access row.

    NOTE(review): this handler never returns a response - it blocks the
    request in an infinite polling loop. Behavior is kept unchanged, but it
    probably belongs in a background worker rather than a Flask view.
    """
    print('NFC TAG')
    mifare = nxppy.Mifare()
    # Print card UIDs as they are detected
    while True:
        try:
            nfcid = mifare.select()
            print(nfcid)
            # (Stray "|" extraction artifacts removed from the next two
            # statements: "| user = ..." and "new_access = | Access(...)".)
            user = dao.query(User).filter(User.nfcid == nfcid).first()
            if user is None:
                raise NoneUserName
            new_access = Access(user.name, user.nfcid)
            dao.add(new_access)
            dao.commit()
            print(new_access.name + " Access")
        except NoneUserName as e:
            print(e)
        except nxppy.SelectError:
            # SelectError is raised if no card is in the field.
            # print('nxppy.SelectError')
            print('NFC Tag를 접촉해주세요')
        # Throttle polling to once per second. NOTE(review): original
        # indentation was lost; loop-level placement assumed, matching the
        # canonical nxppy polling example - confirm.
        time.sleep(1)
|
opencivicdata/scrapers-ca | ca_qc_brossard/__init__.py | Python | mit | 288 | 0 | from utils import CanadianJurisdiction
class Brossard(CanadianJurisdiction):
    """Municipal council of Brossard, Québec (OpenCivicData jurisdiction).

    Declarative metadata consumed by the CanadianJurisdiction base class.
    """
    # NOTE(review): stray "|" extraction artifacts around the division_id
    # and url assignments in the original text have been removed.
    classification = 'legislature'
    division_id = 'ocd-division/country:ca/csd:2458007'
    division_name = 'Brossard'
    name = 'Conseil municipal de Brossard'
    url = 'http://www.ville.brossard.qc.ca'
|
dianamor8/waiter | manage.py | Python | lgpl-3.0 | 249 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == " | __main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "waiter.settings")
| from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
ddico/odoo | addons/l10n_latam_invoice_document/wizards/account_move_reversal.py | Python | agpl-3.0 | 3,941 | 0.003045 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class AccountMoveReversal(models.TransientModel):
    """Extend the credit-note (reversal) wizard with Latin American legal
    document types and numbers.

    NOTE(review): two "|" extraction artifacts in the field declarations
    ("l10n_l | atam_available..." and "fie | lds.Boolean") are repaired here.
    """
    _inherit = "account.move.reversal"

    l10n_latam_use_documents = fields.Boolean(compute='_compute_document_type')
    l10n_latam_document_type_id = fields.Many2one('l10n_latam.document.type', 'Document Type', ondelete='cascade', domain="[('id', 'in', l10n_latam_available_document_type_ids)]", compute='_compute_document_type', readonly=False)
    l10n_latam_available_document_type_ids = fields.Many2many('l10n_latam.document.type', compute='_compute_document_type')
    l10n_latam_document_number = fields.Char(string='Document Number')
    l10n_latam_manual_document_number = fields.Boolean(compute='_compute_l10n_latam_manual_document_number', string='Manual Number')

    @api.depends('l10n_latam_document_type_id')
    def _compute_l10n_latam_manual_document_number(self):
        # Manual numbering is required when the reversed move's journal uses
        # LATAM documents and that journal needs manually-entered numbers.
        for rec in self.filtered('move_ids'):
            move = rec.move_ids[0]
            if move.journal_id and move.journal_id.l10n_latam_use_documents:
                rec.l10n_latam_manual_document_number = self.env['account.move']._is_manual_document_number(move.journal_id)

    @api.model
    def _reverse_type_map(self, move_type):
        # Map a move type to the move type its reversal should carry.
        # NOTE(review): 'out_refund' has no entry (returns None) and
        # 'out_receipt' maps to 'in_receipt' - kept as-is; confirm upstream.
        match = {
            'entry': 'entry',
            'out_invoice': 'out_refund',
            'in_invoice': 'in_refund',
            'in_refund': 'in_invoice',
            'out_receipt': 'in_receipt',
            'in_receipt': 'out_receipt'}
        return match.get(move_type)

    @api.depends('move_ids')
    def _compute_document_type(self):
        # Reset defaults; then, for single-move reversals on document
        # journals, derive type/number info from a virtual refund move.
        self.l10n_latam_available_document_type_ids = False
        self.l10n_latam_document_type_id = False
        self.l10n_latam_use_documents = False
        for record in self:
            if len(record.move_ids) > 1:
                move_ids_use_document = record.move_ids._origin.filtered(lambda move: move.l10n_latam_use_documents)
                if move_ids_use_document:
                    raise UserError(_('You can only reverse documents with legal invoicing documents from Latin America one at a time.\nProblematic documents: %s') % ", ".join(move_ids_use_document.mapped('name')))
            else:
                record.l10n_latam_use_documents = record.move_ids.journal_id.l10n_latam_use_documents

            if record.l10n_latam_use_documents:
                # Build an in-memory (not persisted) refund to let the
                # account.move compute methods pick the document types.
                refund = record.env['account.move'].new({
                    'move_type': record._reverse_type_map(record.move_ids.move_type),
                    'journal_id': record.move_ids.journal_id.id,
                    'partner_id': record.move_ids.partner_id.id,
                    'company_id': record.move_ids.company_id.id,
                })
                record.l10n_latam_document_type_id = refund.l10n_latam_document_type_id
                record.l10n_latam_available_document_type_ids = refund.l10n_latam_available_document_type_ids

    def _prepare_default_reversal(self, move):
        """ Set the default document type and number in the new revsersal move taking into account the ones selected in
        the wizard """
        res = super()._prepare_default_reversal(move)
        res.update({
            'l10n_latam_document_type_id': self.l10n_latam_document_type_id.id,
            'l10n_latam_document_number': self.l10n_latam_document_number,
        })
        return res

    @api.onchange('l10n_latam_document_number', 'l10n_latam_document_type_id')
    def _onchange_l10n_latam_document_number(self):
        # Normalize the typed number to the document type's canonical format.
        if self.l10n_latam_document_type_id:
            l10n_latam_document_number = self.l10n_latam_document_type_id._format_document_number(
                self.l10n_latam_document_number)
            if self.l10n_latam_document_number != l10n_latam_document_number:
                self.l10n_latam_document_number = l10n_latam_document_number
|
cawc/sudokusolve | grabsudoku.py | Python | gpl-3.0 | 804 | 0.034826 | from bs4 import BeautifulSoup
import requests, re
# Download n "evil" sudokus from websudoku.com and save them as text files.
n = int(input('How many sudoku\'s do you want to download (between 1 and 10)? '))
if n < 1 or n > 10:
    # The original called the undefined name die(), which would crash with
    # NameError; exit cleanly instead.
    raise SystemExit('Please choose a number between 1 and 10.')
url = 'http://show.websudoku.com/?level=4'
for _ in range(n):
    page = requests.get(url)
    page.raise_for_status()
    rawPage = page.text
    # The puzzle id is the first number on line 21 of the returned HTML.
    sudokuid = int(re.search(r'\d+', rawPage.split('\n')[20]).group())
    soup = BeautifulSoup(rawPage, 'html.parser')
    sudokuTable = soup.findAll(True, {'class': ['s0', 'd0']})
    # 's0' cells carry the given digits, 'd0' cells are blanks (stored as 0).
    # Build the flat 81-cell list once (the original rebuilt it for every
    # row slice), then reshape into 9 rows of 9.
    cells = [(int(item['value']) if item.get('class')[0] == 's0' else 0)
             for item in sudokuTable]
    sudoku = [cells[row:row + 9] for row in range(0, 81, 9)]
    filename = 'sudokus/sudoku_%i.txt' % sudokuid
    # 'with' guarantees the file is closed (the original leaked the handle).
    with open(filename, 'w') as sudokufile:
        for line in sudoku:
            sudokufile.write(str(line).replace(',', ' ').replace('[', '').replace(']', ' ') + '\n')
# NOTE(review): indentation was lost in the source; the final prompt is
# assumed to sit after the loop (one prompt total) - confirm.
input('Done!')
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_virtual_network_peerings_operations.py | Python | mit | 22,821 | 0.005039 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations:
"""VirtualNetworkPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval...).
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        virtual_network_name: str,
        virtual_network_peering_name: str,
        **kwargs: Any
    ) -> None:
        # Initial (non-polling) call of the delete long-running operation:
        # issues the DELETE request once; begin_delete() drives polling on top.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct query parameters.
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all valid success shapes for an ARM async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        virtual_network_name: str,
        virtual_network_peering_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified virtual network peering.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network peering.
        :type virtual_network_peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # (Docstring corruption repaired: stray "|" artifacts had split the
        # ":keyword polling:" description in the original text.)
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial DELETE before polling.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_network_name=virtual_network_name,
                virtual_network_peering_name=virtual_network_peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtu |
rcgee/oq-hazardlib | openquake/hazardlib/gsim/boore_atkinson_2011.py | Python | agpl-3.0 | 3604 | 0.00111 | # -*- coding: utf-8 -*-
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`BooreAtkinson2011`,
:class:`Atkinson2008prime`
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.boore_atkinson_2008 import BooreAtkinson2008
from openquake.hazardlib.gsim.base import CoeffsTable
from openquake.hazardlib import const
class BooreAtkinson2011(BooreAtkinson2008):
    """
    GMPE applying the corrections proposed by Gail M. Atkinson and
    D. Boore in "Modifications to Existing Ground-Motion Prediction
    Equations in Light of New Data" (2011, Bulletin of the Seismological
    Society of America, Volume 101, No. 3, pages 1121-1135) on top of
    Boore & Atkinson (2008).
    """
    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        # Base model values come from Boore & Atkinson (2008)
        mean, stddevs = super(BooreAtkinson2011, self).get_mean_and_stddevs(
            sites, rup, dists, imt, stddev_types)
        # Correction factor: Atkinson and Boore (2011), equation 5 at
        # page 1126 and nga08_gm_tmr.for line 508
        offset_term = np.max([0, 3.888 - 0.674 * rup.mag])
        slope_term = np.max([0, 2.933 - 0.510 * rup.mag])
        corr_fact = 10.0 ** (offset_term -
                             slope_term * np.log10(dists.rjb + 10.))
        # Apply the factor in linear (non-log) space
        return np.log(np.exp(mean) * corr_fact), stddevs
class Atkinson2008prime(BooreAtkinson2011):
    """
    Boore & Atkinson (2011) adjustment to the Atkinson (2008) GMPE
    (the unadjusted Atkinson 2008 model is not itself implemented in
    OpenQuake).
    """
    # The adjusted model targets Eastern North America (stable crust)
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL

    # Period-dependent adjustment coefficients (Boore & Atkinson, 2011)
    A08_COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c d
pgv 0.450 0.00211
pga 0.419 0.00039
0.005 0.417 0.00192
0.050 0.417 0.00192
0.100 0.245 0.00273
0.200 0.042 0.00232
0.300 -0.078 0.00190
0.500 -0.180 0.00180
1.000 -0.248 0.00153
2.000 -0.214 0.00117
3.030 -0.084 0.00091
5.000 0.000 0.00000
10.00 0.000 0.00000
""")

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        mean, stddevs = super(Atkinson2008prime, self).get_mean_and_stddevs(
            sites, rup, dists, imt, stddev_types)
        coeffs = self.A08_COEFFS[imt]
        # Distance-dependent ENA adjustment, applied in linear space
        f_ena = 10.0 ** (coeffs["c"] + coeffs["d"] * dists.rjb)
        return np.log(np.exp(mean) * f_ena), stddevs
|
DavidSPCrack/UEM_AccesoADatos | PythonTest/test/HolaMundoPack.py | Python | gpl-2.0 | 589 | 0.005111 | '''
Created on 20/12/2014
@author: usuario.apellido
'''
print("HOLA MUNDO" | )
print("AAAAAAAAAAAAAAAAAAAAAAAA")
mi_tupla = ("ARRAY LLAMADO TUPLA", 15, 2.8, "OTRO DATO", 25)
semaforo = "verde"
if semaforo == "verde":
print("Cruzar la calle")
else:
print("Esperar")
print("El niño busco la concha en la playa")
anio = 2001
while anio <= 2012:
print("Informes del Año", str(anio))
anio += 1
while True:
nombre = input("Indique su nombre: ")
print(nombre)
if nombre:
break
print("Ha | terminado el while")
|
longmen21/edx-platform | cms/djangoapps/contentstore/views/course.py | Python | agpl-3.0 | 71,331 | 0.003266 | """
Views related to operations on course objects
"""
import copy
import json
import logging
import random
import string # pylint: disable=deprecated-module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, Http404
from django.shortcuts import redirect
import django.utils
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods, require_GET
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from .component import (
ADVANCED_COMPONENT_TYPES,
)
from .item import create_xblock_info
from .library import LIBRARIES_ENABLED
from ccx_keys.locator import CCXLocator
from contentstore.course_group_config import (
COHORT_SCHEME,
GroupConfiguration,
GroupConfigurationsValidationError,
RANDOM_SCHEME,
)
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.push_notification import push_notification_enabled
from contentstore.tasks import rerun_course
from contentstore.utils import (
add_instructor,
initialize_permissions,
get_lms_link_for_item,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_usage_url,
reverse_url,
)
from contentstore.views.entrance_exam import (
create_entrance_exam,
delete_entrance_exam,
update_entrance_exam,
)
from course_action_state.managers import CourseActionStateItemNotFoundError
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from edxmako.shortcuts import render_to_response
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.djangoapps.credit.api import is_credit_course, get_credit_requirements
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import get_programs
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.lib.courses import course_image_url
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student import auth
from student.auth import has_course_author_access, has_studio_write_access, has_studio_read_access
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff, UserBasedRole
)
from util.course import get_lms_link_for_about_page
from util.date_utils import get_default_time_display
from util.json_request import JsonResponse, JsonResponseBadRequest, | expect_json
from util.milestones_helpers import (
is_entrance_exams_enabled,
is_prerequisite_courses_enabled,
is_valid_course_key,
set_prerequisite_courses,
)
from util.organizations_helpers import (
| add_organization_course,
get_organization_by_short_name,
organizations_enabled,
)
from util.string_utils import _has_non_ascii_characters
from xblock_django.api import deprecated_xblocks
from xmodule.contentstore.content import StaticContent
from xmodule.course_module import CourseFields
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
log = logging.getLogger(__name__)
# Public view entry points exported by this module (consumed by URL wiring).
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
           'course_info_update_handler', 'course_search_index_handler',
           'course_rerun_handler',
           'settings_handler',
           'grading_handler',
           'advanced_settings_handler',
           'course_notifications_handler',
           'textbooks_list_handler', 'textbooks_detail_handler',
           'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
    """
    Raised when a shorter course-lookup strategy (e.g. fetching by group)
    cannot be used and we must `fall back` to fetching *all* courses
    available to a user.
    """
    pass
def get_course_and_check_access(course_key, user, depth=0):
    """
    Return the course module for *course_key* after enforcing studio read
    access for *user*; used by the view functions in this file.

    Raises PermissionDenied when the user lacks read access.
    """
    if has_studio_read_access(user, course_key):
        return modulestore().get_course(course_key, depth=depth)
    raise PermissionDenied()
def reindex_course_and_check_access(course_key, user):
    """
    Restart courseware-search indexing for *course_key*, requiring that
    *user* has course author access.

    Raises PermissionDenied when the user lacks author access.
    """
    if has_course_author_access(user, course_key):
        return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
    raise PermissionDenied()
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
    """
    RESTful handler for course notifications.

    Both course_key_string and action_state_id must be set, else an
    HttpResponseBadRequest is returned. The requesting user must have
    write access to the course, else PermissionDenied is raised.

    GET    json: information about the notification (action, state, etc)
    DELETE json: success/failure of dismissal/deletion of the notification
    PUT    raises NotImplementedError
    POST   raises NotImplementedError
    """
    # Both identifiers are mandatory
    if not course_key_string or not action_state_id:
        return HttpResponseBadRequest()
    response_format = request.GET.get('format') or request.POST.get('format') or 'html'
    course_key = CourseKey.from_string(course_key_string)
    wants_json = (response_format == 'json' or
                  'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'))
    if not wants_json:
        # Only the JSON API is implemented
        return HttpResponseNotFound()
    if not has_studio_write_access(request.user, course_key):
        raise PermissionDenied()
    if request.method == 'GET':
        return _course_notifications_json_get(action_state_id)
    if request.method == 'DELETE':
        # we assume any delete requests dismiss actions from the UI
        return _dismiss_notification(request, action_state_id)
    if request.method in ('PUT', 'POST'):
        raise NotImplementedError()
    return HttpResponseBadRequest()
def _course_notifications_json_get(course_action_state_id):
    """
    Return a JSON response describing the rerun action and its state for
    the given id; HttpResponseBadRequest when the id is unknown.
    """
    try:
        state = CourseRerunState.objects.find_first(id=course_action_state_id)
    except CourseActionStateItemNotFoundError:
        return HttpResponseBadRequest()
    return JsonResponse({
        'action': state.action,
        'state': state.state,
        'should_display': state.should_display
    })
def _dismiss_notification(request, course_action_state_id): |
Nikea/VTTools | vttools/tests/test_utils.py | Python | bsd-3-clause | 4,142 | 0.00169 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
logger = logging.getLogger(__name | __)
from nose.tools import assert_true
from skxray.testing.decorators import known_fail_if, skip_if
from vttools.utils impor | t make_symlink, query_yes_no
import tempfile
import os
import shutil
from subprocess import call
def destroy(path):
    """Recursively delete *path*.

    Falls back to the Windows ``rmdir /S`` shell command when
    ``shutil.rmtree`` fails (e.g. locked or read-only files on Windows).
    """
    try:
        shutil.rmtree(path)
    except OSError:
        # Fix: the original caught WindowsError, which is undefined on
        # non-Windows interpreters (NameError when the except clause is
        # evaluated). WindowsError is an alias of OSError on Windows, so
        # catching OSError preserves the Windows behaviour and keeps the
        # clause valid everywhere.
        call(['rmdir', '/S', path], shell=True)
@skip_if(not os.name == 'nt', 'not on window')
def test_make_symlink():
    """Exercise make_symlink against a file, an empty dir and a non-empty dir.

    Fixes: removed the dead ``src = open`` assignment (immediately
    overwritten, never used) and replaced ``except WindowsError`` with
    ``except OSError`` (WindowsError is an alias of OSError on Windows and
    undefined elsewhere).
    """
    test_loc = os.path.join(os.path.expanduser('~'), 'symlinking_test')
    try:
        os.mkdir(test_loc)
    except OSError:
        # Leftover from a previous run: wipe and recreate
        destroy(test_loc)
        os.mkdir(test_loc)
    link_name = 'link'
    src = os.path.join(test_loc, link_name)
    os.mkdir(src)
    os.mkdir(os.path.join(test_loc, 'dst'))
    dst = os.path.join(test_loc, 'dst', link_name)
    # create a temporary file in the target location called `link_name`
    with open(dst, 'w+') as tmp_file:
        tmp_file.write('test')
    assert_true(make_symlink(dst=dst, src=src, silently_move=True))
    destroy(dst)
    # make an empty temporary folder in the target location called `link_name`
    os.mkdir(dst)
    assert_true(make_symlink(dst=dst, src=src, silently_move=True))
    destroy(dst)
    # make a non-empty temporary tree in the target location called `link_name`
    os.mkdir(dst)
    os.mkdir(os.path.join(dst, 'sub_folder'))
    assert_true(make_symlink(dst=dst, src=src, silently_move=True))
    destroy(dst)
    shutil.rmtree(test_loc)
|
bgruening/EDeN | eden/converter/molecule/obabel.py | Python | gpl-3.0 | 893 | 0 | import openbabel as ob
import pybel
import networkx as nx
def obabel_to_eden(input, file_type='sdf', **options):
    """
    Read molecules from a data source and yield them as networkx graphs.

    Parameters
    ----------
    input : string
        A pointer to the data source.
    file_type : string
        Format understood by pybel (default 'sdf').
    """
    for molecule in pybel.readfile(file_type, input):
        # strip hydrogens before building the graph
        molecule.removeh()
        graph = obabel_to_networkx(molecule)
        if len(graph):
            yield graph
def obabel_to | _networkx(mol):
"""
Takes a pybel molecule object and converts it into a networkx graph.
"""
g = nx.Graph()
# atoms
for atom in mol:
label = str(atom.type)
g.add_node(atom.idx, label=label)
# bonds
for bond in ob.OBMolBondIter(mol.OBMol):
label = str(bond.GetBO())
g.add_edge(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), label=label)
return g
|
sandrofolk/girox | girox/event/migrations/0016_subscription_team.py | Python | gpl-3.0 | 498 | 0.002008 | # -*- coding | : utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-08 19:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Runs after the auto-generated 0015 migration of the ``event`` app.
    dependencies = [
        ('event', '0015_auto_20170408_1815'),
    ]

    operations = [
        # Add an optional free-text team name to Subscription.
        migrations.AddField(
            model_name='subscription',
            name='team',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='equipe'),
        ),
    ]
|
L33tCh/afj-flask | project/tests/test_auth.py | Python | mit | 9,874 | 0.000304 | # project/tests/test_auth.py
import time
import json
import unittest
from project.server import db
from project.server.models import User, BlacklistToken
from project.tests.base import BaseTestCase
def register_user(self, email, password):
    """POST a registration request to the auth API and return the response."""
    payload = json.dumps(dict(email=email, password=password))
    return self.client.post(
        '/auth/register',
        data=payload,
        content_type='application/json',
    )
def login_user(self, email, password):
    """POST a login request to the auth API and return the response."""
    payload = json.dumps(dict(email=email, password=password))
    return self.client.post(
        '/auth/login',
        data=payload,
        content_type='application/json',
    )
class TestAuthBlueprint(BaseTestCase):
def test_registration(self):
""" Test for user registration """
with self.client:
response = register_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully registered.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 201)
def test_registered_with_already_registered_user(self):
""" Test registration with already registered email"""
user = User(
email='joe@gmail.com',
password='test'
)
db.session.add(user)
db.session.commit()
with self.client:
response = register_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(
data['message'] == 'User already exists. Please Log in.')
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 202)
def test_registered_user_login(self):
""" Test for login of registered-user login """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.'
)
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# registered user login
response = login_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged in.')
self.assertTrue(data['auth_token'])
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 200)
def test_non_registered_user_login(self):
""" Test for login of non-registered user """
with self.client:
response = login_user(self, 'joe@gmail.com', '123456')
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'User does not exist.')
self.assertTrue(response.content_type == 'application/json')
self.assertEqual(response.status_code, 404)
def test_user_status(self):
""" Test for user status """
with self.client:
resp_register = register_user(self, 'joe@gmail.com', '123456')
response = self.client.get(
'/auth/status',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_register.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['data'] is not None)
self.assertTrue(data['data']['email'] == 'joe@gmail.com')
self.assertTrue(data['data']['admin'] is 'true' or 'false')
self.assertEqual(response.status_code, 200)
def test_user_status_malformed_bearer_token(self):
""" Test for user status with malformed bearer token"""
with self.client:
resp_register = register_user(self, 'joe@gmail.com', '123456')
response = self.client.get(
'/auth/status',
headers=dict(
Authorization='Bearer' + json.loads(
resp_register.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'fail')
self.assertTrue(data['message'] == 'Bearer token malformed.')
self.assertEqual(response.status_code, 401)
def test_valid_logout(self):
""" Test for logout before token expires """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.')
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# user login
resp_login = login_user(self, 'joe@gmail.com', '123456')
data_login = json.loads(resp_login.data.decode())
self.assertTrue(data_login['status'] == 'success')
self.assertTrue(data_login['message'] == 'Successfully logged in.')
self.assertTrue(data_logi | n['auth_token'])
self.assertTrue(resp_login.content_type == 'application/json')
| self.assertEqual(resp_login.status_code, 200)
# valid token logout
response = self.client.post(
'/auth/logout',
headers=dict(
Authorization='Bearer ' + json.loads(
resp_login.data.decode()
)['auth_token']
)
)
data = json.loads(response.data.decode())
self.assertTrue(data['status'] == 'success')
self.assertTrue(data['message'] == 'Successfully logged out.')
self.assertEqual(response.status_code, 200)
def test_valid_blacklisted_token_logout(self):
""" Test for logout after a valid token gets blacklisted """
with self.client:
# user registration
resp_register = register_user(self, 'joe@gmail.com', '123456')
data_register = json.loads(resp_register.data.decode())
self.assertTrue(data_register['status'] == 'success')
self.assertTrue(
data_register['message'] == 'Successfully registered.')
self.assertTrue(data_register['auth_token'])
self.assertTrue(resp_register.content_type == 'application/json')
self.assertEqual(resp_register.status_code, 201)
# user login
resp_login = login_user(self, 'joe@gmail.com', '123456')
data_login = json.loads(resp_login.data.decode())
self.assertTrue(data_login['status'] == 'success')
self.assertTrue(data_login['message'] == 'Successfully logged in.')
self.assertTrue(data_login['auth_token'])
self.assertTrue(resp_login.content_type == 'application/json')
self.assertEqual(resp_login.status_code, 200)
# blacklist a valid token
blacklist_token = BlacklistToken(
token=json.loads(resp_login.data.decode())['auth_token'] |
tomviner/pytest-django | tests/test_environment.py | Python | bsd-3-clause | 6,155 | 0.000162 | from __future__ import with_statement
import pytest
from django.core import mail
from django.db import connection
from pytest_django_test.app.models import Item
# It doesn't matter which order all the _again methods are run, we just need
# to check the environment remains constant.
# This is possible with some of the testdir magic, but this is the lazy way
# to do it.
def test_mail():
    """Sending one message records exactly that message in Django's outbox."""
    assert len(mail.outbox) == 0
    mail.send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
    assert len(mail.outbox) == 1
    sent = mail.outbox[0]
    assert sent.subject == 'subject'
    assert sent.body == 'body'
    assert sent.from_email == 'from@example.com'
    assert list(sent.to) == ['to@example.com']
def test_mail_again():
    # Re-run test_mail to verify the outbox is reset between tests.
    test_mail()
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable(django_testdir):
    # Build a minimal app: URLconf, a view rendering a template, and a
    # template that references an undefined variable.
    django_testdir.create_app_file("""
from django.conf.urls import url

from pytest_django_test.compat import patterns
from tpkg.app import views

urlpatterns = patterns(
    '',
    url(r'invalid_template/', views.invalid_template),
)
""", 'urls.py')
    django_testdir.create_app_file("""
from django.shortcuts import render


def invalid_template(request):
    return render(request, 'invalid_template.html', {})
""", 'views.py')
    django_testdir.create_app_file(
        "<div>{{ invalid_var }}</div>",
        'templates/invalid_template.html'
    )
    django_testdir.create_test_module('''
import pytest


def test_for_invalid_template(client):
    client.get('/invalid_template/')


@pytest.mark.ignore_template_errors
def test_ignore(client):
    client.get('/invalid_template/')
''')
    # With --fail-on-template-vars the first test fails on the undefined
    # variable, while the test marked ignore_template_errors passes.
    result = django_testdir.runpytest_subprocess('-s', '--fail-on-template-vars')
    result.stdout.fnmatch_lines_random([
        "tpkg/test_the_test.py F.",
        "Undefined template variable 'invalid_var' in 'invalid_template.html'",
    ])
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable_opt_in(django_testdir):
    # Same fixture as test_invalid_template_variable, but run WITHOUT
    # --fail-on-template-vars: both tests must pass.
    django_testdir.create_app_file("""
from django.conf.urls import url

from pytest_django_test.compat import patterns
from tpkg.app import views

urlpatterns = patterns(
    '',
    url(r'invalid_template/', views.invalid_template),
)
""", 'urls.py')
    django_testdir.create_app_file("""
from django.shortcuts import render


def invalid_template(request):
    return render(request, 'invalid_template.html', {})
""", 'views.py')
    django_testdir.create_app_file(
        "<div>{{ invalid_var }}</div>",
        'templates/invalid_template.html'
    )
    django_testdir.create_test_module('''
import pytest


def test_for_invalid_template(client):
    client.get('/invalid_template/')


@pytest.mark.ignore_template_errors
def test_ignore(client):
    client.get('/invalid_template/')
''')
    result = django_testdir.runpytest_subprocess('-s')
    result.stdout.fnmatch_lines_random([
        "tpkg/test_the_test.py ..",
    ])
@pytest.mark.django_db
def test_database_rollback():
    """Objects created inside a test must be rolled back afterwards."""
    before = Item.objects.count()
    assert before == 0
    Item.objects.create(name='blah')
    assert Item.objects.count() == before + 1
@pytest.mark.django_db
def test_database_rollback_again():
    # Re-run to confirm the previous test's insert was rolled back.
    test_database_rollback()
def test_database_name():
    """The configured DB must be a test database (in-memory or 'test_'-prefixed)."""
    db_name = connection.settings_dict['NAME']
    is_test_db = db_name == ':memory:' or db_name.startswith('test_')
    assert is_test_db
def test_database_noaccess():
    # Without the django_db marker, any ORM access must fail the test.
    with pytest.raises(pytest.fail.Exception):
        Item.objects.count()
class TestrunnerVerbosity:
    """Test that Django's code to setup and teardown the databases uses
    pytest's verbosity level."""

    @pytest.fixture
    def testdir(self, django_testdir):
        # Fix: removed a stray debugging ``print("testdir")`` left in the
        # fixture body.
        django_testdir.create_test_module('''
import pytest

@pytest.mark.django_db
def test_inner_testrunner():
    pass
''')
        return django_testdir

    def test_default(self, testdir):
        """Not verbose by default."""
        result = testdir.runpytest_subprocess('-s')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py ."])

    def test_vq_verbosity_0(self, testdir):
        """-v and -q results in verbosity 0."""
        result = testdir.runpytest_subprocess('-s', '-v', '-q')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py ."])

    def test_verbose_with_v(self, testdir):
        """Verbose output with '-v'."""
        result = testdir.runpytest_subprocess('-s', '-v')
        result.stdout.fnmatch_lines_random([
            "tpkg/test_the_test.py:*",
            "*PASSED*",
            "*Destroying test database for alias 'default'...*"])

    def test_more_verbose_with_vv(self, testdir):
        """More verbose output with '-v -v'."""
        result = testdir.runpytest_subprocess('-s', '-v', '-v')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py:*Creating test database for alias*",
            "*Creating table app_item*",
            "*PASSED*Destroying test database for alias 'default' ('*')...*"])

    def test_more_verbose_with_vv_and_reusedb(self, testdir):
        """More verbose output with '-v -v', and --create-db."""
        result = testdir.runpytest_subprocess('-s', '-v', '-v', '--create-db')
        result.stdout.fnmatch_lines([
            "tpkg/test_the_test.py:*Creating test database for alias*",
            "*PASSED*"])
        assert ("*Destroying test database for alias 'default' ('*')...*"
                not in result.stdout.str())
|
OriHoch/Open-Knesset | committees/models.py | Python | bsd-3-clause | 22,034 | 0.000957 | # encoding: utf-8
import re
import logging
from datetime import datetime, timedelta, date
from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.text import Truncator
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
from django.core.cache import cache
from django.utils.functional import cached_property
from django.conf import settings
from tagging.models import Tag, TaggedItem
from djangoratings.fields import RatingField
from committees.enums import CommitteeTypes
from events.models import Event
from links.models import Link
from mks.models import Knesset
from lobbyists.models import LobbyistCorporation
from itertools import groupby
from hebrew_numbers import gematria_to_int
from knesset_data_django.committees import members_extended
COMMITTEE_PROTOCOL_PAGINATE_BY = 120
logger = logging.getLogger("open-knesset.committees.models")
class Committee(models.Model):
    """A Knesset committee (or the plenum) with its membership and metadata."""

    name = models.CharField(max_length=256)
    # comma separated list of names used as name aliases for harvesting
    aliases = models.TextField(null=True, blank=True)
    members = models.ManyToManyField('mks.Member', related_name='committees',
                                     blank=True)
    chairpersons = models.ManyToManyField('mks.Member',
                                          related_name='chaired_committees',
                                          blank=True)
    replacements = models.ManyToManyField('mks.Member',
                                          related_name='replacing_in_committees',
                                          blank=True)
    # Generic relation: events point back at a committee via which_type/which_pk
    events = generic.GenericRelation(Event, content_type_field="which_type",
                                     object_id_field="which_pk")
    description = models.TextField(null=True, blank=True)
    portal_knesset_broadcasts_url = models.URLField(max_length=1000,
                                                    blank=True)
    type = models.CharField(max_length=10, default=CommitteeTypes.committee,
                            choices=CommitteeTypes.as_choices(),
                            db_index=True)
    hide = models.BooleanField(default=False)
    # Deprecated? In use? does not look in use
    protocol_not_published = models.BooleanField(default=False)
    knesset_id = models.IntegerField(null=True, blank=True)
    knesset_type_id = models.IntegerField(null=True, blank=True)
    knesset_parent_id = models.IntegerField(null=True, blank=True)
    # Deprecated? In use? does not look
    last_scrape_time = models.DateTimeField(null=True, blank=True)
    name_eng = models.CharField(max_length=256, null=True, blank=True)
    name_arb = models.CharField(max_length=256, null=True, blank=True)
    start_date = models.DateTimeField(null=True, blank=True)
    end_date = models.DateTimeField(null=True, blank=True)
    knesset_description = models.TextField(null=True, blank=True)
    knesset_description_eng = models.TextField(null=True, blank=True)
    knesset_description_arb = models.TextField(null=True, blank=True)
    knesset_note = models.TextField(null=True, blank=True)
    knesset_note_eng = models.TextField(null=True, blank=True)
    knesset_portal_link = models.TextField(null=True, blank=True)

    @property
    def gender_presence(self):
        # Returns a tuple of (female_presence, male_presence) attendance
        # counts accumulated over all of this committee's meetings.
        r = {'F': 0, 'M': 0}
        for cm in self.meetings.all():
            try:
                results = groupby(cm.mks_attended.all(), lambda mk: mk.gender)
            except ValueError:
                continue
            # groupby only groups consecutive items, but since every group
            # is summed into r the totals are correct for unsorted input.
            for i in results:
                key, count = i[0], len(list(i[1]))
                r[key] += count
        return r['F'], r['M']

    def __unicode__(self):
        # The plenum gets a translated fixed name; committees use their name.
        if self.type == 'plenum':
            return "%s" % ugettext('Plenum')
        else:
            return "%s" % self.name

    @models.permalink
    def get_absolute_url(self):
        # The plenum has its own named URL; committees use the detail view.
        if self.type == 'plenum':
            return 'plenum', []
        else:
            return 'committee-detail', [str(self.id)]

    @property
    def annotations(self):
        # All annotations attached to protocol parts of this committee's
        # meetings, joined down via raw SQL in .extra().
        protocol_part_tn = ProtocolPart._meta.db_table
        meeting_tn = CommitteeMeeting._meta.db_table
        committee_tn = Committee._meta.db_table
        annotation_tn = Annotation._meta.db_table
        # NOTE(review): ContentType (and Annotation) are not imported in the
        # visible import block -- presumably imported/defined elsewhere in
        # this module; confirm, otherwise this raises NameError at runtime.
        protocol_part_ct = ContentType.objects.get_for_model(ProtocolPart)
        ret = Annotation.objects.select_related().filter(
            content_type=protocol_part_ct)
        return ret.extra(tables=[protocol_part_tn,
                                 meeting_tn, committee_tn],
                         where=["%s.object_id=%s.id" % (
                             annotation_tn, protocol_part_tn),
                             "%s.meeting_id=%s.id" % (
                                 protocol_part_tn, meeting_tn),
                             "%s.committee_id=%%s" % meeting_tn],
                         params=[self.id]).distinct()

    def members_by_name(self, ids=None, current_only=False):
        """Return a queryset of all members, sorted by their name."""
        members = members_extended(self, current_only=current_only, ids=ids)
        return members.order_by('name')

    def recent_meetings(self, limit=10, do_limit=True):
        # Latest meetings, newest first. With do_limit=True returns
        # (meetings, more_available); otherwise the full queryset.
        relevant_meetings = self.meetings.all().order_by('-date')
        if do_limit:
            more_available = relevant_meetings.count() > limit
            return relevant_meetings[:limit], more_available
        else:
            return relevant_meetings

    def future_meetings(self, limit=10, do_limit=True):
        # Upcoming events, soonest first; same return shape as
        # recent_meetings.
        current_date = datetime.now()
        relevant_events = self.events.filter(when__gt=current_date).order_by(
            'when')
        if do_limit:
            more_available = relevant_events.count() > limit
            return relevant_events[:limit], more_available
        else:
            return relevant_events

    def protocol_not_yet_published_meetings(self, end_date, limit=10,
                                            do_limit=True):
        # Events newer than the last meeting that has a protocol, up to
        # end_date -- i.e. meetings whose protocol is still pending.
        start_date = self.meetings.all().order_by(
            '-date').first().date + timedelta(days=1) \
            if self.meetings.count() > 0 \
            else datetime.now()
        relevant_events = self.events.filter(when__gt=start_date,
                                             when__lte=end_date).order_by(
            '-when')
        if do_limit:
            more_available = relevant_events.count() > limit
            return relevant_events[:limit], more_available
        else:
            return relevant_events
# Hebrew phrases and numbering patterns that disqualify a line from being a
# protocol-part header (see legitimate_header below).  Python 2: the byte
# literal is decoded to a unicode pattern before compiling.
not_header = re.compile(
    r'(^אני )|((אלה|אלו|יבוא|מאלה|ייאמר|אומר|אומרת|נאמר|כך|הבאים|הבאות):$)|(\(.\))|(\(\d+\))|(\d\.)'.decode(
        'utf8'))
def legitimate_header(line):
    """Returns true if 'line' looks like something should be a protocol part header"""
    # Explicit <...> markup is always treated as a header.
    if re.match(r'^\<.*\>\W*$', line):
        return True
    # Otherwise it must be a short line that ends with a colon and does not
    # match any of the disqualifying patterns.
    looks_like_header = (line.strip().endswith(':')
                         and len(line) <= 50
                         and not not_header.search(line))
    return looks_like_header
class CommitteeMeetingManager(models.Manager):
    """Custom manager for committee meetings with tag/date filtering."""
    def filter_and_order(self, *args, **kwargs):
        """Filter meetings by the optional kwargs 'tagged', 'from_date' and
        'to_date', returning them with their committee pre-fetched."""
        qs = self.all()
        # In dealing with 'tagged' we use an ugly workaround for the fact that generic relations
        # don't work as expected with annotations.
        # please read http://code.djangoproject.com/ticket/10461 before trying to change this code
        if kwargs.get('tagged'):
            if kwargs['tagged'] == ['false']:
                # Only meetings that carry no tags at all.
                qs = qs.exclude(tagged_items__isnull=False)
            elif kwargs['tagged'] != ['all']:
                qs = qs.filter(tagged_items__tag__name__in=kwargs['tagged'])
        if kwargs.get('to_date'):
            # +1 day so the 'to_date' day itself is included in the range.
            qs = qs.filter(time__lte=kwargs['to_date'] + timedelta(days=1))
        if kwargs.get('from_date'):
            qs = qs.filter(time__gte=kwargs['from_date'])
        return qs.select_related('committee')
class CommitteesMeetingsOnlyManager(CommitteeMeetingManager):
def get_queryset(self):
return super(CommitteesMeetingsOnlyMana |
Vagab0nd/SiCKRAGE | sickchill/providers/torrent/TorrentProvider.py | Python | gpl-3.0 | 3,971 | 0.001511 | from datetime import datetime
import bencodepy
from feedparser import FeedParserDict
from sickchill import logger, settings
from sickchill.helper.common import try_int
from sickchill.oldbeard.classes import Proper, TorrentSearchResult
from sickchill.oldbeard.common import Quality
from sickchill.oldbeard.db import DBConnection
from sickchill.providers.GenericProvider import GenericProvider
from sickchill.show.Show import Show
class TorrentProvider(GenericProvider):
    """Base class for torrent search providers."""
    def __init__(self, name):
        """Create a torrent provider named ``name``."""
        super().__init__(name)
        # Preferred seed ratio; None means "use the global default".
        self.ratio = None
        self.provider_type = GenericProvider.TORRENT
    def find_propers(self, search_date=None):
        """Search for PROPER/REPACK releases of episodes snatched or
        downloaded since ``search_date`` and return them as Proper objects.
        """
        results = []
        db = DBConnection()
        placeholders = ', '.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST))
        sql_results = db.select(
            f'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id) WHERE e.airdate >= ? AND e.status IN ({placeholders}) and e.is_proper = 0',
            [search_date.toordinal(), *Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST]
        )
        for result in sql_results or []:
            show = Show.find(settings.showList, int(result['showid']))
            if show:
                episode = show.getEpisode(result['season'], result['episode'])
                # Try every configured proper search term for the episode.
                for term in self.proper_strings:
                    search_strings = self.get_episode_search_strings(episode, add_string=term)
                    for search_string in search_strings:
                        for item in self.search(search_string):
                            title, url = self._get_title_and_url(item)
                            results.append(Proper(title, url, datetime.today(), show))
        return results
    @property
    def is_active(self):
        """True when torrents are globally enabled and this provider is on."""
        return bool(settings.USE_TORRENTS) and self.is_enabled
    @property
    def _custom_trackers(self):
        """Extra '&tr=' tracker parameters (public providers only)."""
        if not (settings.TRACKERS_LIST and self.public):
            return ''
        # Set comprehension de-duplicates trackers and drops blank entries.
        return '&tr=' + '&tr='.join({x.strip() for x in settings.TRACKERS_LIST.split(',') if x.strip()})
    def _get_result(self, episodes):
        """Wrap ``episodes`` in a TorrentSearchResult."""
        return TorrentSearchResult(episodes)
    def _get_size(self, item):
        """Best-effort extraction of the release size from a result item.

        Returns -1 when no plausible size is found.
        """
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1
        size = try_int(size, -1)
        # Make sure we didn't select seeds/leechers by accident
        if not size or size < 1024 * 1024:
            size = -1
        return size
    def _get_storage_dir(self):
        """Directory where torrent files are stored."""
        return settings.TORRENT_DIR
    def _get_title_and_url(self, item):
        """Return (title, download_url) from a dict/feed entry or sequence."""
        if isinstance(item, (dict, FeedParserDict)):
            download_url = item.get('url', '')
            title = item.get('title', '')
            if not download_url:
                download_url = item.get('link', '')
        elif isinstance(item, (list, tuple)) and len(item) > 1:
            download_url = item[1]
            title = item[0]
        else:
            download_url = ''
            title = ''
        if title.endswith('DIAMOND'):
            logger.info('Skipping DIAMOND release for mass fake releases.')
            download_url = title = 'FAKERELEASE'
        if download_url:
            # Feeds may deliver HTML-escaped ampersands.
            download_url = download_url.replace('&amp;', '&')
        if title:
            title = title.replace(' ', '.')
        return title, download_url
    def _verify_download(self, file_name):
        """Return True when ``file_name`` parses as valid bencoded data."""
        try:
            bencodepy.bread(file_name)
        except bencodepy.BencodeDecodeError as e:
            logger.debug('Failed to validate torrent file: {0}'.format(str(e)))
            logger.debug('Result is not a valid torrent file')
            return False
        return True
    def seed_ratio(self):
        """Configured seed ratio for this provider (may be None)."""
        return self.ratio
|
GoogleCloudPlatform/appengine-blobstoremgmt-python | src/app/__init__.py | Python | apache-2.0 | 644 | 0 | # Copyright 2018 Google Inc. All rights reserved.
#
# Lice | nsed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" | BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Blob-management tool application code.
"""
|
Tinkerforge/brickv | src/brickv/bindings/bricklet_accelerometer_v2.py | Python | gpl-2.0 | 22,844 | 0.00416 | # -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2022-01-18. #
# #
# Python Bindings Version 2.1.29 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except (ValueError, ImportError):
from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
GetAcceleration = namedtuple('Acceleration', ['x', 'y', 'z'])
GetConfiguration = namedtuple('Configuration', ['data_rate', 'full_scale'])
GetAccelerationCallbackConfiguration = namedtuple('AccelerationCallbackConfiguration', ['period', 'value_has_to_change'])
GetContinuousAccelerationConfiguration = namedtuple('ContinuousAccelerationConfiguration', ['enable_x', 'enable_y', 'enable_z', 'resolution'])
GetFilterConfiguration = namedtuple('FilterConfiguration', ['iir_bypass', 'low_pass_filter'])
GetSPITFPErrorCount = namedtuple('SPITFPErrorCount', ['error_count_ack_checksum', 'error_count_message_checksum', 'error_count_frame', 'error_count_overflow'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletAccelerometerV2(Device):
"""
Measures acceleration in three axis
"""
DEVICE_IDENTIFIER = 2130
DEVICE_DISPLAY_NAME = 'Accelerometer Bricklet 2.0'
DEVICE_URL_PART = 'accelerometer_v2' # internal
CALLBACK_ACCELERATION = 8
CALLBACK_CONTINUOUS_ACCELERATION_16_BIT = 11
CALLBACK_CONTINUOUS_ACCELERATION_8_BIT = 12
FUNCTION_GET_ACCELERATION = 1
FUNCTION_SET_CONFIGURATION = 2
FUNCTION_GET_CONFIGURATION = 3
FUNCTION_SET_ACCELERATION_CALLBACK_CONFIGURATION = 4
FUNCTION_GET_ACCELERATION_CALLBACK_CONFIGURATION = 5
FUNCTION_SET_INFO_LED_CONFIG = 6
FUNCTION_GET_INFO_LED_CONFIG = 7
FUNCTION_SET_CONTINUOUS_ACCELERATION_CONFIGURATION = 9
FUNCTION_GET_CONTINUOUS_ACCELERATION_CONFIGURATION = 10
FUNCTION_SET_FILTER_CONFIGURATION = 13
FUNCTION_GET_FILTER_CONFIGURATION = 14
FUNCTION_GET_SPITFP_ERROR_COUNT = 234
FUNCTION_SET_BOOTLOADER_MODE = 235
FUNCTION_GET_BOOTLOADER_MODE = 236
FUNCTION_SET_WRITE_FIRMWARE_POINTER = 237
FUNCTION_WRITE_FIRMWARE = 238
FUNCTION_SET_STATUS_LED_CONFIG = 239
FUNCTION_GET_STATUS_LED_CONFIG = 240
FUNCTION_GET_CHIP_TEMPERATURE = 242
FUNCTION_RESET = 243
FUNCTION_WRITE_UID = 248
FUNCTION_READ_UID = 249
FUNCTION_GET_IDENTITY = 255
DATA_RATE_0_781HZ = 0
DATA_RATE_1_563HZ = 1
DATA_RATE_3_125HZ = 2
DATA_RATE_6_2512HZ = 3
DATA_RATE_12_5HZ = 4
DATA_RATE_25HZ = 5
DATA_RATE_50HZ = 6
DATA_RATE_100HZ = 7
DATA_RATE_200HZ = 8
DATA_RATE_400HZ = 9
DATA_RATE_800HZ = 10
DATA_RATE_1600HZ = 11
DATA_RATE_3200HZ = 12
DATA_RATE_6400HZ = 13
DATA_RATE_12800HZ = 14
DATA_RATE_25600HZ = 15
FULL_SCALE_2G = 0
FULL_SCALE_4G = 1
FULL_SCALE_8G = 2
INFO_LED_CONFIG_OFF = 0
INFO_LED_CONFIG_ON = 1
INFO_LED_CONFIG_SHOW_HEARTBEAT = 2
RESOLUTION_8BIT = 0
RESOLUTION_16BIT = 1
IIR_BYPASS_APPLIED = 0
IIR_BYPASS_BYPASSED = 1
LOW_PASS_FILTER_NINTH = 0
LOW_PASS_FILTER_HALF = 1
BOOTLOADER_MODE_BOOTLOADER = 0
BOOTLOADER_MODE_FIRMWARE = 1
BOOTLOADER_MODE_BOOTLOADER_WAIT_FOR_REBOOT = 2
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_REBOOT = 3
BOOTLOADER_MODE_FIRMWARE_WAIT_FOR_ERASE_AND_REBOOT = 4
BOOTLOADER_STATUS_OK = 0
BOOTLOADER_STATUS_INVALID_MODE = 1
BOOTLOADER_STATUS_NO_CHANGE = 2
BOOTLOADER_STATUS_ENTRY_FUNCTION_NOT_PRESENT = 3
BOOTLOADER_STATUS_DEVICE_IDENTIFIER_INCORRECT = 4
BOOTLOADER_STATUS_CRC_MISMATCH = 5
STATUS_LED_CONFIG_OFF = 0
STATUS_LED_CONFIG_ON = 1
STATUS_LED_CONFIG_SHOW_HEARTBEAT = 2
STATUS_LED_CONFIG_SHOW_STATUS = 3
def __init__(self, uid, ipcon):
"""
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
"""
Device.__init__(self, uid, ipcon, BrickletAccelerometerV2.DEVICE_IDENTIFIER, BrickletAccelerometerV2.DEVICE_DISPLAY_NAME)
self.api_version = (2, 0, 1)
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_ACCELERATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[Bricklet | AccelerometerV2.FUNCTION_GET_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_ACCELERATION_CALLBACK_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_ACCELERATION_CALLBACK_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_INFO_LED_CONFIG] | = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_INFO_LED_CONFIG] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_CONTINUOUS_ACCELERATION_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_CONTINUOUS_ACCELERATION_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_FILTER_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_FILTER_CONFIGURATION] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_SPITFP_ERROR_COUNT] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_BOOTLOADER_MODE] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_BOOTLOADER_MODE] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_WRITE_FIRMWARE_POINTER] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_WRITE_FIRMWARE] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_SET_STATUS_LED_CONFIG] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_STATUS_LED_CONFIG] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_CHIP_TEMPERATURE] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_RESET] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_WRITE_UID] = BrickletAccelerometerV2.RESPONSE_EXPECTED_FALSE
self.response_expected[BrickletAccelerometerV2.FUNCTION_READ_UID] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.response_expected[BrickletAccelerometerV2.FUNCTION_GET_IDENTITY] = BrickletAccelerometerV2.RESPONSE_EXPECTED_ALWAYS_TRUE
self.callback_formats[BrickletAccelerometerV2.CALLBACK_ACCELERATION] = (20, 'i i i')
self.callback_formats[BrickletAccelerometerV2.CALLBACK_CONTINUOUS_ACCELERATION_16_BIT] = (68, '30h')
self.callback_formats[BrickletAccelerometerV2.CALL |
poppogbr/genropy | packages/flib/model/item_category.py | Python | lgpl-2.1 | 781 | 0.008963 | # encoding: utf-8
class Table(object):
    """Link table connecting items to categories (many-to-many)."""
    def config_db(self, pkg):
        """Define the ``item_category`` table structure on package ``pkg``."""
        tbl = pkg.table('item_category', pkey='id', name_long='!!Item category',
                        name_plural='!!Item category')
        self.sysFields(tbl)
        # Both foreign keys cascade on delete, so removing an item or a
        # category also removes its link rows.
        tbl.column('item_id', size='22', group='_', name_long='Item id').relation('item.id', mode='foreignkey',
                                                                                  onDelete='cascade')
        tbl.column('category_id', size='22', group='_', name_long='Category id').relation('category.id',
                                                                                          mode='foreignkey',
                                                                                          onDelete='cascade')
|
rohitranjan1991/home-assistant | homeassistant/components/sonarr/config_flow.py | Python | mit | 5,724 | 0.000349 | """Config flow for Sonarr."""
from __future__ import annotations
import logging
from typing import Any
from aiopyarr import ArrAuthenticationException, ArrException
from aiopyarr.models.host_configuration import PyArrHostConfiguration
from aiopyarr.sonarr_client import SonarrClient
import voluptuous as vol
import yarl
from homeassistant.config_entries import ConfigFlow, OptionsFlow
from homeassistant.const import CONF_API_KEY, CONF_URL, CONF_VERIFY_SSL
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_VERIFY_SSL,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: HomeAssistant, data: dict) -> None:
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    Raises an aiopyarr exception when the Sonarr instance is unreachable
    or rejects the API key.
    """
    client = SonarrClient(
        host_configuration=PyArrHostConfiguration(
            api_token=data[CONF_API_KEY],
            url=data[CONF_URL],
            verify_ssl=data[CONF_VERIFY_SSL],
        ),
        session=async_get_clientsession(hass),
    )
    await client.async_get_system_status()
class SonarrConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Sonarr."""
    VERSION = 2
    def __init__(self):
        """Initialize the flow."""
        # Set during reauth to the existing entry being re-authenticated.
        self.entry = None
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return SonarrOptionsFlowHandler(config_entry)
    async def async_step_reauth(self, data: dict[str, Any]) -> FlowResult:
        """Handle configuration by re-auth."""
        self.entry = self.hass.config_entries.async_get_entry(self.context["entry_id"])
        return await self.async_step_reauth_confirm()
    async def async_step_reauth_confirm(
        self, user_input: dict[str, str] | None = None
    ) -> FlowResult:
        """Confirm reauth dialog."""
        if user_input is None:
            return self.async_show_form(
                step_id="reauth_confirm",
                description_placeholders={"url": self.entry.data[CONF_URL]},
                data_schema=vol.Schema({}),
                errors={},
            )
        return await self.async_step_user()
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initiated by the user."""
        errors = {}
        if user_input is not None:
            if self.entry:
                # Reauth: layer the newly entered values over the stored data.
                user_input = {**self.entry.data, **user_input}
            if CONF_VERIFY_SSL not in user_input:
                user_input[CONF_VERIFY_SSL] = DEFAULT_VERIFY_SSL
            try:
                await validate_input(self.hass, user_input)
            except ArrAuthenticationException:
                errors = {"base": "invalid_auth"}
            except ArrException:
                errors = {"base": "cannot_connect"}
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
            else:
                if self.entry:
                    return await self._async_reauth_update_entry(user_input)
                # Use the host part of the URL as the entry title.
                parsed = yarl.URL(user_input[CONF_URL])
                return self.async_create_entry(
                    title=parsed.host or "Sonarr", data=user_input
                )
        data_schema = self._get_user_data_schema()
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(data_schema),
            errors=errors,
        )
    async def _async_reauth_update_entry(self, data: dict) -> FlowResult:
        """Update existing config entry."""
        self.hass.config_entries.async_update_entry(self.entry, data=data)
        await self.hass.config_entries.async_reload(self.entry.entry_id)
        return self.async_abort(reason="reauth_successful")
    def _get_user_data_schema(self) -> dict[str, Any]:
        """Get the data schema to display user form."""
        if self.entry:
            # During reauth only the API key can be changed.
            return {vol.Required(CONF_API_KEY): str}
        data_schema: dict[str, Any] = {
            vol.Required(CONF_URL): str,
            vol.Required(CONF_API_KEY): str,
        }
        if self.show_advanced_options:
            data_schema[
                vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL)
            ] = bool
        return data_schema
class SonarrOptionsFlowHandler(OptionsFlow):
    """Handle Sonarr client options."""
    def __init__(self, config_entry):
        """Initialize options flow."""
        self.config_entry = config_entry
    async def async_step_init(self, user_input: dict[str, int] | None = None):
        """Manage Sonarr options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        # Pre-fill the form with the currently stored options (falling back
        # to the integration defaults).
        current = self.config_entry.options
        schema = vol.Schema(
            {
                vol.Optional(
                    CONF_UPCOMING_DAYS,
                    default=current.get(CONF_UPCOMING_DAYS, DEFAULT_UPCOMING_DAYS),
                ): int,
                vol.Optional(
                    CONF_WANTED_MAX_ITEMS,
                    default=current.get(
                        CONF_WANTED_MAX_ITEMS, DEFAULT_WANTED_MAX_ITEMS
                    ),
                ): int,
            }
        )
        return self.async_show_form(step_id="init", data_schema=schema)
|
w1z2g3/crossbar | crossbar/router/auth/ticket.py | Python | agpl-3.0 | 5,567 | 0.003593 | #####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, t | hen the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# | from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import six
from autobahn.wamp import types
from txaio import make_logger
from crossbar.router.auth.pending import PendingAuth
__all__ = ('PendingAuthTicket',)
class PendingAuthTicket(PendingAuth):
    """
    Pending authentication information for WAMP-Ticket authentication.
    """
    log = make_logger()
    AUTHMETHOD = u'ticket'
    def __init__(self, session, config):
        PendingAuth.__init__(self, session, config)
        # The secret/ticket the authenticating principal will need to provide (filled only in static mode).
        self._signature = None
    def hello(self, realm, details):
        """Handle the client HELLO: look up the principal (static mode) or
        initialize the dynamic authenticator, then issue a ticket challenge."""
        # remember the realm the client requested to join (if any)
        self._realm = realm
        # remember the authid the client wants to identify as (if any)
        self._authid = details.authid
        # use static principal database from configuration
        if self._config[u'type'] == u'static':
            self._authprovider = u'static'
            if self._authid in self._config.get(u'principals', {}):
                principal = self._config[u'principals'][self._authid]
                error = self._assign_principal(principal)
                if error:
                    return error
                # now set the signature as expected for WAMP-Ticket
                self._signature = principal[u'ticket']
                return types.Challenge(self._authmethod)
            else:
                return types.Deny(message=u'no principal with authid "{}" exists'.format(self._authid))
        # use configured procedure to dynamically get a ticket for the principal
        elif self._config[u'type'] == u'dynamic':
            self._authprovider = u'dynamic'
            error = self._init_dynamic_authenticator()
            if error:
                return error
            return types.Challenge(self._authmethod)
        else:
            # should not arrive here, as config errors should be caught earlier
            return types.Deny(message=u'invalid authentication configuration (authentication type "{}" is unknown)'.format(self._config['type']))
    def authenticate(self, signature):
        """Verify the ticket the client sent in response to the challenge."""
        # WAMP-Ticket "static"
        if self._authprovider == u'static':
            # when doing WAMP-Ticket from static configuration, the ticket we
            # expect was previously stored in self._signature
            if signature == self._signature:
                # ticket was valid: accept the client
                self.log.debug("WAMP-Ticket: ticket was valid!")
                return self._accept()
            else:
                # ticket was invalid: deny client
                # (fixed: the format string previously contained a stray
                # doubled quote after the first placeholder)
                self.log.debug('WAMP-Ticket (static): expected ticket "{}" ({}), but got "{}" ({})'.format(self._signature, type(self._signature), signature, type(signature)))
                return types.Deny(message=u"ticket in static WAMP-Ticket authentication is invalid")
        # WAMP-Ticket "dynamic"
        elif self._authprovider == u'dynamic':
            self._session_details[u'ticket'] = signature
            d = self._authenticator_session.call(self._authenticator, self._realm, self._authid, self._session_details)
            def on_authenticate_ok(principal):
                # backwards compatibility: dynamic ticket authenticator
                # was expected to return a role directly
                if type(principal) == six.text_type:
                    principal = {u'role': principal}
                error = self._assign_principal(principal)
                if error:
                    return error
                return self._accept()
            def on_authenticate_error(err):
                return self._marshal_dynamic_authenticator_error(err)
            d.addCallbacks(on_authenticate_ok, on_authenticate_error)
            return d
        else:
            # should not arrive here, as config errors should be caught earlier
            return types.Deny(message=u'invalid authentication configuration (authentication type "{}" is unknown)'.format(self._config['type']))
|
adamgreig/agg-kicad | scripts/build_mod_tfml_sfml.py | Python | mit | 8,232 | 0.001701 | """
build_mod_tfml_sfml.py
Copyright 2016 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Generate footprints for Samtec TFML and SFML connectors.
"""
from __future__ import print_function, division
# Settings ====================================================================
# All dimensions below are in millimetres.
# Courtyard clearance
# Use 0.25 for IPC nominal and 0.10 for IPC least
ctyd_gap = 0.25
# Courtyard grid
ctyd_grid = 0.05
# Courtyard line width
ctyd_width = 0.01
# Silk line width
silk_width = 0.15
# Fab layer line width
fab_width = 0.01
# Ref/Val font size (width x height)
font_size = (1.0, 1.0)
# Ref/Val font thickness
font_thickness = 0.15
# Ref/Val font spacing from centre to top/bottom edge
font_halfheight = 0.7
# End Settings ================================================================
import os
import sys
import time
import math
import argparse
from sexp import parse as sexp_parse, generate as sexp_generate
from kicad_mod import fp_line, fp_text, pad, draw_square
def tfml_pads(pins):
    """SMD pads for a TFML header: two rows on a 1.27mm pitch, odd pin
    numbers on the lower row and even on the upper."""
    layers = ["F.Cu", "F.Mask", "F.Paste"]
    out = []
    x = -((pins - 1) / 2.0) * 1.27
    for idx in range(pins):
        out.append(pad(2 * idx + 1, "smd", "rect", (x, 1.715),
                       [0.74, 2.92], layers))
        out.append(pad(2 * idx + 2, "smd", "rect", (x, -1.715),
                       [0.74, 2.92], layers))
        x += 1.27
    return out
def sfml_pads(pins):
    """SMD pads for an SFML socket: two rows on a 1.27mm pitch, odd pin
    numbers on the upper row and even on the lower."""
    layers = ["F.Cu", "F.Mask", "F.Paste"]
    out = []
    x = -((pins - 1) / 2.0) * 1.27
    for idx in range(pins):
        out.append(pad(2 * idx + 1, "smd", "rect", (x, -1.365),
                       [0.74, 2.22], layers))
        out.append(pad(2 * idx + 2, "smd", "rect", (x, 1.365),
                       [0.74, 2.22], layers))
        x += 1.27
    return out
def locking_clip(pins):
    """Two non-plated through holes (one per side) for the locking clip."""
    x = (pins * 1.27 + 1.91) / 2.0
    out = []
    for sign in (+1, -1):
        out.append(pad("", "np_thru_hole", "circle", (sign * x, 0),
                       [1.2, 1.2], ["*.Mask"], drill=1.2))
    return out
def tfml_fab(pins):
    """Fabrication-layer outline for TFML: a plain body rectangle."""
    *_, square = draw_square(
        pins * 1.27 + 3.18, 5.72, (0, 0), "F.Fab", fab_width)
    return square
def sfml_fab(pins):
    """Fabrication-layer outline for SFML: body rectangle with a wing
    sticking out of each short side."""
    out = []
    l = "F.Fab"
    w = fab_width
    # Wing extents measured from the body corners.
    a_x = (3.94 - 0.38) / 2.0
    a_y = (3.05 - 1.52) / 2.0
    # Corner points of the central body rectangle.
    nw, ne, se, sw, _ = draw_square(
        pins * 1.27 + 0.38, 3.05, (0, 0), l, w)
    # Trace the perimeter clockwise, detouring around both wings.
    out.append(fp_line(nw, ne, l, w))
    out.append(fp_line(ne, (ne[0], ne[1]+a_y), l, w))
    out.append(fp_line((ne[0], ne[1]+a_y), (ne[0]+a_x, ne[1]+a_y), l, w))
    out.append(fp_line((ne[0]+a_x, ne[1]+a_y), (se[0]+a_x, se[1]-a_y), l, w))
    out.append(fp_line((se[0]+a_x, se[1]-a_y), (se[0], se[1]-a_y), l, w))
    out.append(fp_line((se[0], se[1]-a_y), se, l, w))
    out.append(fp_line(se, sw, l, w))
    out.append(fp_line(sw, (sw[0], sw[1]-a_y), l, w))
    out.append(fp_line((sw[0], sw[1]-a_y), (sw[0]-a_x, sw[1]-a_y), l, w))
    out.append(fp_line((sw[0]-a_x, sw[1]-a_y), (nw[0]-a_x, nw[1]+a_y), l, w))
    out.append(fp_line((nw[0]-a_x, nw[1]+a_y), (nw[0], nw[1]+a_y), l, w))
    out.append(fp_line((nw[0], nw[1]+a_y), nw, l, w))
    return out
def tfml_silk(pins):
    """Silkscreen outline for TFML, shrunk by one line width so it clears
    the fab outline; the right-hand corners are drawn cut off."""
    out = []
    l = "F.SilkS"
    w = silk_width
    nw, ne, se, sw, _ = draw_square(
        pins * 1.27 + 3.18 - w, 5.72 - w, (0, 0), l, w)
    # Left side: short stubs into the top/bottom edges plus the full edge.
    out.append(fp_line((nw[0]+1.5, nw[1]), nw, l, w))
    out.append(fp_line(nw, sw, l, w))
    out.append(fp_line(sw, (sw[0]+1.5, sw[1]), l, w))
    # Right side: the corners are replaced by 45-degree cut lines.
    out.append(fp_line((se[0]-1.5, se[1]), (se[0], se[1]-1.5), l, w))
    out.append(fp_line((se[0], se[1]-1.5), (ne[0], ne[1]+1.5), l, w))
    out.append(fp_line((ne[0], ne[1]+1.5), (ne[0]-1.5, ne[1]), l, w))
    return out
def sfml_silk(pins):
    """Silkscreen outline for SFML, shrunk by one line width; follows the
    same winged shape as sfml_fab but leaves the pad rows uncovered."""
    out = []
    l = "F.SilkS"
    w = silk_width
    # Wing extents measured from the body corners (same as sfml_fab).
    a_x = (3.94 - 0.38) / 2.0
    a_y = (3.05 - 1.52) / 2.0
    nw, ne, se, sw, _ = draw_square(
        pins * 1.27 + 0.38 - w, 3.05 - w, (0, 0), l, w)
    out.append(fp_line(ne, (ne[0], ne[1]+a_y), l, w))
    out.append(fp_line((ne[0], ne[1]+a_y), (ne[0]+a_x, ne[1]+a_y), l, w))
    out.append(fp_line((ne[0]+a_x, ne[1]+a_y), (se[0]+a_x, se[1]-a_y), l, w))
    out.append(fp_line((se[0]+a_x, se[1]-a_y), (se[0], se[1]-a_y), l, w))
    out.append(fp_line((se[0], se[1]-a_y), se, l, w))
    # Only short stubs along the bottom edge, leaving the middle open.
    out.append(fp_line(se, (se[0]-0.3, se[1]), l, w))
    out.append(fp_line((sw[0]+0.3, sw[1]), sw, l, w))
    out.append(fp_line(sw, (sw[0], sw[1]-a_y), l, w))
    out.append(fp_line((sw[0], sw[1]-a_y), (sw[0]-a_x, sw[1]-a_y), l, w))
    out.append(fp_line((sw[0]-a_x, sw[1]-a_y), (nw[0]-a_x, nw[1]+a_y), l, w))
    out.append(fp_line((nw[0]-a_x, nw[1]+a_y), (nw[0], nw[1]+a_y), l, w))
    out.append(fp_line((nw[0], nw[1]+a_y), nw, l, w))
    return out
def ctyd(pins):
    """Courtyard rectangle: the body grown by the clearance on every side,
    snapped outwards to twice the courtyard grid."""
    step = 2 * ctyd_grid
    width = pins * 1.27 + 3.94 + 2 * ctyd_gap
    height = 6.35 + 2 * ctyd_gap
    width = step * int(math.ceil(width / step))
    height = step * int(math.ceil(height / step))
    return draw_square(width, height, (0, 0), "F.CrtYd", ctyd_width)[4]
def refs(name):
    """Reference and value text items, centred just above and below the
    courtyard respectively."""
    y = (6.35 + 2 * ctyd_gap) / 2.0 + font_halfheight
    return [
        fp_text("reference", "REF**", (0, -y), "F.Fab", font_size,
                font_thickness),
        fp_text("value", name, (0, y), "F.Fab", font_size, font_thickness),
    ]
def tfml_base(name, pins):
    """Assemble the common footprint s-expression shared by all TFML
    variants: pads, fab outline, courtyard, silk and text."""
    sexp = ["module", name, ("layer", "F.Cu"),
            ("tedit", format(int(time.time()), 'X'))]
    for part in (tfml_pads(pins), tfml_fab(pins), ctyd(pins),
                 tfml_silk(pins), refs(name)):
        sexp += part
    return sexp
def tfml(pins):
    """Footprint (name, s-expression text) for a plain TFML header."""
    name = "TFML-1{:02d}-02-L-D".format(pins)
    return name, sexp_generate(tfml_base(name, pins))
def tfml_lc(pins):
    """Footprint (name, s-expression text) for a TFML header with the
    locking-clip holes added."""
    name = "TFML-1{:02d}-02-L-D-LC".format(pins)
    sexp = tfml_base(name, pins) + locking_clip(pins)
    return name, sexp_generate(sexp)
def sfml_base(name, pins):
    """Assemble the common footprint s-expression shared by all SFML
    variants: pads, fab outline, courtyard, silk and text."""
    sexp = ["module", name, ("layer", "F.Cu"),
            ("tedit", format(int(time.time()), 'X'))]
    for part in (sfml_pads(pins), sfml_fab(pins), ctyd(pins),
                 sfml_silk(pins), refs(name)):
        sexp += part
    return sexp
def sfml(pins):
    """Footprint (name, s-expression text) for a plain SFML socket."""
    name = "SFML-1{:02d}-02-L-D".format(pins)
    return name, sexp_generate(sfml_base(name, pins))
def sfml_lc(pins):
    """Footprint (name, s-expression text) for an SFML socket with the
    locking-clip holes added."""
    name = "SFML-1{:02d}-02-L-D-LC".format(pins)
    sexp = sfml_base(name, pins) + locking_clip(pins)
    return name, sexp_generate(sexp)
def main(prettypath, verify=False, verbose=False):
    """Generate (or, with ``verify``, check) all TFML/SFML footprints in
    ``prettypath``.

    In verify mode returns True when every footprint on disk is already up
    to date and False at the first stale/missing one; in generate mode the
    changed files are (re)written and None is returned.
    """
    for pins in (5, 7, 10, 15):
        for generator in (tfml, tfml_lc, sfml, sfml_lc):
            # Generate footprint
            name, fp = generator(pins)
            path = os.path.join(prettypath, name + ".kicad_mod")
            if verify and verbose:
                print("Verifying", path)
            # Check if the file already exists and isn't changed
            if os.path.isfile(path):
                with open(path) as f:
                    old = f.read()
                # Compare parsed s-expressions, ignoring the timestamp-only
                # 'tedit' node which changes on every generation.
                old = [n for n in sexp_parse(old) if n[0] != "tedit"]
                new = [n for n in sexp_parse(fp) if n[0] != "tedit"]
                if new == old:
                    continue
            # If it needs changing, either verification failed or we rewrite
            if verify:
                return False
            else:
                with open(path, "w") as f:
                    f.write(fp)
    if verify:
        return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("prettypath", type=str, help=
"Path to footprints to process")
parser.add_argument("--verify", action="store_true", help=
"Verify libraries are up to date")
parser.add_argument("--verbose", action="store_true", help=
"Print out every library verified")
args = vars(parser.parse_args())
result = main(**args)
if args['verify']:
if result:
print("OK: all footprints up-to-date.")
sys.exit(0)
else:
print("Error: footprints not up- |
saulpw/visidata | visidata/macos.py | Python | gpl-3.0 | 3,338 | 0.001557 | from visidata import BaseSheet
# for mac users to use Option+x as Alt+x without reconfiguring the terminal
# Each bindkey maps the character macOS emits for an Option chord to the
# corresponding Alt command.  Commented-out lines are chords whose emitted
# character collides with a binding declared earlier in this file.
# Option+X
BaseSheet.bindkey('å', 'Alt+a')
BaseSheet.bindkey('∫', 'Alt+b')
BaseSheet.bindkey('ç', 'Alt+c')
BaseSheet.bindkey('∂', 'Alt+d')
BaseSheet.bindkey('´', 'Alt+e')
BaseSheet.bindkey('ƒ', 'Alt+f')
BaseSheet.bindkey('©', 'Alt+g')
BaseSheet.bindkey('˙', 'Alt+h')
BaseSheet.bindkey('ˆ', 'Alt+i')
BaseSheet.bindkey('∆', 'Alt+j')
BaseSheet.bindkey('˚', 'Alt+k')
BaseSheet.bindkey('¬', 'Alt+l')
BaseSheet.bindkey('µ', 'Alt+m')
BaseSheet.bindkey('˜', 'Alt+n')
BaseSheet.bindkey('ø', 'Alt+o')
BaseSheet.bindkey('π', 'Alt+p')
BaseSheet.bindkey('œ', 'Alt+q')
BaseSheet.bindkey('®', 'Alt+r')
BaseSheet.bindkey('ß', 'Alt+s')
BaseSheet.bindkey('†', 'Alt+t')
BaseSheet.bindkey('¨', 'Alt+u')
BaseSheet.bindkey('√', 'Alt+v')
BaseSheet.bindkey('∑', 'Alt+w')
BaseSheet.bindkey('≈', 'Alt+x')
BaseSheet.bindkey('¥', 'Alt+y')
BaseSheet.bindkey('Ω', 'Alt+z')
# Option+Shift+X
BaseSheet.bindkey('Å', 'Alt+A')
BaseSheet.bindkey('ı', 'Alt+B')
BaseSheet.bindkey('Ç', 'Alt+C')
BaseSheet.bindkey('Î', 'Alt+D')
#BaseSheet.bindkey('´', 'Alt+E')
BaseSheet.bindkey('Ï', 'Alt+F')
BaseSheet.bindkey('˝', 'Alt+G')
BaseSheet.bindkey('Ó', 'Alt+H')
#BaseSheet.bindkey('ˆ', 'Alt+I')
BaseSheet.bindkey('Ô', 'Alt+J')
BaseSheet.bindkey('', 'Alt+K') # apple logo
BaseSheet.bindkey('Ò', 'Alt+L')
BaseSheet.bindkey('Â', 'Alt+M')
#BaseSheet.bindkey('˜', 'Alt+N')
BaseSheet.bindkey('Ø', 'Alt+O')
BaseSheet.bindkey('∏', 'Alt+P')
BaseSheet.bindkey('Œ', 'Alt+Q')
BaseSheet.bindkey('‰', 'Alt+R')
BaseSheet.bindkey('Í', 'Alt+S')
BaseSheet.bindkey('ˇ', 'Alt+T')
#BaseSheet.bindkey('¨', 'Alt+U')
BaseSheet.bindkey('◊', 'Alt+V')
BaseSheet.bindkey('„', 'Alt+W')
BaseSheet.bindkey('˛', 'Alt+X')
BaseSheet.bindkey('Á', 'Alt+Y')
BaseSheet.bindkey('¸', 'Alt+Z')
# Option+nonletter
BaseSheet.bindkey('¡', 'Alt+1')
BaseSheet.bindkey('™', 'Alt+2')
BaseSheet.bindkey('£', 'Alt+3')
BaseSheet.bindkey('¢', 'Alt+4')
BaseSheet.bindkey('∞', 'Alt+5')
BaseSheet.bindkey('§', 'Alt+6')
BaseSheet.bindkey('¶', 'Alt+7')
BaseSheet.bindkey('•', 'Alt+8')
BaseSheet.bindkey('ª', 'Alt+9')
BaseSheet.bindkey('º', 'Alt+0')
BaseSheet.bindkey('', 'Alt+`')
BaseSheet.bindkey('–', 'Alt+-')
BaseSheet.bindkey('≠', 'Alt+=')
BaseSheet.bindkey('“', 'Alt+[')
BaseSheet.bindkey('‘', 'Alt+]')
BaseSheet.bindkey('«', 'Alt+\\')
BaseSheet.bindkey('÷', 'Alt+/')
BaseSheet.bindkey('…', 'Alt+;')
BaseSheet.bindkey('æ', 'Alt+\'')
BaseSheet.bindkey('≤', 'Alt+,')
BaseSheet.bindkey('≥', 'Alt+.')
# Option+Shift+nonletter
BaseSheet.bindkey('⁄', 'Alt+!')
BaseSheet.bindkey('€', 'Alt+@')
BaseSheet.bindkey('‹', 'Alt+#')
BaseSheet.bindkey('›', 'Alt+$')
BaseSheet.bindkey('fi', 'Alt+%')
BaseSheet.bindkey('fl', 'Alt+^')
BaseSheet.bindkey('‡', 'Alt+&')
BaseSheet.bindkey('°', 'Alt+*')
BaseSheet.bindkey('·', 'Alt+(')
BaseSheet.bindkey('‚', 'Alt+)')
#BaseSheet.bindkey('`', 'Alt+~')
BaseSheet.bindkey('—', 'Alt+_')
BaseSheet.bindkey('±', 'Alt++')
BaseSheet.bindkey('”', 'Alt+{')
BaseSheet.bindkey('’', 'Alt+}')
BaseSheet.bindkey('»', 'Alt+|')
BaseSheet.bindkey('¿', 'Alt+?')
BaseSheet.bindkey('Ú', 'Alt+:')
BaseSheet.bindkey('Æ', 'Alt+"')
BaseSheet.bindkey('¯', 'Alt+<')
BaseSheet.bindkey('˘', 'Alt+>')
|
facebookexperimental/eden | eden/hg-server/edenscm/mercurial/smallcommitmetadata.py | Python | gpl-2.0 | 3,324 | 0.002708 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# smallcommitmetadata.py - stores a small amount of metadata associated with a commit
from . import json
from .node import bin, hex
from .util import altsortdict
# Stores a mapping of (node, category) -> data, with a FIFO-limited number of entries
class smallcommitmetadata(object):
    """FIFO-bounded store of (node, category) -> data, persisted as JSON.

    Entries beyond `entrylimit` are evicted oldest-first. The on-disk file
    ("commit_metadata" under `vfs`) stores entries oldest-first as a JSON
    list of {"node": hex, "category": str, "data": ...} objects.
    """

    def __init__(self, vfs, entrylimit):
        self.vfs = vfs
        self.limit = entrylimit
        self.contents = altsortdict()
        self.reload()

    def reload(self):
        """Read the database from disk."""
        if not self.vfs.exists("commit_metadata"):
            self.contents = altsortdict()
            return
        try:
            # Keep only the newest `limit` entries; the file is oldest-first.
            entries = json.loads(self.vfs.tryreadutf8("commit_metadata"))[-self.limit :]
        except ValueError:
            # Corrupt or empty JSON: start fresh rather than crash.
            entries = []
        for entry in entries:
            self.contents[(bin(entry["node"]), entry["category"])] = entry["data"]

    def write(self):
        """Write the database to disk (atomically, via a temp file)."""
        with self.vfs("commit_metadata", "w", atomictemp=True) as f:
            entries = [
                {"node": hex(node), "category": category, "data": data}
                for ((node, category), data) in self.contents.items()
            ]
            json.dump(entries, f)

    def store(self, node, category, data):
        """Adds a new entry with the specified node and category, and updates the data on disk. Returns the removed entry, if any."""
        self.contents[(node, category)] = data
        popped = None
        while len(self.contents) > self.limit:
            # Evict oldest-first (FIFO); only the last eviction is returned.
            popped = self.contents.popitem(last=False)
        self.write()
        return popped

    def delete(self, node, category):
        """Removes the entry with matching node and category and returns its value."""
        value = self.contents[(node, category)]
        del self.contents[(node, category)]
        return value

    def read(self, node, category):
        """Returns the value of the entry with specified node and category."""
        return self.contents[(node, category)]

    def find(self, node=None, category=None):
        """Returns a map of all entries with matching node and/or category. If both are None, returns all entries."""
        return altsortdict(
            (
                ((node_, category_), data)
                for ((node_, category_), data) in self.contents.items()
                if node is None or node == node_
                if category is None or category == category_
            )
        )

    def finddelete(self, node=None, category=None):
        """Removes and returns any entries with matching node and/or category."""
        entriestoremove = [
            ((node_, category_), data_)
            for ((node_, category_), data_) in self.contents.items()
            if node is None or node == node_
            if category is None or category == category_
        ]
        for (key, _value) in entriestoremove:
            del self.contents[key]
        return altsortdict(entriestoremove)

    def clear(self):
        """Removes and returns all entries."""
        deleted = self.contents
        self.contents = altsortdict()
        return deleted
|
saintdragon2/python-3-lecture-2015 | gui_practice/file_io_civil.py | Python | mit | 347 | 0.002882 | __author__ = 'saintdragon2'
# Sum the integers stored one per line in the input file.
fname = 'C:/ttt/test.txt'
total = 0  # renamed from `sum`, which shadowed the builtin
with open(fname) as f:  # `with` guarantees the file is closed
    for line in f:  # file iteration stops at EOF; no readline sentinel needed
        total += int(line)
print(total)

# Write a small two-line CSV sample file.
with open('c:/ttt/abc.csv', 'w') as file_to_write:
    file_to_write.write('hahaha,kkk,1,2,3,4,5\n')
    file_to_write.write('a, b, c, e, d, e')
gentoo/layman | layman/overlays/modules/stub/stub.py | Python | gpl-2.0 | 1,731 | 0.011554 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
#===============================================================================
#
# Dependencies
#
#-------------------------------------------------------------------------------
from layman.utils import path
from layman.overlays.source import OverlaySource
#===============================================================================
#
# Class StubOverlay
#
#-------------------------------------------------------------------------------
class StubOverlay(OverlaySource):
    '''
    Placeholder source used when the sync module for an overlay type is
    not installed: every operation warns about the missing module and
    reports success so processing of other overlays can continue.
    '''

    type = 'N/A'
    type_key = 'n/a'

    def __init__(self, parent, config, _location, ignore = 0):
        super(StubOverlay, self).__init__(parent,
            config, _location, ignore)
        self.branch = self.parent.branch
        self.info = {'name': self.parent.name, 'type': self.parent.ovl_type}
        # Messages shared by every stubbed operation below.
        self.missing_msg = 'Overlay "%(name)s" is missing "%(type)s" module!'\
            % self.info
        self.hint = 'Did you install layman with "%(type)s" support?'\
            % self.info

    def _report_missing(self):
        '''Emit the standard missing-module error and installation hint.'''
        self.output.error(self.missing_msg)
        self.output.warn(self.hint)

    def add(self, base):
        '''Add overlay: warn about the missing module, report success.'''
        self._report_missing()
        return True

    def update(self, base, src):
        '''
        Updates overlay src-url: warn about the missing module, report success.
        '''
        self._report_missing()
        return True

    def sync(self, base):
        '''Sync overlay: warn about the missing module, report success.'''
        self._report_missing()
        return True

    def supported(self):
        '''Overlay type supported? Never — this is the stub.'''
        return False
|
Azzahid/Flow-Control | bin/fibo.py | Python | lgpl-3.0 | 153 | 0 | a, b = | 1, 1
def sum_even_fibonacci(limit=4000000):
    """Return the sum of the even Fibonacci numbers that are <= `limit`.

    Generalized from the original hard-coded 4,000,000 bound
    (Project Euler problem 2).
    """
    total = 0
    a, b = 1, 1
    while a <= limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b  # advance to the next Fibonacci pair
    return total

# Preserve the original script behavior: print the sum for the default bound.
# (Parenthesized print is valid in both Python 2 and 3.)
print(sum_even_fibonacci())
|
pythonLearning-bigData/Snail | run.py | Python | bsd-3-clause | 80 | 0 | from web import views
if __name__ == "__main__":
    # Start the Flask development server with the debugger/reloader enabled.
    views.app.run(debug=True)
|
cc1-cloud/cc1 | src/cm/views/user/template.py | Python | apache-2.0 | 1,727 | 0.001738 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.cm.views.user.template
@alldecoratedby{src.cm.utils.decorators.user_log}
@author Tomek Sośnicki <tom.sosnicki@gmail.com>
"""
from cm.models.t | emplate import Template
from cm.utils.exception import CMException
from common.states import template_states
from cm.utils.decorators import user_log
@user_log(log=True)
def get_list(caller_id):
    """
    Returns list of Templates.

    @cmview_user
    @response{list(dict)} Template.dict property of each Template
    """
    try:
        templates = [t.dict for t in Template.objects.filter(state__exact=template_states['active']).order_by('cpu', 'memory')]
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # not swallowed and converted into a CMException.
    except Exception:
        raise CMException("template_list")
    return templates
@user_log(log=True)
def get_by_id(caller_id, template_id):
    """
    @cmview_user
    @param_post{template_id,int}
    @response{dict} Template.dict property of the requested Template
    """
    try:
        t = Template.objects.get(id=template_id)
    # Narrowed from a bare ``except:`` (consistent with get_list) so
    # SystemExit/KeyboardInterrupt are not converted into a CMException.
    except Exception:
        raise CMException("template_get")
    return t.dict
|
h2oai/h2o | py/testdir_0xdata_only/test_from_hdfs_hosts.py | Python | apache-2.0 | 2,900 | 0.005862 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, use_hdfs=True, hdfs_version='cdh4', hdfs_name_node='mr-0x6')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_B_hdfs_files(self):
# larger set in my local dir
# fails because classes aren't integers
# "allstate_claim_prediction_train_set.zip",
csvFilenameAll = [
"3G_poker_shuffle",
"TEST-poker1000.csv",
# corrupt zip file?
# "allstate_claim_prediction_train_set.zip",
"and-testing.data",
"arcene2_train.both",
"arcene_train.both",
"bestbuy_test.csv",
"bestbuy_train.csv",
"billion_rows.csv.gz",
"covtype.13x.data",
"covtype.13x.shuffle.data",
"covtype.169x.data",
"covtype.4x.shuffle.data",
"covtype.data",
"covtype4x.shuffle.data",
"hhp.unbalanced.012.1x11.data.gz",
"hhp.unbalanced.012.data.gz",
"hhp.unbalanced.data.gz",
"hhp2.os.noisy.0_1.data",
"hhp2.os.noisy.9_4.data",
"hhp_9_14_12.data",
"leads.csv",
"prostate_long_1G.csv",
]
# pick 8 randomly!
if (1==0):
csvFilenameList = random.sample(csvFilenameAll,8)
# Alternatively: do the list in order! Note the order is easy to hard
else:
csv | FilenameList = csvFilenameAll
# pop open a browser on the cloud
h2b.browseTheCloud()
timeoutSecs = 1000
# save the first, for all comparisions, to avoid slow drift with each iteration
firstglm = {}
for csvFilename in csvFilenameList:
# creates csvFilename.hex from file in hdfs dir
start = time.time()
print 'Parsing', csvFilename
csvPathname = "dataset | s/" + csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs', header=0,
timeoutSecs=timeoutSecs, retryDelaySecs=1.0)
print csvFilename, '\nparse time (python)', time.time() - start, 'seconds'
### print h2o.dump_json(parseResult['response'])
print "parse result:", parseResult['destination_key']
# I use this if i want the larger set in my localdir
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
# h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
print "\n" + csvFilename
# Run this test module directly through H2O's unittest wrapper.
if __name__ == '__main__':
    h2o.unit_main()
|
Bekt/tweetement | appengine_config.py | Python | mit | 639 | 0 | import sys
import sys
import os.path

# Make the bundled application source and vendored libraries importable.
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib/pip'))

# Workaround the dev-environment SSL:
# http://stackoverflow.com/q/16192916/893652
if os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
    import imp
    import os
    from google.appengine.tools.devappserver2.python import sandbox

    sandbox._WHITE_LIST_C_MODULES += ['_ssl', '_socket']
    imp.load_source('socket',
                    os.path.join(os.path.dirname(os.__file__), 'socket.py'))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.