hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
207ce01859ee49a195d1d997b32d1fc62a2fd809 | 10,063 | py | Python | corehq/motech/openmrs/finders.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | corehq/motech/openmrs/finders.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | corehq/motech/openmrs/finders.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | """
PatientFinders are used to find OpenMRS patients that correspond to
CommCare cases if none of the patient identifiers listed in
OpenmrsCaseConfig.match_on_ids have successfully matched a patient.
See `README.md`__ for more context.
"""
import logging
from collections import namedtuple
from functools import partial
from operator import eq
from pprint import pformat
from dimagi.ext.couchdbkit import (
DecimalProperty,
DictProperty,
DocumentSchema,
ListProperty,
StringProperty,
)
from corehq.motech.openmrs.const import OPENMRS_DATA_TYPE_BOOLEAN
from corehq.motech.openmrs.finders_utils import (
le_days_diff,
le_levenshtein_percent,
)
from corehq.motech.value_source import (
as_value_source,
deserialize,
recurse_subclasses,
)
MATCH_TYPE_EXACT = 'exact'
MATCH_TYPE_LEVENSHTEIN = 'levenshtein'  # Useful for words translated across alphabets
MATCH_TYPE_DAYS_DIFF = 'days_diff'  # Useful for estimated dates of birth
# Maps each match type name to the comparison function used to score it.
MATCH_FUNCTIONS = {
    MATCH_TYPE_EXACT: eq,
    MATCH_TYPE_LEVENSHTEIN: le_levenshtein_percent,
    MATCH_TYPE_DAYS_DIFF: le_days_diff,
}
MATCH_TYPES = tuple(MATCH_FUNCTIONS)
MATCH_TYPE_DEFAULT = MATCH_TYPE_EXACT
logger = logging.getLogger(__name__)
# ValueSource dict that deserializes to False (bool); used as the default
# for ``PatientFinder.create_missing``.
constant_false = {
    "value": 'False',
    # We are fetching from a case property or a form question value, and
    # we want `get_value()` to return False (bool). `get_value()`
    # serialises case properties and form question values as external
    # data types. OPENMRS_DATA_TYPE_BOOLEAN is useful because it is a
    # bool, not a string, so `constant_false.get_value()` will return
    # False (not 'False')
    "external_data_type": OPENMRS_DATA_TYPE_BOOLEAN,
}
class PatientFinder(DocumentSchema):
    """
    The ``PatientFinder`` base class was developed as a way to
    handle situations where patient cases are created in CommCare
    instead of being imported from OpenMRS.

    When patients are imported from OpenMRS, they will come with at
    least one identifier that MOTECH can use to match the case in
    CommCare with the corresponding patient in OpenMRS. But if the case
    is registered in CommCare then we may not have an ID, or the ID
    could be wrong. We need to search for a corresponding OpenMRS
    patient.

    Different projects may focus on different kinds of case properties,
    so it was felt that a base class would allow some flexibility.

    The ``PatientFinder.wrap()`` method allows you to wrap documents of
    subclasses.

    The ``PatientFinder.find_patients()`` method must be implemented by
    subclasses. It returns a list of zero, one, or many patients. If it
    returns one patient, the OpenmrsRepeater.find_or_create_patient()
    will accept that patient as a true match.

    .. NOTE:: The consequences of a false positive (a Type I error) are
              severe: A real patient will have their valid values
              overwritten by those of someone else. So ``PatientFinder``
              subclasses should be written and configured to skew
              towards false negatives (Type II errors). In other words,
              it is much better not to choose a patient than to choose
              the wrong patient.
    """

    # Whether to create a new patient if no patients are found.
    # Stored as a ValueSource dict; defaults to a constant False.
    create_missing = DictProperty(default=constant_false)

    @classmethod
    def wrap(cls, data):
        # Migrate legacy docs where ``create_missing`` was stored as a
        # plain bool into the ValueSource dict representation.
        if 'create_missing' in data and isinstance(data['create_missing'], bool):
            data['create_missing'] = {
                'external_data_type': OPENMRS_DATA_TYPE_BOOLEAN,
                'value': str(data['create_missing'])
            }
        if cls is PatientFinder:
            # Polymorphic wrap: dispatch on doc_type to the concrete
            # subclass; returns None if the doc_type is unknown.
            subclass = {
                sub._doc_type: sub for sub in recurse_subclasses(cls)
            }.get(data['doc_type'])
            return subclass.wrap(data) if subclass else None
        else:
            return super(PatientFinder, cls).wrap(data)

    def find_patients(self, requests, case, case_config):
        """
        Given a case, search OpenMRS for possible matches. Return the
        best results. Subclasses must define "best". If just one result
        is returned, it will be chosen.
        """
        raise NotImplementedError
# (patient_json, score) pair produced while ranking candidate patients.
PatientScore = namedtuple('PatientScore', ['patient', 'score'])
class PropertyWeight(DocumentSchema):
    """A case property and the weight it contributes to a match score."""

    case_property = StringProperty()
    weight = DecimalProperty()
    # How patient and case values are compared; see MATCH_FUNCTIONS.
    match_type = StringProperty(required=False, choices=MATCH_TYPES, default=MATCH_TYPE_DEFAULT)
    # Extra positional arguments for the match function (e.g. tolerance).
    match_params = ListProperty(required=False)
class WeightedPropertyPatientFinder(PatientFinder):
    """
    The ``WeightedPropertyPatientFinder`` class finds OpenMRS patients
    that match CommCare cases by assigning weights to case properties,
    and adding the weights of matching patient properties to calculate a
    confidence score.
    """

    # Identifiers that are searchable in OpenMRS. e.g.
    #     [ 'bahmni_id', 'household_id', 'last_name']
    searchable_properties = ListProperty()

    # The weight assigned to a matching property.
    # [
    #     {"case_property": "bahmni_id", "weight": 0.9},
    #     {"case_property": "household_id", "weight": 0.9},
    #     {
    #         "case_property": "dob",
    #         "weight": 0.75,
    #         "match_type": "days_diff",
    #         // days_diff matches based on days difference from given date
    #         "match_params": [364]
    #     },
    #     {
    #         "case_property": "first_name",
    #         "weight": 0.025,
    #         "match_type": "levenshtein",
    #         // levenshtein function takes edit_distance / len
    #         "match_params": [0.2]
    #         // i.e. 20% is one edit for every 5 characters
    #         // e.g. "Riyaz" matches "Riaz" but not "Riazz"
    #     },
    #     {"case_property": "last_name", "weight": 0.025},
    #     {"case_property": "municipality", "weight": 0.2},
    # ]
    property_weights = ListProperty(PropertyWeight)

    # The threshold that the sum of weights must pass for a CommCare case to
    # be considered a match to an OpenMRS patient
    threshold = DecimalProperty(default=1.0)

    # If more than one patient passes `threshold`, the margin by which the
    # weight of the best match must exceed the weight of the second-best match
    # to be considered correct.
    confidence_margin = DecimalProperty(default=0.667)  # Default: Matches two thirds better than second-best

    def __init__(self, *args, **kwargs):
        super(WeightedPropertyPatientFinder, self).__init__(*args, **kwargs)
        # case property -> (jsonpath, value_source_dict); populated by
        # find_patients() from the case config before scoring.
        self._property_map = {}

    def get_score(self, patient, case):
        """
        Return the sum of weighted properties to give an OpenMRS
        patient a score of how well they match a CommCare case.
        """
        def weights():
            # Yield one weight (or 0) per patient value matched by the
            # property's JSONPath expression.
            for property_weight in self.property_weights:
                prop = property_weight['case_property']
                jsonpath, value_source_dict = self._property_map[prop]
                weight = property_weight['weight']

                matches = jsonpath.find(patient)
                for match in matches:
                    patient_value = match.value
                    case_value = case.get_case_property(prop)
                    match_type = property_weight['match_type']
                    match_params = property_weight['match_params']
                    # Bind the configured params (e.g. tolerance) ahead of
                    # the two values being compared.
                    match_function = partial(MATCH_FUNCTIONS[match_type], *match_params)

                    is_equivalent = match_function(deserialize(value_source_dict, patient_value), case_value)
                    yield weight if is_equivalent else 0

        return sum(weights())

    def find_patients(self, requests, case, case_config):
        """
        Matches cases to patients. Returns a list of patients, each
        with a confidence score >= self.threshold
        """
        from corehq.motech.openmrs.openmrs_config import get_property_map
        from corehq.motech.openmrs.repeater_helpers import search_patients

        self._property_map = get_property_map(case_config)

        candidates = {}  # key on OpenMRS UUID to filter duplicates
        for prop in self.searchable_properties:
            value = case.get_case_property(prop)
            if value:
                response_json = search_patients(requests, value)
                for patient in response_json['results']:
                    score = self.get_score(patient, case)
                    if score >= self.threshold:
                        candidates[patient['uuid']] = PatientScore(patient, score)
        if not candidates:
            logger.info(
                'Unable to match case "%s" (%s): No candidate patients found.',
                case.name, case.get_id,
            )
            return []
        if len(candidates) == 1:
            patient = list(candidates.values())[0].patient
            logger.info(
                'Matched case "%s" (%s) to ONLY patient candidate: \n%s',
                case.name, case.get_id, pformat(patient, indent=2),
            )
            return [patient]
        patients_scores = sorted(candidates.values(), key=lambda candidate: candidate.score, reverse=True)
        if patients_scores[0].score / patients_scores[1].score > 1 + self.confidence_margin:
            # There is more than a `confidence_margin` difference
            # (defaults to 66.7%) in score between the best-ranked
            # patient and the second-best-ranked patient. Let's go with
            # Patient One.
            patient = patients_scores[0].patient
            logger.info(
                'Matched case "%s" (%s) to BEST patient candidate: \n%s',
                case.name, case.get_id, pformat(patients_scores, indent=2),
            )
            return [patient]
        # We can't be sure. Just send them all.
        logger.info(
            'Unable to match case "%s" (%s) to patient candidates: \n%s',
            case.name, case.get_id, pformat(patients_scores, indent=2),
        )
        return [ps.patient for ps in patients_scores]
| 39.61811 | 109 | 0.649111 | 8,295 | 0.824307 | 1,062 | 0.105535 | 586 | 0.058233 | 0 | 0 | 4,923 | 0.489218 |
207cece5083a3bf4d7a32eb8cf668d2c4b468041 | 91 | py | Python | Chapter05/examine_tar_file_content.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 116 | 2018-12-21T01:05:47.000Z | 2022-03-23T21:41:41.000Z | Chapter05/examine_tar_file_content.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 2 | 2021-03-31T19:36:19.000Z | 2021-06-10T22:29:26.000Z | Chapter05/examine_tar_file_content.py | add54/ADMIN_SYS_PYTHON | 5a6d9705537c8663c8f7b0f45d29ccc87b6096e7 | [
"MIT"
] | 147 | 2018-12-19T14:10:32.000Z | 2022-03-20T11:03:20.000Z | import tarfile
# Open the gzip-compressed tar archive read-only and print its member names.
tar_file = tarfile.open("work.tar.gz", "r:gz")
print(tar_file.getnames())
| 15.166667 | 46 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.208791 |
207dbca61480d6630a2424d70a7f583f2e795a69 | 263 | py | Python | main/__init__.py | graingert/LiteBot | 8ce84fb9e150e532adc4d5c549b0c04f208ba20c | [
"MIT"
] | null | null | null | main/__init__.py | graingert/LiteBot | 8ce84fb9e150e532adc4d5c549b0c04f208ba20c | [
"MIT"
] | null | null | null | main/__init__.py | graingert/LiteBot | 8ce84fb9e150e532adc4d5c549b0c04f208ba20c | [
"MIT"
] | null | null | null | from main import status, tps, server_commands, scoreboard
def setup(bot):
    """Register every LiteBot cog on the given bot instance."""
    cog_classes = (
        status.Status,
        tps.Tps,
        server_commands.ServerCommands,
        scoreboard.ScoreBoard,
    )
    # Instantiate and attach each cog in declaration order.
    for cog_cls in cog_classes:
        bot.add_cog(cog_cls(bot), True)
| 29.222222 | 58 | 0.737643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
207de260265c6907b91ff91673f6e28b96b0eef2 | 959 | py | Python | rsatools/rp.py | SteelShredder/rsa-tools | 17a3441a7f00b68674a48477eee7b31449eebf6f | [
"MIT"
] | null | null | null | rsatools/rp.py | SteelShredder/rsa-tools | 17a3441a7f00b68674a48477eee7b31449eebf6f | [
"MIT"
] | null | null | null | rsatools/rp.py | SteelShredder/rsa-tools | 17a3441a7f00b68674a48477eee7b31449eebf6f | [
"MIT"
] | null | null | null | from .decrypt import decrypt as d
from .encrypt import encrypt as e
from .generatekeys import genkeys as g
def pg(e, bit):
    """Generate an RSA key pair and persist it under ``rsakeys/``.

    ``e`` is the requested public exponent and ``bit`` the key size;
    both are forwarded to :func:`genkeys`. Writes the modulus, public
    exponent and private exponent to ``rsakeys/n``, ``rsakeys/e`` and
    ``rsakeys/d`` respectively.
    """
    # Generate first so the key files are not truncated if genkeys fails.
    n_val, e_val, d_val = g(e, bit)
    # Context managers guarantee the files are closed even on error.
    with open("rsakeys/n", "w+") as n_file:
        n_file.write(str(n_val))
    with open("rsakeys/e", "w+") as e_file:
        e_file.write(str(e_val))
    with open("rsakeys/d", "w+") as d_file:
        d_file.write(str(d_val))
def pe():
    """Encrypt ``rsainput/ei`` with the public key and write ``rsaoutput/eo``.

    Reads the public exponent and modulus from ``rsakeys/e`` and
    ``rsakeys/n``, encrypts the integer message in ``rsainput/ei``, and
    writes the ciphertext to ``rsaoutput/eo``.
    """
    # Context managers replace the original manual open/close pairs and
    # close the files even if int() parsing raises.
    with open("rsakeys/e") as e_file, open("rsakeys/n") as n_file, \
            open("rsainput/ei") as msg_file:
        ev = e_file.read()
        nv = n_file.read()
        mv = msg_file.read()
    with open("rsaoutput/eo", "w+") as out_file:
        out_file.write(str(e(int(mv), int(ev), int(nv))))
def pd():
    """Decrypt ``rsainput/di`` with the private key and write ``rsaoutput/do``.

    Reads the private exponent and modulus from ``rsakeys/d`` and
    ``rsakeys/n``, decrypts the integer ciphertext in ``rsainput/di``,
    and writes the plaintext to ``rsaoutput/do``.
    """
    # Context managers replace the original manual open/close pairs and
    # close the files even if int() parsing raises.
    with open("rsakeys/d") as d_file, open("rsakeys/n") as n_file, \
            open("rsainput/di") as msg_file:
        dv = d_file.read()
        nv = n_file.read()
        mv = msg_file.read()
    with open("rsaoutput/do", "w+") as out_file:
        out_file.write(str(d(int(mv), int(dv), int(nv))))
| 23.390244 | 47 | 0.519291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.176225 |
207fd602e53b75231f396c0ed8874c586d12e870 | 17,721 | py | Python | thrift/gen-py/hello/UserExchange.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 4 | 2018-04-14T16:28:39.000Z | 2021-11-14T12:08:02.000Z | thrift/gen-py/hello/UserExchange.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 3 | 2022-02-14T10:38:51.000Z | 2022-02-27T16:01:16.000Z | thrift/gen-py/hello/UserExchange.py | amitsaha/playground | 82cb5ac02ac90d3fa858a5153b0a5705187c14ce | [
"Unlicense"
] | 4 | 2015-07-07T01:01:27.000Z | 2019-04-12T05:38:26.000Z | #
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
    """Service interface for UserExchange, generated by the Thrift
    compiler. Concrete handlers implement these methods."""

    def ping(self):
        pass

    def add_user(self, u):
        """
        Parameters:
         - u
        """
        pass

    def get_user(self, uid):
        """
        Parameters:
         - uid
        """
        pass

    def clear_list(self):
        pass
class Client(Iface):
    """Thrift-generated RPC client for UserExchange. Each call is a
    send_* (serialize request) / recv_* (deserialize response) pair."""

    def __init__(self, iprot, oprot=None):
        # A single protocol may serve both directions.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def ping(self):
        self.send_ping()
        self.recv_ping()

    def send_ping(self):
        self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
        args = ping_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_ping(self):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = ping_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        return

    def add_user(self, u):
        """
        Parameters:
         - u
        """
        self.send_add_user(u)
        return self.recv_add_user()

    def send_add_user(self, u):
        self._oprot.writeMessageBegin('add_user', TMessageType.CALL, self._seqid)
        args = add_user_args()
        args.u = u
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_add_user(self):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = add_user_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            # Re-raise the service-declared exception on the caller side.
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_user failed: unknown result");

    def get_user(self, uid):
        """
        Parameters:
         - uid
        """
        self.send_get_user(uid)
        return self.recv_get_user()

    def send_get_user(self, uid):
        self._oprot.writeMessageBegin('get_user', TMessageType.CALL, self._seqid)
        args = get_user_args()
        args.uid = uid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_get_user(self):
        (fname, mtype, rseqid) = self._iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(self._iprot)
            self._iprot.readMessageEnd()
            raise x
        result = get_user_result()
        result.read(self._iprot)
        self._iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_user failed: unknown result");

    def clear_list(self):
        # No recv_* counterpart is generated: the request is sent without
        # reading a response.
        self.send_clear_list()

    def send_clear_list(self):
        self._oprot.writeMessageBegin('clear_list', TMessageType.CALL, self._seqid)
        args = clear_list_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
class Processor(Iface, TProcessor):
    """Thrift-generated server-side dispatcher: reads a message, looks
    up the handler method by name, and writes back the result."""

    def __init__(self, handler):
        self._handler = handler
        # Method-name -> process function dispatch table.
        self._processMap = {}
        self._processMap["ping"] = Processor.process_ping
        self._processMap["add_user"] = Processor.process_add_user
        self._processMap["get_user"] = Processor.process_get_user
        self._processMap["clear_list"] = Processor.process_clear_list

    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the payload and reply with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_ping(self, seqid, iprot, oprot):
        args = ping_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ping_result()
        self._handler.ping()
        oprot.writeMessageBegin("ping", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_add_user(self, seqid, iprot, oprot):
        args = add_user_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = add_user_result()
        try:
            result.success = self._handler.add_user(args.u)
        except InvalidValueException, e:
            # Declared exception travels back to the client in the result.
            result.e = e
        oprot.writeMessageBegin("add_user", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_get_user(self, seqid, iprot, oprot):
        args = get_user_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = get_user_result()
        try:
            result.success = self._handler.get_user(args.uid)
        except InvalidValueException, e:
            result.e = e
        oprot.writeMessageBegin("get_user", TMessageType.REPLY, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_clear_list(self, seqid, iprot, oprot):
        args = clear_list_args()
        args.read(iprot)
        iprot.readMessageEnd()
        # No reply is written for this method.
        self._handler.clear_list()
        return
# HELPER FUNCTIONS AND STRUCTURES
class ping_args:
    """Autogenerated Thrift argument struct for ``ping`` (no fields)."""

    thrift_spec = (
    )

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ping_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class ping_result:
    """Autogenerated Thrift result struct for ``ping`` (void return)."""

    thrift_spec = (
    )

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ping_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class add_user_args:
    """
    Autogenerated Thrift argument struct for ``add_user``.

    Attributes:
     - u: the User struct to add
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'u', (User, User.thrift_spec), None, ),  # 1
    )

    def __init__(self, u=None,):
        self.u = u

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.u = User()
                    self.u.read(iprot)
                else:
                    # Wrong wire type for this field id: skip it.
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('add_user_args')
        if self.u is not None:
            oprot.writeFieldBegin('u', TType.STRUCT, 1)
            self.u.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class add_user_result:
    """
    Autogenerated Thrift result struct for ``add_user``.

    Attributes:
     - success: i32 return value
     - e: InvalidValueException raised by the handler, if any
    """

    thrift_spec = (
        (0, TType.I32, 'success', None, None, ),  # 0
        (1, TType.STRUCT, 'e', (InvalidValueException, InvalidValueException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32();
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = InvalidValueException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('add_user_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_user_args:
    """
    Autogenerated Thrift argument struct for ``get_user``.

    Attributes:
     - uid: i32 id of the user to fetch
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'uid', None, None, ),  # 1
    )

    def __init__(self, uid=None,):
        self.uid = uid

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.uid = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('get_user_args')
        if self.uid is not None:
            oprot.writeFieldBegin('uid', TType.I32, 1)
            oprot.writeI32(self.uid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_user_result:
    """
    Autogenerated Thrift result struct for ``get_user``.

    Attributes:
     - success: the User struct returned by the handler
     - e: InvalidValueException raised by the handler, if any
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (User, User.thrift_spec), None, ),  # 0
        (1, TType.STRUCT, 'e', (InvalidValueException, InvalidValueException.thrift_spec), None, ),  # 1
    )

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = User()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = InvalidValueException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('get_user_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class clear_list_args:
    """Autogenerated Thrift argument struct for ``clear_list`` (no fields)."""

    thrift_spec = (
    )

    def read(self, iprot):
        # Fast C decode path when the accelerated binary protocol is usable.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('clear_list_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| 29.05082 | 188 | 0.668585 | 17,214 | 0.97139 | 0 | 0 | 0 | 0 | 0 | 0 | 977 | 0.055132 |
20800899334a7f9045e3040d9fc79c07a6e2cb14 | 784 | py | Python | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 1 | 2019-02-10T08:27:22.000Z | 2019-02-10T08:27:22.000Z | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 18 | 2019-02-09T01:02:09.000Z | 2022-03-30T23:04:24.000Z | api/db/db.py | bcgov/data-stream | 2d8fbf3843ee765ee102f306993fdbc742aca5d8 | [
"Apache-2.0"
] | 2 | 2019-02-09T06:36:54.000Z | 2019-02-12T09:52:58.000Z | from mongoengine import connect
from config import Config
from db.models.subscriptions import Subscriptions
class Db:
    """Thin wrapper wiring mongoengine to the MongoDB described in Config."""

    Subscriptions = None

    def __init__(self, createClient=True):
        cfg = Config()
        self.db = {}
        self.Subscriptions = Subscriptions
        self.createClient = createClient
        self.initConnection(cfg)

    def initConnection(self, config):
        """Open the mongoengine connection using the ``database`` section of *config*."""
        database = config.data['database']
        connect(
            db=database['dbName'],
            host=database['host'],
            port=database['port'],
            username=database['username'],
            password=database['password'],
            authentication_source=database['dbName'],
            connect=self.createClient,
        )
| 34.086957 | 68 | 0.633929 | 674 | 0.859694 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.137755 |
2080236000439e0ed58e5a725a9936f7390f4727 | 6,490 | py | Python | src/hg/makeDb/scripts/cd8Escape/process_epitopes.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 171 | 2015-04-22T15:16:02.000Z | 2022-03-18T20:21:53.000Z | src/hg/makeDb/scripts/cd8Escape/process_epitopes.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 60 | 2016-10-03T15:15:06.000Z | 2022-03-30T15:21:52.000Z | src/hg/makeDb/scripts/cd8Escape/process_epitopes.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 80 | 2015-04-16T10:39:48.000Z | 2022-03-29T16:36:30.000Z | import os
import re
import gzip
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
def get_args():
    """Parse command line arguments for the escape-mutation track builder."""
    cli = argparse.ArgumentParser(
        description="Method to create track for escape mutations")
    cli.add_argument("-xlsx", help="file containing all the data")
    cli.add_argument("-pid", help="pep to number",
                     default="prot_names_pids_8.txt")
    cli.add_argument("-gb_tools", help="path to gb_tools", default="./")
    return cli.parse_args()
def read_pid(args):
    """Load the peptide lookup tables from the whitespace-delimited file args.pid.

    Each line is: <peptide> <pid> <nucleotide id> <amino-acid id>.
    Returns a (pid, aaid, nucid) tuple of dicts keyed by peptide.
    """
    pid, aaid, nucid = {}, {}, {}
    with open(args.pid, 'r') as handle:
        for raw_line in handle:
            parts = raw_line.strip().split()
            peptide = parts[0]
            pid[peptide] = parts[1]
            nucid[peptide] = parts[2]
            aaid[peptide] = parts[3]
    return (pid, aaid, nucid)
def get_start_pos(peptide, pid, aaid, nucid):
    """Return the nucleotide start position for a peptide, or -1 if unknown.

    The lookup key is the first eight residues of the peptide (the tables
    are keyed on 8-mers).  `aaid` is accepted for call compatibility but
    not used here.
    """
    first_eight = peptide[:8]  # was ''.join(list(peptide)[0:8]) — same value
    if first_eight in pid:
        return nucid[first_eight]
    return -1
def _write_epitope(handler, wt_mt, wt_pep, mt_pep, row, pid, aaid, nucid):
    """Write one BED-detail row for a wildtype/mutant peptide pair.

    Skips the pair when the wildtype start position cannot be resolved, or
    when the same wildtype/mutant pair was already written.  (The original
    code checked one key but recorded under another, and never recorded a
    mutant on the success path, so duplicates could slip through.)
    Returns True when a row was written.
    """
    chromStart = get_start_pos(wt_pep, pid, aaid, nucid)
    if chromStart == -1:
        return False
    if mt_pep in wt_mt[wt_pep]:
        return False
    wt_mt[wt_pep].append(mt_pep)
    chrom = "NC_045512v2"
    score = 1000
    strand = '+'
    chromEnd = str(len(wt_pep) * 3 + int(chromStart))
    fields = [chrom, str(chromStart), chromEnd, wt_pep, str(score), strand,
              str(chromStart), chromEnd,  # thickStart / thickEnd
              str(row['pom']), str(row['gene']), str(row['pil']),
              row['aa_change'], row['c_change'], mt_pep]
    handler.write('\t'.join(fields) + "\n")
    return True


def main(args):
    """Build escape-mutation BED detail, BED, and bigBed tracks from the
    CD8 epitope spreadsheet (args.xlsx), using the peptide tables in args.pid."""
    (pid, aaid, nucid) = read_pid(args)
    cd8_epitopes = pd.read_excel(args.xlsx, skiprows=0, header=0, index_col=None)
    print(cd8_epitopes.columns)

    outfiletag = 'escape_mutations'
    beddetailfilename = outfiletag + '.beddetail'
    bedfilename = outfiletag + '.bed'
    bbfilename = outfiletag + '.bb'

    wt_mt = defaultdict(list)  # wildtype peptide -> mutant peptides already written
    mutations = []             # distinct gene/codon-change/AA-change keys
    with open(beddetailfilename, 'w') as beddetailfilehandler:
        for i in range(len(cd8_epitopes['Position of Mutation'])):
            row = {
                'pom': cd8_epitopes['Position of Mutation'][i],
                'gene': cd8_epitopes['Gene'][i],
                'pil': cd8_epitopes['Probable Infection Location'][i],
                'aa_change': cd8_epitopes['AA Change'][i],
                'c_change': cd8_epitopes['Codon Change'][i],
            }
            mutation_key = '{}_{}_{}'.format(row['gene'], row['c_change'], row['aa_change'])
            if mutation_key not in mutations:
                mutations.append(mutation_key)
            wildtype = cd8_epitopes['Wildtype Sequence'][i]
            if ';' not in wildtype:
                _write_epitope(beddetailfilehandler, wt_mt, wildtype,
                               cd8_epitopes['Mutant Sequence 1'][i], row, pid, aaid, nucid)
            else:
                # Two wildtype peptides separated by ';' pair with the two
                # mutant-sequence columns respectively.
                wt1_pep, wt2_pep = wildtype.split(';')[0:2]
                _write_epitope(beddetailfilehandler, wt_mt, wt1_pep,
                               cd8_epitopes['Mutant Sequence 1'][i], row, pid, aaid, nucid)
                _write_epitope(beddetailfilehandler, wt_mt, wt2_pep,
                               cd8_epitopes['Mutant Sequence 2'][i], row, pid, aaid, nucid)
    print(len(mutations))
    # use gbtools to convert from beddetail to bed and bigbed
    os.system(f"bedSort {beddetailfilename} {bedfilename}")
    os.system(f"bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as")


if __name__ == "__main__":
    main(get_args())
| 33.112245 | 109 | 0.495069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,241 | 0.191217 |
20814154949781ec0720ced4bfe5f982eaceb787 | 2,268 | py | Python | plugins/dnshome_de_srvc.py | ppetr/ddupdate | ed1c16349d5900618e7d6d2322aab3d287f92860 | [
"MIT"
] | null | null | null | plugins/dnshome_de_srvc.py | ppetr/ddupdate | ed1c16349d5900618e7d6d2322aab3d287f92860 | [
"MIT"
] | null | null | null | plugins/dnshome_de_srvc.py | ppetr/ddupdate | ed1c16349d5900618e7d6d2322aab3d287f92860 | [
"MIT"
] | null | null | null | """
ddupdate plugin updating data on dnshome.de.
See: ddupdate(8)
See: https://www.dnshome.de/
"""
from typing import AnyStr
from logging import Logger
from ddupdate.ddplugin import ServicePlugin, ServiceError
from ddupdate.ddplugin import http_basic_auth_setup, get_response, IpAddr
class DeDnsHomeAddressPlugin(ServicePlugin):
    """Update a dns entry on dnshome.de.

    Works with most address plugins, including default-web-ip, default-if
    and ip-disabled.

    The host cannot be set with a `hostname` parameter: although the
    hostname appears in the query, the service ignores it.  Instead, use
    the host to update as the username (e.g. subdomain.dnshome.de).

    Respects the _global_ `--ip-version` option, so configure ddupdate to
    match your connection type.

    netrc: Use a line like
        machine www.dnshome.de login <username> password <password>

    Options:
        none
    """

    _name = 'dnshome.de'
    _oneliner = 'Updates on https://www.dnshome.de/'
    _url = "https://www.dnshome.de/dyndns.php?&hostname={0}"

    @staticmethod
    def is_success(response: AnyStr) -> bool:
        """Return True when the server reply indicates success.

        Args:
            response: The response body to analyze.

        Returns:
            True when the body starts with 'good' (update applied) or
            'nochg' (records were already up to date).
        """
        return response.startswith(('good', 'nochg'))

    def register(self, log: Logger, hostname: str, ip: IpAddr, options):
        """Implement ServicePlugin.register.

        Expects `ip` to be pre-filtered according to the _global_
        `--ip-version` option.
        """
        url = self._url.format(hostname)
        if ip:
            for query_param, address in (('&ip=', ip.v4), ('&ip6=', ip.v6)):
                if address:
                    url += query_param + address
        http_basic_auth_setup(url)
        body = get_response(log, url)  # ASCII-decoded response body
        if not DeDnsHomeAddressPlugin.is_success(body):
            raise ServiceError("Bad update reply.\nMessage: " + body)
| 31.068493 | 78 | 0.642416 | 1,978 | 0.872134 | 0 | 0 | 512 | 0.22575 | 0 | 0 | 1,428 | 0.62963 |
2081deed960bb703ad8647423a60570315d1aa3c | 130 | py | Python | debug.py | codingjerk/ztd.blunders-web | 38d4c1049dc3d0bd0b4294ffa419d25cbfbf2b83 | [
"MIT"
] | null | null | null | debug.py | codingjerk/ztd.blunders-web | 38d4c1049dc3d0bd0b4294ffa419d25cbfbf2b83 | [
"MIT"
] | null | null | null | debug.py | codingjerk/ztd.blunders-web | 38d4c1049dc3d0bd0b4294ffa419d25cbfbf2b83 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from app import app
app.run(host = '0.0.0.0', port = 8089, debug = True, threaded = False, processes = 1)
| 21.666667 | 85 | 0.653846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.230769 |
2081f91a4c6fb4212b9597707979f9847c86b694 | 4,838 | py | Python | tests/test_ms_sql_server.py | changrunner/zeppos_microsoft_sql_server | d3c9ddbadfadd2262b8d9c6751df9026220e4b75 | [
"Apache-2.0"
] | null | null | null | tests/test_ms_sql_server.py | changrunner/zeppos_microsoft_sql_server | d3c9ddbadfadd2262b8d9c6751df9026220e4b75 | [
"Apache-2.0"
] | null | null | null | tests/test_ms_sql_server.py | changrunner/zeppos_microsoft_sql_server | d3c9ddbadfadd2262b8d9c6751df9026220e4b75 | [
"Apache-2.0"
] | null | null | null | import unittest
from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer
import pandas as pd
import pyodbc
import os
class TestTheProjectMethods(unittest.TestCase):
    """Integration tests for zeppos_microsoft_sql_server.MsSqlServer.

    NOTE(review): these are live integration tests — they require a local
    SQL Server Express instance reachable as localhost\\sqlexpress with
    'ODBC Driver 13 for SQL Server' installed, plus a writable c:\\temp.
    """

    # Single source of truth for the connection string (it was duplicated in
    # every test).  '\\s' keeps the runtime value 'localhost\sqlexpress'
    # without relying on an invalid '\s' escape sequence.
    CONNECTION_STRING = ("DRIVER={ODBC Driver 13 for SQL Server}; "
                         "SERVER=localhost\\sqlexpress; DATABASE=master; "
                         "Trusted_Connection=yes;")

    def test_constructor_methods(self):
        self.assertEqual("<class 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer'>",
                         str(type(MsSqlServer(""))))

    def test_execute_sql_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        self.assertEqual(True, ms_sql.execute_sql("drop table if exists #tmp"))

    def test_drop_table_method(self):
        # Dropping a missing table should still report success.
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        self.assertEqual(True, ms_sql.drop_table("dbo", "table_does_not_exist"))

    def test_create_table_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        df = pd.DataFrame({'column_1': [3600],
                           'column_2': ['12'],
                           'column_3': [23]
                           }, columns=['column_1', 'column_2', 'column_3'])
        # Exercise object/str/int dtype mapping in create_table.
        df['column_1'] = df['column_1'].astype(object)
        df['column_2'] = df['column_2'].astype(str)
        df['column_3'] = df['column_3'].astype(int)
        ms_sql.drop_table("dbo", "table_does_not_exist")
        self.assertEqual(True, ms_sql.create_table("dbo", "table_does_not_exist", df))

    def test_does_table_exists(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        self.assertEqual(False, ms_sql.does_table_exists('dbo', 'test123456123'))

    def test_save_dataframe_by_record_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        ms_sql.drop_table("dbo", "test_table")
        ms_sql.execute_sql("create table dbo.test_table (column_1 int)")
        df_actual = pd.DataFrame({'column_1': [3600]}, columns=['column_1'])
        self.assertEqual(True, ms_sql.save_dataframe_by_record(df_actual, "dbo", "test_table"))
        self.assertEqual(1, pd.read_sql("SELECT TOP 1 column_1 FROM dbo.test_table",
                                        pyodbc.connect(self.CONNECTION_STRING)).shape[0])

    def test_save_dataframe_in_bulk_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        df_actual = pd.DataFrame({'column_1': [3600]}, columns=['column_1'])
        self.assertEqual(True, ms_sql.save_dataframe_in_bulk(df_actual, "dbo", "test_table"))
        self.assertEqual(1, pd.read_sql("SELECT TOP 1 column_1 FROM dbo.test_table",
                                        pyodbc.connect(self.CONNECTION_STRING)).shape[0])

    def test_1_read_data_into_dataframe_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        self.assertEqual(1, ms_sql.read_data_into_dataframe(
            "SELECT TOP 1 COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS").shape[0])

    def test_2_read_data_into_dataframe_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        self.assertEqual(1, ms_sql.read_data_into_dataframe("""
            SET NOCOUNT ON; -- This has to be here.
            DROP TABLE IF EXISTS #tmp
            SELECT DISTINCT TABLE_SCHEMA, TABLE_NAME into #tmp
            FROM INFORMATION_SCHEMA.COLUMNS
            SELECT count(1) as RECORD_COUNT from #tmp
            """).shape[0])

    def test_extract_to_csv_method(self):
        ms_sql = MsSqlServer(self.CONNECTION_STRING)
        csv_file = ms_sql.extract_to_csv(
            "select table_schema, table_name from information_schema.tables",
            r"c:\temp", "test.csv")
        self.assertTrue(os.path.exists(csv_file.full_file_name))
        df = pd.read_csv(csv_file.full_file_name, sep="|")
        self.assertGreater(df.shape[0], 0)
        os.remove(csv_file.full_file_name)


if __name__ == '__main__':
    unittest.main()
| 51.468085 | 135 | 0.670525 | 4,661 | 0.963415 | 0 | 0 | 0 | 0 | 0 | 0 | 2,266 | 0.468375 |
20825b308b651659766c6c9a91b7118e25fa9654 | 827 | py | Python | test22.py | spatole12/ssw555tmashishningrohnitshivani2019Spring | 737f2ac2548c232c1181b0da4dd327d3a12f3c3e | [
"MIT"
] | null | null | null | test22.py | spatole12/ssw555tmashishningrohnitshivani2019Spring | 737f2ac2548c232c1181b0da4dd327d3a12f3c3e | [
"MIT"
] | null | null | null | test22.py | spatole12/ssw555tmashishningrohnitshivani2019Spring | 737f2ac2548c232c1181b0da4dd327d3a12f3c3e | [
"MIT"
] | 2 | 2019-03-09T22:28:21.000Z | 2019-04-29T21:09:38.000Z | import unittest
import io
import sys
from main import Gedcom
class TestProject(unittest.TestCase):
    def test_us22_unique_id(self):
        """US22: all individual/family IDs in the GEDCOM file must be unique."""
        # Capture everything Gedcom prints while parsing; restore stdout in a
        # finally block so a parse failure cannot leave it redirected (the
        # original only restored it after the last assertion).
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        try:
            FILENAME = "My-Family-27-Jan-2019-275.ged"
            gedcom = Gedcom(FILENAME)  # parsing prints the US22 errors
        finally:
            sys.stdout = sys.__stdout__
        output = capturedOutput.getvalue()
        # Non-unique ID cases
        self.assertIn('ERROR US22 for ID @F8@', output)
        self.assertIn('ERROR US22 for ID @I15@', output)
        # Unique ID cases
        self.assertNotIn('ERROR US22 for ID @F3@', output)
        self.assertNotIn('ERROR US22 for ID @I2@', output)


if __name__ == '__main__':
    unittest.main()
2082afade3820d1cb8855f41bc4382f224e85fa9 | 200 | py | Python | rest_framework_social_oauth2/settings.py | hrahmadi71/django-rest-framework-social-oauth2 | f9de220606bd08981b9d81ab80dd69d70ceb1988 | [
"MIT"
] | 613 | 2018-03-31T01:59:00.000Z | 2022-03-19T14:40:42.000Z | rest_framework_social_oauth2/settings.py | hrahmadi71/django-rest-framework-social-oauth2 | f9de220606bd08981b9d81ab80dd69d70ceb1988 | [
"MIT"
] | 132 | 2015-04-08T17:31:55.000Z | 2018-03-15T13:32:06.000Z | rest_framework_social_oauth2/settings.py | hrahmadi71/django-rest-framework-social-oauth2 | f9de220606bd08981b9d81ab80dd69d70ceb1988 | [
"MIT"
] | 118 | 2018-03-29T02:47:23.000Z | 2022-02-17T12:14:07.000Z | from django.conf import settings
DRFSO2_PROPRIETARY_BACKEND_NAME = getattr(settings, 'DRFSO2_PROPRIETARY_BACKEND_NAME', "Django")
DRFSO2_URL_NAMESPACE = getattr(settings, 'DRFSO2_URL_NAMESPACE', "")
| 40 | 96 | 0.83 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.325 |
2087972111b6fcd0c39cbcbfedd3e3b1270eb392 | 5,499 | py | Python | txircd/modules/core/bans_gline.py | guyguy2001/txircd | d548975a775a3582ebb2f480474cfa3e79a26b8c | [
"BSD-3-Clause"
] | 19 | 2015-07-06T21:24:15.000Z | 2021-04-01T13:05:46.000Z | txircd/modules/core/bans_gline.py | guyguy2001/txircd | d548975a775a3582ebb2f480474cfa3e79a26b8c | [
"BSD-3-Clause"
] | 37 | 2015-01-16T10:01:28.000Z | 2021-06-01T14:17:49.000Z | txircd/modules/core/bans_gline.py | guyguy2001/txircd | d548975a775a3582ebb2f480474cfa3e79a26b8c | [
"BSD-3-Clause"
] | 4 | 2016-09-15T02:48:43.000Z | 2020-01-14T17:23:50.000Z | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
    """Core g:line (global user ban) module built on the x:line framework."""
    implements(IPlugin, IModuleData)

    name = "GLine"
    core = True
    lineType = "G"

    def actions(self):
        # Action names and priorities must stay in sync with the other
        # x:line modules.
        return [ ("register", 10, self.checkLines),
                 ("changeident", 10, self.checkIdentChange),
                 ("changehost", 10, self.checkHostChange),
                 ("commandpermission-GLINE", 10, self.restrictToOper),
                 ("statsruntype-glines", 10, self.generateInfo),
                 ("burst", 10, self.burstLines) ]

    def userCommands(self):
        return [ ("GLINE", 1, UserGLine(self)) ]

    def serverCommands(self):
        return [ ("ADDLINE", 1, ServerAddGLine(self)),
                 ("DELLINE", 1, ServerDelGLine(self)) ]

    def load(self):
        self.initializeLineStorage()

    def verifyConfig(self, config):
        if "client_ban_msg" not in config:
            return
        if not isinstance(config["client_ban_msg"], basestring):
            raise ConfigValidationError("client_ban_msg", "value must be a string")

    def checkUserMatch(self, user, mask, data):
        """Return True when the user's ident@host matches the ban mask for
        any of the displayed host, the real host, or the raw IP."""
        banMask = self.normalizeMask(mask)
        for hostForm in (user.host(), user.realHost, user.ip):
            if fnmatchcase(ircLower("{}@{}".format(user.ident, hostForm)), banMask):
                return True
        return False

    def killUser(self, user, reason):
        """Notify and disconnect a user who matched a g:line."""
        self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
        user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email abuse@example.com for assistance."))
        user.disconnect("G:Lined: {}".format(reason))

    def checkLines(self, user):
        """Kill the user if any g:line matches; returns False when killed."""
        banReason = self.matchUser(user)
        if banReason is None:
            return True
        self.killUser(user, banReason)
        return False

    def checkIdentChange(self, user, oldIdent, fromServer):
        self.checkLines(user)

    def checkHostChange(self, user, hostType, oldHost, fromServer):
        # Only re-check users local to this server.
        if user.uuid[:3] == self.ircd.serverID:
            self.checkLines(user)

    def restrictToOper(self, user, data):
        if self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
            return None
        user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
        return False
class UserGLine(Command):
    """Handler for the oper /GLINE command (add, timed add, or remove)."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, user, params, prefix, tags):
        # One param = removal; three or more = add with duration + reason;
        # exactly two is ambiguous and rejected.
        if len(params) < 1 or len(params) == 2:
            user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
            return None
        banmask = params[0]
        if banmask in self.module.ircd.userNicks:
            # A nick was given; ban that user's ident@realhost.
            targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]]
            banmask = "{}@{}".format(targetUser.ident, targetUser.realHost)
        elif "@" not in banmask:
            banmask = "*@{}".format(banmask)
        if len(params) == 1:
            return {
                "mask": banmask
            }
        return {
            "mask": banmask,
            "duration": durationToSeconds(params[1]),
            "reason": " ".join(params[2:])
        }

    def execute(self, user, data):
        banmask = data["mask"]
        if "reason" not in data:
            # Removal request.
            if self.module.delLine(banmask):
                user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
            else:
                user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
            return True
        if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
            user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
            return True
        # Collect matches before disconnecting so the user table is not
        # mutated while iterating it.
        matched = []
        for checkUser in self.module.ircd.users.itervalues():
            banReason = self.module.matchUser(checkUser)
            if banReason is not None:
                matched.append((checkUser, banReason))
        for checkUser, banReason in matched:
            self.module.killUser(checkUser, banReason)
        if data["duration"] > 0:
            user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
        else:
            user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
        return True
class ServerAddGLine(Command):
    """Server-to-server ADDLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Parameter handling is shared with the other x:line types.
        return self.module.handleServerAddParams(server, params, prefix, tags)

    def execute(self, server, data):
        if not self.module.executeServerAddCommand(server, data):
            return None
        # Collect matches first, then disconnect, so the user table is not
        # mutated while iterating it.
        matched = []
        for checkUser in self.module.ircd.users.itervalues():
            banReason = self.module.matchUser(checkUser)
            if banReason is not None:
                matched.append((checkUser, banReason))
        for checkUser, banReason in matched:
            self.module.killUser(checkUser, banReason)
        return True
class ServerDelGLine(Command):
    """Server-to-server DELLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Removal parsing is shared with the other x:line types.
        return self.module.handleServerDelParams(server, params, prefix, tags)

    def execute(self, server, data):
        return self.module.executeServerDelCommand(server, data)


glineModule = GLine()
208a55695ef5b4358c7fc67496a5d6443fd64738 | 3,552 | py | Python | errandpy/utility.py | DIAOZHUO/errandpy | 8465218db8b9be7871bb7d38286e9df42d5e9b9a | [
"MIT"
] | null | null | null | errandpy/utility.py | DIAOZHUO/errandpy | 8465218db8b9be7871bb7d38286e9df42d5e9b9a | [
"MIT"
] | null | null | null | errandpy/utility.py | DIAOZHUO/errandpy | 8465218db8b9be7871bb7d38286e9df42d5e9b9a | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy
import errandpy
"""
logファイルのFitting Parameter: a,b,c,dを返します
normalized_paramの時正規化したパラメーターを返します
"""
def real_a(a, delta, min):
    """De-normalize fit parameter a using the normalization range/offset
    (presumably from `normalized` -- TODO confirm)."""
    return min + delta * (a + 1)
def real_b(b, delta):
    """De-normalize fit parameter b by the normalization range delta."""
    return delta * b
def get_z0FromLogFile(path, isLegacy=False):
    """Read the z0 index from a fit log file.

    The value is token 13 (space-split) of the 4th-from-last line in the
    legacy format, or the 3rd-from-last line in the current format.
    """
    with open(path, 'r') as f:
        lines = f.readlines()
    offset = 4 if isLegacy else 3
    tokens = lines[len(lines) - offset].split(" ")
    return int(tokens[13])
def legacy_get_logFileParamater(path, normalized_param=True, normMode=1) -> []:
    """Parse fit parameters a, b, c, d from the second-to-last line of a
    legacy-format log file.

    When normalized_param is False, a and b are converted back to physical
    units using the min/delta recorded on the first two lines.
    """
    with open(path, 'r') as f:
        lines = f.readlines()
    tokens = lines[-2].split(" ")
    print(tokens)
    if len(tokens) != 10:
        result = [0, 0, 0, 0]
        print(" Warning: Log File Error!!! " + path)
    else:
        result = [float(tokens[3]), float(tokens[5]), float(tokens[7]), float(tokens[9])]
    if normalized_param is False:
        minValue = float(lines[0].split(" ")[1][normMode:-2])
        delta = float(lines[1].split(" ")[1][normMode:-2])
        result[0] = real_a(result[0], delta, minValue)
        result[1] = real_b(result[1], delta)
    return result
def get_logFileParamater(path, normalized_param=True, normMode=1) -> []:
    """Parse fit parameters a, b, c, d from the last line of a log file.

    When normalized_param is False, a and b are converted back to physical
    units using the min/delta recorded on the first two lines.
    """
    with open(path, 'r') as f:
        lines = f.readlines()
    tokens = lines[-1].split(" ")
    if len(tokens) in (12, 14):
        result = [float(tokens[3]), float(tokens[5]), float(tokens[7]), float(tokens[9])]
    else:
        result = [0, 0, 0, 0]
        print(" Warning: Log File Error!!! " + path)
    if normalized_param is False:
        minValue = float(lines[0].split(" ")[1][normMode:-2])
        delta = float(lines[1].split(" ")[1][normMode:-2])
        result[0] = real_a(result[0], delta, minValue)
        result[1] = real_b(result[1], delta)
    print(result)
    return result
def _f_long(x, a, b, c, d):
    """Long-range model curve: a - b/(1 + c*x)**d when the legacy model is
    enabled, otherwise a - b/(c + x)**d."""
    if errandpy.useLegacyModel:
        return a - b / (1 + c * x) ** d
    return a - b / (c + x) ** d
def clamp(minValue, maxValue, value):
    """Clamp value into [minValue, maxValue]; the lower bound wins if the
    bounds cross."""
    capped = value if value < maxValue else maxValue
    return capped if capped > minValue else minValue
def clamp01(value):
    """Clamp value into the unit interval [0, 1]."""
    return clamp(0, 1, value)
def mean_r(x, y, a, b, c, d):
    """Coefficient of determination (R^2) of the _f_long fit against y."""
    residuals = y - _f_long(x, a, b, c, d)
    ss_res = numpy.dot(residuals, residuals)
    deviations = y - numpy.mean(y)
    ss_tot = numpy.dot(deviations, deviations)
    return 1 - ss_res / ss_tot
def normalized(array, max=1, bias=0):
    """Rescale array linearly to the range [bias, bias + max].

    Returns (scaled array, minimum (keepdims), range) so callers can undo
    the normalization later.
    """
    lo = array.min(keepdims=True)
    hi = array.max(keepdims=True)
    span = hi - lo
    scaled = (array - lo) / span * max + bias
    return scaled, lo, span
def draw_plt(x, y, a, b, c, d, bound, name, ze=None):
    """Plot the raw data, the fitted _f_long curve, and the residual
    (data minus fit) curve.  `bound` is the index where the fit region
    starts; `ze`, if given, marks an extra index with a blue line."""
    y_b = y[bound:]
    plt.clf()
    plt.scatter(x, y, color='red', label='Original data', alpha=0.5)
    _x = x[bound:]
    # Mean R (coefficient of determination) is computed on the fit region only.
    plt.title(name + " (Mean R: " + str(mean_r(_x, y_b, a, b, c, d)) + ")")
    plt.axhline(0, color='green', linestyle='dashdot')
    plt.axvline(x[bound], color='green', linestyle='dashdot')
    if ze is not None:
        plt.axvline(x[ze], color='blue', linestyle='dashdot')
    plt.plot(x, _f_long(x, a, b, c, d), color='blue', label='Fitted line')
    # NOTE(review): no plt.legend()/show()/savefig() here; the caller is
    # presumably expected to finish and display the figure -- confirm.
    plt.plot(x, y - _f_long(x, a, b, c, d), color='black', label='force curve')
208a6e6d4af030e233066a14c7021ed7e69dabef | 1,576 | py | Python | API_AUTH_TEST.py | DaTiC0/cPanel-Python | e0a7ec4d3ad3908e2ff493f68a578468deac96fc | [
"Apache-2.0"
] | null | null | null | API_AUTH_TEST.py | DaTiC0/cPanel-Python | e0a7ec4d3ad3908e2ff493f68a578468deac96fc | [
"Apache-2.0"
] | null | null | null | API_AUTH_TEST.py | DaTiC0/cPanel-Python | e0a7ec4d3ad3908e2ff493f68a578468deac96fc | [
"Apache-2.0"
] | null | null | null |
# from logging import error
import requests
import urllib.parse as uparse
import config
import logging
# Enable wire-level debugging at the http.client level (requests -> urllib3 ->
# http.client): logs each REQUEST (headers and data) and the RESPONSE headers.
# The response body itself is not logged.
try:  # Python 3
    from http.client import HTTPConnection
except ImportError:  # Python 2 fallback
    from httplib import HTTPConnection
HTTPConnection.debuglevel = 1

logging.basicConfig()  # initialize logging so urllib3 records are emitted
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True

# cPanel UAPI token auth; equivalent to:
# curl -H'Authorization: cpanel username:APITOKEN' 'https://example.com:2083/execute/Module/function?parameter=value'
session = requests.session()

credentials = f'{config.USERNAME}:{config.API}'
headers = {
    'Authorization': f'cpanel {credentials}',
    'User-Agent': config.DEFAULT_USER_AGENT,
    'Accept': 'application/json'
}
session.headers.update(headers)

base_url = '{}://{}:{}'.format('https', config.URL, config.PORT)

module = 'Tokens'
function = 'list'
path = f'/execute/{module}/{function}'
url = uparse.urljoin(base_url, path)
print(url)

params = {}
r = session.post(url, params=params)

print(r.headers['Content-Type'])
print(r.status_code)
# Fail loudly with a real exception on auth errors.  The original did
# `raise requests_log.error`, which raises the bound logger method itself
# and produces a confusing TypeError instead of a useful message.
if r.status_code == 401:
    raise RuntimeError('Authentication failed (HTTP 401): check USERNAME/API token in config')

try:
    print(r.json())
except ValueError:
    # Body is not valid JSON; log it and fall back to the raw text below.
    requests_log.error('response body is not valid JSON')

print(r.text)
| 25.836066 | 117 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.432107 |
208a944865cc3b2efa6d830a3ced85fa06ed73c4 | 3,369 | py | Python | myutils/pandas_util.py | stas00/fastai-misc | e7e8c18ed798f91b2e026c667f795f45992608b8 | [
"Apache-2.0"
] | 1 | 2018-06-01T17:39:59.000Z | 2018-06-01T17:39:59.000Z | myutils/pandas_util.py | stas00/fastai-misc | e7e8c18ed798f91b2e026c667f795f45992608b8 | [
"Apache-2.0"
] | null | null | null | myutils/pandas_util.py | stas00/fastai-misc | e7e8c18ed798f91b2e026c667f795f45992608b8 | [
"Apache-2.0"
] | null | null | null | # from https://github.com/ohmeow/pandas_examples
# import sys
# sys.path.append('/home/stas/fast.ai')
# from myutils.pandas_util import advanced_describe
import pandas as pd
######################### Data Examination ############################
# - made changes to unique_vals to show a small sample, regardless how many there are
def advanced_describe(df):
    """Extended df.describe(): per-column count, basic stats, missing
    count/%, unique count/%, a sample of unique values, and the dtype.

    Percentages are relative to the number of rows in *df*.  (The original
    divided by ``len(desc)`` — the number of *columns* — which made the
    percentages meaningless.)
    """
    # get descriptive stats for dataframe for 'all' column dtypes; drop the
    # columns we recompute below
    desc = df.describe(include='all').T
    desc.drop(['top', 'freq', 'unique'], axis=1, inplace=True)

    # update column counts (df.describe() returns NaN for non-numeric cols)
    counts = pd.Series({ col: df[col].count() for col in df.columns })
    desc.update(counts.to_frame('count'))

    row_count = len(df)

    # add missing count/% (fraction of rows that are null)
    missings = df.isnull().sum()
    desc = pd.concat([desc, missings.to_frame('missing')], axis=1)
    desc['missing%'] = (desc['missing'] / row_count).round(2)

    # add unique counts/% (fraction of rows that are distinct)
    uniques = pd.Series({ col: len(df[col].unique()) for col in df.columns })
    desc = pd.concat([desc, uniques.to_frame('unique')], axis=1)
    desc['unique%'] = (desc['unique'] / row_count).round(2)

    # show at most 10 sample unique values per column
    unique_vals = pd.Series({ col: df[col].unique() if len(df[col].unique()) < 10 else [*df[col].unique()[0:10],"..."] for col in df.columns })
    desc = pd.concat([desc, unique_vals.to_frame('unique_values')], axis=1, sort=True)

    # add col dtype
    dtypes = pd.Series({ col: df[col].dtype for col in df.columns })
    desc = pd.concat([desc, dtypes.to_frame('dtype')], axis=1, sort=True)
    return desc
# same as advanced_describe but with fever attributes to avoid
# horizontal scrolling
def advanced_describe_short(df):
    """Compact variant of advanced_describe: count, mean/std/min/max,
    missing count, unique count, sample unique values, and dtype.
    Drops the quartiles and the percentage columns to avoid horizontal
    scrolling."""
    summary = df.describe(include='all').T
    summary.drop(['top', 'freq', 'unique', '25%', '50%', '75%'], axis=1, inplace=True)
    # df.describe() reports NaN counts for non-numeric columns; recompute
    summary.update(pd.Series({ col: df[col].count() for col in df.columns }).to_frame('count'))
    # missing values per column
    summary = pd.concat([summary, df.isnull().sum().to_frame('missing')], axis=1)
    # unique values per column
    uniq_counts = pd.Series({ col: len(df[col].unique()) for col in df.columns })
    summary = pd.concat([summary, uniq_counts.to_frame('unique')], axis=1)
    # at most 10 sample unique values per column
    samples = pd.Series({ col: df[col].unique() if len(df[col].unique()) < 10 else [*df[col].unique()[0:10],"..."] for col in df.columns })
    summary = pd.concat([summary, samples.to_frame('unique_values')], axis=1, sort=True)
    # column dtypes
    dtypes = pd.Series({ col: df[col].dtype for col in df.columns })
    summary = pd.concat([summary, dtypes.to_frame('dtype')], axis=1, sort=True)
    return summary
######################### Data Cleaning and Preparation ###############
def fillna_by_group(df, target_col, group_cols, agg='median'):
    """Fill NaNs in df[target_col] with the group-wise aggregate (median by
    default), in place.

    `agg` must be the name of a Series aggregation method (e.g. 'median',
    'mean').  Uses getattr instead of the original eval(f'x.{agg}()'), which
    executed arbitrary code from the agg string.
    """
    df[target_col] = df.groupby(group_cols)[target_col].transform(
        lambda x: x.fillna(getattr(x, agg)()))
######################### Feature Engineering #########################
def add_by_regex(df, target_col, new_col, regex):
    """Create df[new_col] by extracting the first regex group from
    df[target_col] (NaN where the pattern does not match)."""
    extracted = df[target_col].str.extract(regex, expand=False)
    df[new_col] = extracted
| 41.592593 | 143 | 0.623034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,247 | 0.37014 |
208b0a423a689f212d0216be381d251035036c09 | 314 | bzl | Python | tensorflow/compiler/plugin/poplar/poplar.bzl | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/compiler/plugin/poplar/poplar.bzl | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/compiler/plugin/poplar/poplar.bzl | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | load(
"//tensorflow/core/platform:rules_cc.bzl",
"cc_library",
)
def poplar_cc_library(**kwargs):
    """cc_library wrapper that appends poplar-specific build options."""
    if "copts" not in kwargs:
        kwargs["copts"] = []
    # Treat a missing return statement in a non-void function as an error.
    kwargs["copts"].append("-Werror=return-type")
    cc_library(**kwargs)
| 19.625 | 58 | 0.656051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.5 |
208b77d9848dd4e23b70112bbfeaf02f7d4d30a9 | 1,016 | py | Python | easy/9.Palindrome_Number.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | 2 | 2018-03-04T23:29:49.000Z | 2019-04-23T01:13:12.000Z | easy/9.Palindrome_Number.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | null | null | null | easy/9.Palindrome_Number.py | Leesoar/leetcode | e566513fc0e7055155157798f06089299bd44fd2 | [
"Apache-2.0"
] | 1 | 2018-03-05T09:58:59.000Z | 2018-03-05T09:58:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Question:
Determine whether an integer is a palindrome. Do this without extra space.
Some hints:
Could negative integers be palindromes? (i.e., -1)
If you are thinking of converting the integer to string, note the restriction of using extra space.
You could also try reversing an integer. However, if you have solved the problem "Reverse Integer", you know that the reversed integer might overflow. How would you handle such case?
There is a more generic way of solving this problem.
'''
class Solution(object):
    def isPalindrome(self, x):
        """Return True if the integer x reads the same forwards and backwards.

        Uses only integer arithmetic (no string conversion, no extra space).
        The original used float division (`x / digits` and `int(x / digits)`),
        which loses precision and can give wrong answers for integers above
        2**53; floor division is exact for ints of any size.

        :type x: int
        :rtype: bool
        """
        # Negative integers are never palindromes (the '-' has no mirror).
        if x < 0:
            return False
        # 'digits' becomes the place value of the most significant digit.
        digits = 1
        while x // digits >= 10:
            digits *= 10
        # Repeatedly compare the outermost digits, then strip both ends.
        while digits > 1:
            if x // digits != x % 10:
                return False
            x = (x % digits) // 10   # drop the first and last digit
            digits //= 100           # two digits removed per iteration
        return True
| 25.4 | 183 | 0.587598 | 467 | 0.459646 | 0 | 0 | 0 | 0 | 0 | 0 | 600 | 0.590551 |
208bc866b2ae4ae06bb7f254c3b642334b3aa7b8 | 598 | py | Python | scripts/lottery_prints.py | chibitrader/smartcotractlottery | 5208988437e71b8927f567d5b7d3d270d0ecdc2f | [
"MIT"
] | null | null | null | scripts/lottery_prints.py | chibitrader/smartcotractlottery | 5208988437e71b8927f567d5b7d3d270d0ecdc2f | [
"MIT"
] | null | null | null | scripts/lottery_prints.py | chibitrader/smartcotractlottery | 5208988437e71b8927f567d5b7d3d270d0ecdc2f | [
"MIT"
] | null | null | null | from brownie import Lottery, accounts, config, network
from web3 import Web3
def printStuff():
    """Deploys the Lottery contract on the active network and prints its fees."""
    deployer = accounts[0]
    # Look up the price-feed addresses configured for the active network once.
    net_cfg = config["networks"][network.show_active()]
    lottery = Lottery.deploy(
        net_cfg["eth_usd_price_feed"],
        net_cfg["gbp_usd_price_feed"],
        {"from": deployer},
    )
    print(f"Entrance fee: {lottery.getEntranceFee()}")
    print(f"GBP USD fee: {lottery.getGbpUsdPrice()}")
    print(f"ETH USD fee: {lottery.getEthUsdPrice()}")
def main():
    # Brownie script entry point: deploy the lottery and print its fee values.
    printStuff()
| 27.181818 | 72 | 0.665552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.242475 |
208ff921c6f3b56395e223269bba951dc85c3d8d | 28,128 | py | Python | _build/jupyter_execute/curriculum-notebooks/Languages/FrenchVerbCodingConjugation/french-verb-conjugation.py | BryceHaley/curriculum-jbook | d1246799ddfe62b0cf5c389394a18c2904383437 | [
"CC-BY-4.0"
] | 1 | 2022-03-18T18:19:40.000Z | 2022-03-18T18:19:40.000Z | _build/jupyter_execute/curriculum-notebooks/Languages/FrenchVerbCodingConjugation/french-verb-conjugation.py | callysto/curriculum-jbook | ffb685901e266b0ae91d1250bf63e05a87c456d9 | [
"CC-BY-4.0"
] | null | null | null | _build/jupyter_execute/curriculum-notebooks/Languages/FrenchVerbCodingConjugation/french-verb-conjugation.py | callysto/curriculum-jbook | ffb685901e266b0ae91d1250bf63e05a87c456d9 | [
"CC-BY-4.0"
] | null | null | null | 
<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Languages/FrenchVerbCodingConjugation/French-Verb-Conjugation.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
%%html
<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<p> Code is hidden for ease of viewing. Click the Show/Hide button to see. </p>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
import numpy as np
#import matplotlib.pyplot as plt
from IPython.display import display, Math, Latex, HTML, clear_output, Markdown, Javascript
import ipywidgets as widgets
from ipywidgets import interact, FloatSlider, IntSlider, interactive, Layout
from traitlets import traitlets
#module to conjugate
#import mlconjug
#from functools import partial
#import pickle
import plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# French Verb Conjugation
----
## Introduction
In this Jupyter Notebook by Callysto you will learn about French verb conjugation. Mastering the basics of verb conjugation is essential to reading and writing in French. There are some basic rules (and exceptions) that we will address.
Because much of conjugation is algorithmic, one can write computer code to do the task for us. If you are interested in the programming aspects, please see the related notebook [French-Verb-Coding](CC-186-French-Verb-Coding.ipynb).
#### Necessary background
- Some basic knowledge of French
- Elementary Python syntax
#### Outline of this notebook
We will cover several important topics
- a review of personal pronouns in French
- two important verbs, Être and Avoir
- the regular verbs, with endings "-er", "-ir" and "-re"
- exceptions to the regular verbs
#### Allons-y!
## Personal pronouns
Conjugation is the process of making the verb in a sentence "agree" with the subject of that sentence. Typically, the subject of a sentence is a pronoun, so to start conjugating verbs, we can review the personal pronouns in French.
Below is table showing the subject pronouns in French. These will be used to separate the different cases of verb conjugation.
# Overview table of the French subject pronouns: grammatical person,
# French form, and English equivalent, rendered as a Plotly table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
english = ['I', 'you', 'she, he, one', 'we', 'you (plural or formal)', 'they']
person = ['First', 'Second', 'Third', 'First (plural)', 'Second (plural)', 'Third (plural)']

header_style = dict(
    values=['Person', 'French', 'English'],
    line=dict(color='rgb(0,0,0)'),
    fill=dict(color='rgb(0,35,48)'),
    align=['center', 'center'],
    font=dict(color='white', size=16),
    height=40,
)
cell_style = dict(
    values=[person, french, english],
    line=dict(color='black'),
    fill=dict(color='rgb(95,102,161)'),
    align=['center', 'center'],
    font=dict(color='white', size=14),
    height=30,
)

trace0 = go.Table(columnorder=[1, 2, 3], columnwidth=[10, 10],
                  header=header_style, cells=cell_style)
iplot(dict(data=[trace0], layout=dict(width=750, height=450)))
Our verb conjugation rules will be based on these personal pronouns, so it is good to get familiar with their translations. French makes a distinction between all of these different tense based on their person, whether or not they are masculine or feminine, and if they are plural or singular.
## Two Important Verbs
Let's jump right to conjugating the two (arguably) most important verbs: To Be and To Have.
## 1. Être (to be)
# Present-tense conjugation of "être" (to be), shown as a Plotly table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
etre_conjug = ['suis', 'es', 'est', 'sommes', 'êtes', 'sont']

header_style = dict(
    values=['Pronoun', 'Conjugation'],
    line=dict(color='rgb(0,0,0)'),
    fill=dict(color='rgb(0,35,48)'),
    align=['center', 'center'],
    font=dict(color='white', size=16),
    height=40,
)
cell_style = dict(
    values=[french, etre_conjug],
    line=dict(color='black'),
    fill=dict(color='rgb(95,102,161)'),
    align=['center', 'center'],
    font=dict(color='white', size=14),
    height=30,
)

trace0 = go.Table(columnorder=[1, 2], columnwidth=[10, 10],
                  header=header_style, cells=cell_style)
iplot(dict(data=[trace0], layout=dict(width=500, height=450)))
To use these in a sentence, you could write something like:
- Je suis un garçon.
- Elle est une fille.
- Nous sommes tous les humaines.
Notice how in each sentence, the form of the verb changes to match subject pronoun.
"Être" is an irregular verb, that does not obey a certain format, if you will, for conjugating verbs in the present tense. There many examples of exceptions, which we will explore further. But first, the next most important verb:
## 2. Avoir (to have)
# Present-tense conjugation of "avoir" (to have).  Note the elided "j'"
# because the verb starts with a vowel.
french = ["j'", 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
avoir_conjug = ['ai', 'as', 'a', 'avons', 'avez', 'ont']

header_style = dict(
    values=['Pronoun', 'Conjugation'],
    line=dict(color='rgb(0,0,0)'),
    fill=dict(color='rgb(0,35,48)'),
    align=['center', 'center'],
    font=dict(color='white', size=16),
    height=40,
)
cell_style = dict(
    values=[french, avoir_conjug],
    line=dict(color='black'),
    fill=dict(color='rgb(95,102,161)'),
    align=['center', 'center'],
    font=dict(color='white', size=14),
    height=30,
)

trace0 = go.Table(columnorder=[1, 2], columnwidth=[10, 10],
                  header=header_style, cells=cell_style)
iplot(dict(data=[trace0], layout=dict(width=500, height=450)))
Notice for the first person singular we have *j'* instead of *je*, this is due to the fact that the verb starts a vowel. This rule is similar to using "a" and "an" in English.
## The Regular Verbs
There are three types of regular verbs, which are identified by their endings. They are:
- the "-er" verbs, such as "parler" (to speak)
- the "-ir" verbs, such as "finir" (to finish)
- the "-re" verbs, such as "vendre" (to sell)
Each of these three type has its own pattern for conjugation, which is shared by all other regular verbs of the same typs. Let's have a look at these.
## 1. The "-er" Regular Verbs
There is a general rubric for conjugating verbs that end in **er** in the present tense.
We will illustrate this with the verb "parler" (to speak). The stem of the verb parler is "parl-". We conjugate it by adding on the endings "e", "es", "e", "ons", "ez" "ent" for the corresponding pronouns, as follows:
# Conjugation table for "parler" (to speak): stem "parl-" + regular "-er"
# endings -e, -es, -e, -ons, -ez, -ent.
# Fix: removed the unused `stem` and `ending` lists (dead variables that
# were never displayed in the table).
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
parler_conjug = ['parle', 'parles', 'parle', 'parlons', 'parlez', 'parlent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, parler_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
This can be taken as the general rule for conjugating **er** verbs in the present tense. All you need to do is find the stem of the verb, which was parl- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
For instance, try this yourself with the verb "chanter" (to sing). The stem is "chant-", so what are the corresponding six conjugations, as in the table above?
This pattern works for most "er" verbs, and there are hundreds of them. Some common ones are:
- aimer (to like/love)
- arriver (to arrive, to happen)
- brosser (to brush)
- chanter (to sing)
- chercher (to look for)
- danser (to dance)
- demander (to ask for)
- détester (to hate)
- donner (to give)
- écouter (to listen to)
- étudier (to study)
- gagner (to win, to earn)
- habiter (to live)
- jouer (to play)
- manquer (to miss)
- marcher (to walk, to function)
- parler (to talk, to speak)
- penser (to think)
- regarder (to watch, to look at)
- travailler (to work)
- trouver (to find)
- visiter (to visit (a place)
There are also many exceptions for the **er** verbs, which we will discuss below.
## 2. The "-ir" Regular Verbs
There is a general rubric for conjugating verbs that end in **ir** in the present tense.
We will illustrate this with the verb "finir" (to finish). The stem of the verb finir is "fin-". We conjugate it by adding on the endings "is", "is", "it", "issons", "issez", "issent" for the corresponding pronouns, as follows:
# Conjugation table for "finir" (to finish): stem "fin-" + regular "-ir"
# endings -is, -is, -it, -issons, -issez, -issent.
# Fixes: "nous finissons" was misspelled "finisson"; removed the unused
# `finir_stem` and `ir_ending` lists (the latter also contained a typo).
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
finir_conjug = ['finis', 'finis', 'finit', 'finissons', 'finissez', 'finissent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, finir_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
This can be taken as the general rule for conjugating **ir** verbs in the present tense. All you need to do is find the *stem* of the verb, which was fin- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
For instance, try this yourself with the verb "grandir" (to grow). The stem is "grand-", so what are the corresponding six conjugations, as in the table above?
This pattern works for most "ir" verbs, and there are hundreds of them. Some common ones are:
- applaudir (to applaud)
- bâtir (to build)
- choisir (to choose)
- désobéir (to disobey)
- finir (to finish)
- grandir (to grow up)
- grossir (to gain weight)
- guérir (to heal, to get well)
- maigrir (to lose weight)
- obéir (to obey)
- punir (to punish)
- réfléchir (to think, to reflect)
- remplir (to fill)
- réussir (to succeed)
- vieillir (to grow old)
Again, though, there will be exceptions...
## 3. The "-re" Regular Verbs
There is a general rubric for conjugating verbs that end in **re** in the present tense.
We will illustrate this with the verb "vendre" (to sell). The stem of the verb vendre is "vend-". We conjugate it by adding on the endings "s", "s", "nothing", "ons", "ez", "ent" for the corresponding pronouns, as follows:
# Conjugation table for "vendre" (to sell): stem "vend-" + regular "-re"
# endings -s, -s, (nothing), -ons, -ez, -ent.
# Fix: removed the unused `vendre_stem` and `re_ending` lists (dead variables).
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
vendre_conjug = ['vends', 'vends', 'vend', 'vendons', 'vendez', 'vendent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, vendre_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
This can be taken as the general rule for conjugating **re** verbs in the present tense. All you need to do is find the *stem* of the verb, which was vend- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
For instance, try this yourself with the verb "attendre" (to wait). The stem is "attend-", so what are the corresponding six conjugations, as in the table above?
This pattern works for most "re" verbs, and there are many of them. Some common ones are:
attendre (to wait)
défendre (to defend)
descendre (to descend)
entendre (to hear)
étendre (to stretch)
fondre (to melt)
pendre (to hang, or suspend)
perdre (to lose)
prétendre (to claim)
rendre (to give back, or return)
répondre (to answer)
vendre (to sell)
Again, though, there will be exceptions...
## 1. Exceptions to the regular er verbs
French is filled with exceptions, which makes it a bit of a difficult language to master as one has to basically dedicate the exceptions to memory. An exception for a verb means that it is not (maybe just partially) conjugating using the endings given above. Most exceptions arise in an alteration of the stem of the verb.
Thankfully there are not many exceptions for the **er** verbs. Here are three notable ones:
## 1a. The "-oyer" and "-uyer" exceptions:
For verbs like "envoyer" (to send) or "ennuyer" (to annoy) the stem changes the "y" to an "i" for all pronouns except nous and vous:
# Conjugation table for "envoyer" (to send): the "y" becomes "i" except
# for nous/vous.
# Fix: `columnorder` listed three columns for a two-column table
# (copy-paste leftover).
french = ["j'", 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
envoyer_conjug = ['envoie', 'envoies', 'envoie', 'envoyons', 'envoyez', 'envoient']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, envoyer_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## 1b. The "e_er" or "é_er" exceptions:
Verbs like "acheter" (to buy) or "préférer" (to prefer) also follow an exception rule. The accent aigue becomes an accent grave, that is, é becomes è, except in the nous and vous cases, where it does not change. Note this means the pronunciation of the letter changes as well.
# Conjugation table for "préférer" (to prefer): é becomes è except for
# nous/vous.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
preferer_conjug = ['préfère', 'préfères', 'préfère', 'préférons', 'préférez', 'préfèrent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, preferer_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## 1c. The " –eler " and " -eter " exceptions:
For verbs like "appeler" (to call) or "rejeter" (to reject) the letters "l"
or "t" get doubled. Again, this does not hold for the nous and vous cases.
# Conjugation table for "appeler" (to call): the "l" doubles except for
# nous/vous.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
appeler_conjug = ['appelle', 'appelles', 'appelle', 'appelons', 'appelez', 'appellent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, appeler_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
It's important to be aware of these exceptions, as you will be able to identify patterns in verbs of these forms and the exceptions themselves, like how it doesn't apply for nous and vous. Knowledge of the exceptions is crucial to mastering the language!
## 2. Exceptions to the regular ir verbs
Unfortunately, with the **ir** verbs, there are many, many exceptions. Three important ones are as follows:
## 2a. Verbs like partir (to leave):
For "partir" (to leave), the keep is to drop the "t" from the stem in the singular case, and add the endings "s", "s", "t". For the plural case, you keep the "t". The conjgations go like this:
# Conjugation table for "partir" (to leave): the "t" is dropped in the
# singular forms.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
partir_conjug = ['pars', 'pars', 'part', 'partons', 'partez', 'partent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, partir_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
Other irregular ir verbs like partir include
- dormir (to sleep)
- mentir (to lie)
- partir (to leave)
- sentir (to feel)
- servir (to serve)
- sortir (to go out)
## 2b. Verbs that end in -llir, -frir, or -vrir
Curiously, these verbs conjugate like an "er" verb. Just take the stem and add the endings "e", "es", "e", "ons", "ez", "ent." For instance, here is the conjugation for ouvrir (to open):
# Conjugation table for "ouvrir" (to open): an "-ir" verb that conjugates
# like an "-er" verb.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
ouvrir_conjug = ['ouvre', 'ouvres', 'ouvre', 'ouvrons', 'ouvrez', 'ouvrent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, ouvrir_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
Other ir verbs that follow this pattern include:
- couvrir (to cover)
- cueillir (to pick)
- offrir (to offer)
- ouvrir (to open)
- souffrir (to suffer)
## 2c. Verbs that end in -enir
These ones all follow a similar pattern. The stem changes in the singular forms and in the third-person plural, and the endings are just like the first irregular ir case (like partir). Here is the conjugation for tenir (to hold):
# Conjugation table for "tenir" (to hold): the stem vowel changes in the
# singular forms and in the third-person plural.
# Fixes: "ils/elles tiennent" was misspelled "tenent"; `columnorder`
# listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
tenir_conjug = ['tiens', 'tiens', 'tient', 'tenons', 'tenez', 'tiennent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, tenir_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
Other verbs in this irregular category include:
- appartenir (to belong)
- contenir (to contain)
- convenir (to suit)
- devenir (to become)
- maintenir (to maintain)
- obtenir (to obtain)
- parvenir (to reach, or achieve)
- prévenir (to warn, or prevent)
- retenir (to retain)
- revenir (to come back)
- soutenir (to support)
- (se) souvenir (to remember)
- tenir (to hold)
- venir (to come)
## 2d. Other very irregular ir verbs
There are a dozen or so irregular ir verbs that don't fit any pattern. These include many that end in oir, as well as other like acquérir, asseoir, avoir, courir, devoir, falloir, mourir, pleuvoir, pouvoir, recevoir, savoir, servir, valoir, voir. You just have to learn these conjugations individually.
## 3. Exceptions to the re verbs
As with the other two regular classes, the **re** verbs also have several exceptions. In all cases, the changes involve adding or dropping a consonant in the stem, and possibly adjusting the endings. A quick summary is to say that the unusual changes have to do with making the spelling match the prononciation of the verb forms. In some sense, it is easier to learn what the verbs sound like, and then spell them to match.
There are four basic exceptions, as follows:
## 3a. The verb prendre (to take) and its relatives
Here, you just drop the "d" from the stem in the plural form, and add an extra "n" in the last case:
# Conjugation table for "prendre" (to take): the "d" is dropped in the
# plural forms and an extra "n" appears in the third-person plural.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
prendre_conjug = ['prends', 'prends', 'prend', 'prenons', 'prenez', 'prennent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, prendre_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## 3b. The verbs battre (to fight) and mettre (to put)
Here, you just drop one "t" from the stem in the singular form:
# Conjugation table for "battre" (to fight): one "t" is dropped in the
# singular forms.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
battre_conjug = ['bats', 'bats', 'bat', 'battons', 'battez', 'battent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, battre_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## 3c. The verbs rompre (to break) and its relatives
This one is such a tiny exception: an extra t in the third person singular:
# Conjugation table for "rompre" (to break): a "t" appears in the
# third-person singular.
# Fix: `columnorder` listed three columns for a two-column table.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
rompre_conjug = ['romps', 'romps', 'rompt', 'rompons', 'rompez', 'rompent']
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=dict(
        values=['Pronoun', 'Conjugation'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, rompre_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## 3d. Finally, Verbs Ending in –aindre, –eindre, and –oindre
In this case, the dre/tre is dropped to form the stem, and in the plural cases, the letter g is inserted. Again, this is to get the prononciation to match the spelling.
# Side-by-side conjugation of three "-aindre / -oindre / -eindre" verbs:
# the dre is dropped to form the stem, and "gn" appears in the plural forms.
# Fixes: "nous joignons" was misspelled "joignon"; the third verb is
# "peindre" (to paint), not "peintre" (a painter); `columnorder` and
# `columnwidth` now match the four actual columns.
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
craindre_conjug = ['crains', 'crains', 'craint', 'craignons', 'craignez', 'craignent']
joindre_conjug = ['joins', 'joins', 'joint', 'joignons', 'joignez', 'joignent']
peindre_conjug = ['peins', 'peins', 'peint', 'peignons', 'peignez', 'peignent']
trace0 = go.Table(
    columnorder=[1, 2, 3, 4],
    columnwidth=[10, 10, 10, 10],
    header=dict(
        values=['Pronoun', 'Craindre', 'Joindre', 'Peindre'],
        line=dict(color='rgb(0,0,0)'),
        fill=dict(color='rgb(0,35,48)'),
        align=['center', 'center', 'center', 'center'],
        font=dict(color='white', size=16),
        height=40,
    ),
    cells=dict(
        values=[french, craindre_conjug, joindre_conjug, peindre_conjug],
        line=dict(color='black'),
        fill=dict(color='rgb(95,102,161)'),
        align=['center', 'center', 'center', 'center'],
        font=dict(color='white', size=14),
        height=30,
    ),
)
layout = dict(width=500, height=450)
iplot(dict(data=[trace0], layout=layout))
## Coding Examples
---
How could one write code to see if someone conjugated a verb correctly? If you are interested in the programming aspects, please see the related notebook [French-Verb-Coding](CC-186-French-Verb-Coding.ipynb).
#perhaps show how this work for a different verb and subject.
#manipulate this code for 'ir' verbs or try to write your own code to handle the exceptions above.
#remember to use the list user_answer for the user_inputs and don't forget to enter some inputs yourself ;)
# user_answer = [je.value,tu.value,elle.value,nous.value,vous.value,elles.value]
# french = ['je','tu','elle/il/on','nous','vous','elles/ils']
# endings = ['e','es','e','ons','ez','ent']
# for i in range(0,len(endings)):
# n = len(endings[i])
# #feel free to change what happens if they get it right or wrong.
# if user_answer[i] != '': #So that it doesn't print if nothing has been entered
# if user_answer[i][-n:] != endings[i]:
# print('The conjugation for "'+french[i]+'" is incorrect')
# if user_answer[i][-n:] == endings[i]:
# print('The conjugation for "'+french[i]+'" is correct!')
---
## Conclusion
In this Jupyter Notebook by Callysto you learned the basics of French verb conjugation in the present tense. In a related notebook, we see how we can expose the structure of the French verb conjugation rules to compose a program that checks whether a user entered the correct conjugations of a verb in the present tense. This is somewhat of a hallmark of coding: taking some structure from the problem at hand and expressing it in the form of generalizable, applicable written code. Breaking down problems in this fashion is essential to computational thinking.
Je te remercie d'avoir essayé les exercices donnés.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md) | 33.091765 | 559 | 0.655041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,998 | 0.248376 |
209013f0c2f9bc0d8238ed1fa7a13fadafec827e | 2,948 | py | Python | item_engine/textbase/generate_tests.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | item_engine/textbase/generate_tests.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | item_engine/textbase/generate_tests.py | GabrielAmare/ItemEngine | 10277626c3724ad9ae7b934f53e11e305dc34da5 | [
"MIT"
] | null | null | null | import os
from typing import List, Iterator
from item_engine.textbase import make_characters
__all__ = ["generate_tests"]
def generate_tests(pckg: str,
                   inputs: List[str],
                   __test__: str = '__test__',
                   spec: str = 'spec',
                   __test_preview__: str = '__test_preview__',
                   remove_preview: bool = True
                   ) -> None:
    """Generates a regression-test module for an item_engine parser.

    Builds the engine described by the `spec` module of package `pckg`,
    parses every string in `inputs` with the freshly generated engine, and
    writes a test module (`__test__`.py) whose assertions pin the current
    parse results.  The module text is first written to `__test_preview__`.py
    and executed once as a smoke test; the preview file is removed afterwards
    when `remove_preview` is True.
    """
    # Dotted package path split into components, used as the `fromlist`
    # of the dynamic imports below.
    PATH_LIST = pckg.split('.')
    # Regenerate (and overwrite) the engine code from the spec module.
    try:
        __import__(name=spec, fromlist=PATH_LIST).engine.build(allow_overwrite=True)
    except ImportError:
        raise Exception("[TEST GENERATION] : engine build failure !")
    # Import the just-generated engine and grab its parse/build entry points.
    try:
        engine_module = __import__(name="engine", fromlist=PATH_LIST)
        parse = engine_module.parse
        build = engine_module.build
    except ImportError:
        raise Exception("[TEST GENERATION] : generated code failure !")
    def get(text: str):
        # Parses `text` and keeps only results spanning the whole input
        # (starting at position 0 and ending at the EOF token's position).
        *results, eof = list(parse(make_characters(text, eof=True)))
        return [build(result) for result in results if result.at == 0 and result.to == eof.to]
    def indent(s: str) -> str:
        # Indents every line of `s` by four spaces.
        return '\n'.join('    ' + line for line in s.split('\n'))
    def indent_result(result: Iterator):
        # Renders an iterable of parse results as a multi-line list literal.
        return "[\n" + indent(",\n".join(map(repr, result))) + "\n]"
    # One `test(...)` call per input string, freezing today's parse results.
    tests = indent("\n".join(f"test({text!r}, {indent_result(list(get(text)))!s})" for text in inputs))
    # Source text of the generated test module.  NOTE(review): inside the
    # generated module, `build` presumably comes from the wildcard import of
    # `.engine.materials` -- verify against the generated package layout.
    content = f"""# THIS MODULE HAVE BEEN GENERATED AUTOMATICALLY, DO NOT MODIFY MANUALLY
from typing import List
from item_engine.textbase import *
PATH_LIST = {PATH_LIST!r}
__all__ = ['run']
try:
    __import__(name={spec!r}, fromlist=PATH_LIST).engine.build(allow_overwrite=True)
except ImportError:
    raise Exception("[TEST GENERATION] : engine build failure !")
try:
    from {'.'.join(PATH_LIST)}.engine import parse
    from {'.'.join(PATH_LIST)}.engine.materials import *
except ImportError:
    raise Exception("[TEST GENERATION] : generated code failure !")
def get(text: str):
    *results, eof = list(parse(make_characters(text, eof=True)))
    return [build(result) for result in results if result.at == 0 and result.to == eof.to]
def test(text: str, expected: List[Element]):
    result = get(text)
    assert expected == result, f"\\ntext = {{text!r}}\\nexpected = {{expected!r}}\\nresult = {{result!r}}"
def run():
{tests}
if __name__ == '__main__':
    run()
"""
    # Write the preview copy, import it and run it once as a sanity check,
    # then write the final test module.  The preview file is removed in all
    # cases (success or failure) when `remove_preview` is set.
    try:
        with open(__test_preview__ + '.py', mode='w', encoding='utf-8') as file:
            file.write(content)
        try:
            __import__(name=__test_preview__, fromlist=PATH_LIST).run()
        except Exception as e:
            raise Exception("[TEST GENERATION] : preview error", e)
        with open(__test__ + '.py', mode='w', encoding='utf-8') as file:
            file.write(content)
    finally:
        if remove_preview:
            if os.path.exists(__test_preview__ + '.py'):
                os.remove(__test_preview__ + '.py')
| 29.777778 | 108 | 0.620421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,295 | 0.439281 |
2090b0abc90c285fddee287aeae0b35da855bdc6 | 6,546 | py | Python | sonnet/python/modules/block_matrix_test.py | imraviagrawal/sonnet | 3a305e16af9e274b89db2834e3b7cea9cea6806f | [
"Apache-2.0"
] | 1 | 2020-03-10T15:06:41.000Z | 2020-03-10T15:06:41.000Z | sonnet/python/modules/block_matrix_test.py | schaul/sonnet | 3a305e16af9e274b89db2834e3b7cea9cea6806f | [
"Apache-2.0"
] | null | null | null | sonnet/python/modules/block_matrix_test.py | schaul/sonnet | 3a305e16af9e274b89db2834e3b7cea9cea6806f | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for block_matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sonnet.python.modules import block_matrix
import tensorflow as tf
def create_input(size, batch_size=1):
x = tf.range(size * batch_size)
return tf.reshape(tf.to_float(x), shape=(batch_size, -1))
class BlockTriangularMatrixTest(tf.test.TestCase):
def _check_output_size(self, btm, result, batch_size=1):
self.assertEqual(result.shape, (batch_size,) + btm.output_shape)
def test_lower(self):
"""Tests block lower-triangular matrix."""
btm = block_matrix.BlockTriangularMatrix(
block_shape=(2, 3), block_rows=3, upper=False)
self.assertEqual(btm.num_blocks, 6)
self.assertEqual(btm.block_size, 6)
self.assertEqual(btm.input_size, 36)
output = btm(create_input(btm.input_size))
with self.test_session() as sess:
result = sess.run(output)
self._check_output_size(btm, result)
expected = np.array([[[0, 1, 2, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 0, 0, 0, 0, 0, 0],
[6, 7, 8, 9, 10, 11, 0, 0, 0],
[12, 13, 14, 15, 16, 17, 0, 0, 0],
[18, 19, 20, 21, 22, 23, 24, 25, 26],
[27, 28, 29, 30, 31, 32, 33, 34, 35]]])
self.assertAllEqual(result, expected)
def test_lower_no_diagonal(self):
"""Tests block lower-triangular matrix without diagonal."""
btm = block_matrix.BlockTriangularMatrix(
block_shape=(2, 3), block_rows=3, include_diagonal=False)
self.assertEqual(btm.num_blocks, 3)
self.assertEqual(btm.block_size, 6)
self.assertEqual(btm.input_size, 18)
output = btm(create_input(btm.input_size))
with self.test_session() as sess:
result = sess.run(output)
self._check_output_size(btm, result)
expected = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 0, 0, 0, 0, 0, 0],
[6, 7, 8, 9, 10, 11, 0, 0, 0],
[12, 13, 14, 15, 16, 17, 0, 0, 0]]])
self.assertAllEqual(result, expected)
def test_upper(self):
"""Tests block upper-triangular matrix."""
btm = block_matrix.BlockTriangularMatrix(
block_shape=(2, 3), block_rows=3, upper=True)
self.assertEqual(btm.num_blocks, 6)
self.assertEqual(btm.block_size, 6)
self.assertEqual(btm.input_size, 36)
output = btm(create_input(btm.input_size))
with self.test_session() as sess:
result = sess.run(output)
self._check_output_size(btm, result)
expected = np.array([[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[9, 10, 11, 12, 13, 14, 15, 16, 17],
[0, 0, 0, 18, 19, 20, 21, 22, 23],
[0, 0, 0, 24, 25, 26, 27, 28, 29],
[0, 0, 0, 0, 0, 0, 30, 31, 32],
[0, 0, 0, 0, 0, 0, 33, 34, 35]]])
self.assertAllEqual(result, expected)
def test_upper_no_diagonal(self):
"""Tests block upper-triangular matrix without diagonal."""
btm = block_matrix.BlockTriangularMatrix(
block_shape=(2, 3), block_rows=3, upper=True, include_diagonal=False)
self.assertEqual(btm.num_blocks, 3)
self.assertEqual(btm.block_size, 6)
self.assertEqual(btm.input_size, 18)
output = btm(create_input(btm.input_size))
with self.test_session() as sess:
result = sess.run(output)
self._check_output_size(btm, result)
expected = np.array([[[0, 0, 0, 0, 1, 2, 3, 4, 5],
[0, 0, 0, 6, 7, 8, 9, 10, 11],
[0, 0, 0, 0, 0, 0, 12, 13, 14],
[0, 0, 0, 0, 0, 0, 15, 16, 17],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]])
self.assertAllEqual(result, expected)
def test_batch(self):
"""Tests batching."""
btm = block_matrix.BlockTriangularMatrix(
block_shape=(2, 2), block_rows=2, upper=False)
output = btm(create_input(12, batch_size=2))
with self.test_session() as sess:
result = sess.run(output)
self._check_output_size(btm, result, batch_size=2)
expected = np.array([
[[0, 1, 0, 0],
[2, 3, 0, 0],
[4, 5, 6, 7],
[8, 9, 10, 11]],
[[12, 13, 0, 0],
[14, 15, 0, 0],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
self.assertAllEqual(result, expected)
class BlockDiagonalMatrixTest(tf.test.TestCase):
def test_default(self):
"""Tests BlockDiagonalMatrix."""
bdm = block_matrix.BlockDiagonalMatrix(block_shape=(2, 3), block_rows=3)
self.assertEqual(bdm.num_blocks, 3)
self.assertEqual(bdm.block_size, 6)
self.assertEqual(bdm.input_size, 18)
output = bdm(create_input(bdm.input_size))
with self.test_session() as sess:
result = sess.run(output)
expected = np.array([[[0, 1, 2, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 6, 7, 8, 0, 0, 0],
[0, 0, 0, 9, 10, 11, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 12, 13, 14],
[0, 0, 0, 0, 0, 0, 15, 16, 17]]])
self.assertAllEqual(result, expected)
def test_properties(self):
"""Tests properties of BlockDiagonalMatrix."""
bdm = block_matrix.BlockDiagonalMatrix(block_shape=(3, 5), block_rows=7)
self.assertEqual(bdm.num_blocks, 7)
self.assertEqual(bdm.block_size, 15)
self.assertEqual(bdm.input_size, 105)
self.assertEqual(bdm.output_shape, (21, 35))
self.assertEqual(bdm.block_shape, (3, 5))
if __name__ == "__main__":
tf.test.main()
| 35.193548 | 79 | 0.571494 | 5,445 | 0.831806 | 0 | 0 | 0 | 0 | 0 | 0 | 1,010 | 0.154293 |
20922cbe834b72450e8a7e093ed7edfd4743f387 | 1,330 | py | Python | Real_Time_Scripts/config_push_file.py | channa006/Basic_Python_Scripts | 4b1b5073ec3eb91dbca8f1541e32a55e466eeb59 | [
"MIT"
] | null | null | null | Real_Time_Scripts/config_push_file.py | channa006/Basic_Python_Scripts | 4b1b5073ec3eb91dbca8f1541e32a55e466eeb59 | [
"MIT"
] | null | null | null | Real_Time_Scripts/config_push_file.py | channa006/Basic_Python_Scripts | 4b1b5073ec3eb91dbca8f1541e32a55e466eeb59 | [
"MIT"
] | null | null | null | from netmiko import ConnectHandler
import os
template = """logging host 192.168.20.5 transport tcp port 514
logging trap 6
interface loopback 30
description "{rtr} loopback interface\""""
username = 'test'
password = "test"
# step 1
# fetch the hostname of the router for the template
for n in range(1, 5):
ip = "192.168.20.{0}".format(n)
# device = ConnectHandler(device_type='cisco_ios', ip=ip, username='test', password='test')
# output = device.send_command("show run | in hostname")
output = 'hostname new01601'
output = output.split(" ")
hostname = output[1]
generatedconfig = template.replace("{rtr}", hostname)
# step 2
# create different config files for each router ready to be pushed on routers.
configfile = open(hostname + "_syslog_config.txt", "w")
configfile.write(generatedconfig)
configfile.close()
#step3 (Validation)
# read files for each of the router (created as routername_syslog_config.txt)
print("Showing contents for generated config files....")
for file in os.listdir('./'):
if file.endswith(".txt"):
print(file)
if ("syslog_config" in file):
hostname = file.split("_")[0]
# print(hostname)
fileconfig = open(file)
print("\nShowing contents of " + hostname)
print(fileconfig.read())
fileconfig.close()
| 30.930233 | 91 | 0.681955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.539098 |
20932d1a311b51df866595b775c0cc75fec00dfe | 3,101 | py | Python | runs.py | petch/elsticity2019 | 9e8079896fdd1011ca769f9e8f5af06d788415e2 | [
"Apache-2.0"
] | 1 | 2020-03-30T15:13:31.000Z | 2020-03-30T15:13:31.000Z | runs.py | petch/elasticity2019 | 9e8079896fdd1011ca769f9e8f5af06d788415e2 | [
"Apache-2.0"
] | null | null | null | runs.py | petch/elasticity2019 | 9e8079896fdd1011ca769f9e8f5af06d788415e2 | [
"Apache-2.0"
] | null | null | null | from ahm import *
from dfm import *
set_log_level(LogLevel.WARNING)
def run(N, n, h, k, c, g, f, ls='mumps', pc='default', do_write=True):
t1 = Timer()
m = int(N/n)
l = int(m/2)
fp, fm, fd, fb = fibers(N, N, l, h, n, k*n, do_write)
tp, tm, td, tb = fibers(N, N, 0, 0, n, k*n, do_write)
lp, lm, ld, lb = fibers(int(m/c), int(m/c), int(l/c), int(h/c), 1, k, do_write)
cp, cm, cd, cb = fibers(int(m/c), int(m/c), int(l/c/n), int(h/c/n), n, k*n, do_write)
E = [20e9, f*20e9]
mu, lmd = lame(E, [0.3, 0.3])
t1.stop()
t2 = Timer()
u = fem(fp, fm, fd, fb, mu, lmd, g, ls, pc, do_write)
ul2 = norm(u)
uli = norm(u.vector(), 'linf')
t2.stop()
t3 = Timer()
ua = ahm(lp + 'ahm_', lm, ld, lb, cp + 'ahm_', cm, cd, cb, mu, lmd, g, ls, pc, do_write)
ea = project(ua - u, u.function_space(), solver_type=ls, preconditioner_type=pc)
eal2 = norm(ea)/ul2
eali = norm(ea.vector(), 'linf')/uli
if do_write:
XDMFFile(fp + 'ahm_error.xdmf').write(ea)
t3.stop()
t4 = Timer()
ud = dfm(cp + 'dfm_', cm, cd, cb, mu, lmd, g, h/N, ls, pc, do_write)
ed = project(ud - u, u.function_space(), solver_type=ls, preconditioner_type=pc)
edl2 = norm(ed)/ul2
edli = norm(ed.vector(), 'linf')/uli
if do_write:
XDMFFile(fp + 'dfm_error.xdmf').write(ed)
t4.stop()
# print(t1.elapsed()[0], t2.elapsed()[0], t3.elapsed()[0], t4.elapsed()[0])
return eal2, eali, edl2, edli
# for ls in ['bicgstab', 'cg', 'gmres', 'minres', 'tfqmr']:
# for pc in ['amg', 'hypre_amg', 'hypre_euclid', 'hypre_parasails']:
# timer = Timer()
# try:
# eal2, eali, edl2, edli = run(256, 1, 2, 2, 1, ls, pc, False)
# print(ls, pc, timer.elapsed(), eal2, eali, edl2, edli)
# except:
# print(ls, pc, timer.elapsed(), 'error')
ls, pc, do_write = 'cg', 'amg', False
N0, n0, k0, d0, c0, f0 = 2048, 4, 1, 2, 1, 32
for g in [Constant([0, -1e5])]:
# run(N0, n0, d0, k0, c0, g, f0, ls, pc, True)
print('k\teal2\teali\tedl2\tedli')
for k in [1, 2, 4, 8, 16]:
eal2, eali, edl2, edli = run(N0, n0, d0, k, c0, g, f0, ls, pc, do_write)
print(f'{k*n0*n0}\t{eal2}\t{eali}\t{edl2}\t{edli}')
print('d\teal2\teali\tedl2\tedli')
for d in [2, 4, 8, 16, 32]:
eal2, eali, edl2, edli = run(N0, n0, d, k0, c0, g, f0, ls, pc, do_write)
print(f'{d/N0}\t{eal2}\t{eali}\t{edl2}\t{edli}')
print('f\teal2\teali\tedl2\tedli')
for f in [8, 16, 32, 64, 128]:
eal2, eali, edl2, edli = run(N0, n0, d0, k0, c0, g, f, ls, pc, do_write)
print(f'{f}\t{eal2}\t{eali}\t{edl2}\t{edli}')
print('n\teal2\teali\tedl2\tedli')
for n in [1, 2, 4, 8, 16]:
eal2, eali, edl2, edli = run(int(N0*n/16), n, d0, k0, c0, g, f0, ls, pc, do_write)
print(f'{n}\t{eal2}\t{eali}\t{edl2}\t{edli}')
print('h\teal2\teali\tedl2\tedli')
for c in [16, 8, 4, 2, 1]:
eal2, eali, edl2, edli = run(N0, n0, 16*d0, k0, c, g, f0, ls, pc, do_write)
print(f'{int(c/N0)}\t{eal2}\t{eali}\t{edl2}\t{edli}') | 38.283951 | 92 | 0.534989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.302483 |
20937e50985275246decc2c29f01d1f6d8867c7d | 10,781 | py | Python | src/p21_CoorAnalyse.py | leekwunfung817/ComputerVision-technique-ObjectRegister | 5bc5f65b3b7b0c246e8d29764e094f2f35b5b50d | [
"MIT"
] | 1 | 2021-01-23T16:26:15.000Z | 2021-01-23T16:26:15.000Z | src/p21_CoorAnalyse.py | leekwunfung817/ComputerVision-technique-ObjectRegister | 5bc5f65b3b7b0c246e8d29764e094f2f35b5b50d | [
"MIT"
] | null | null | null | src/p21_CoorAnalyse.py | leekwunfung817/ComputerVision-technique-ObjectRegister | 5bc5f65b3b7b0c246e8d29764e094f2f35b5b50d | [
"MIT"
] | null | null | null | import cv2
import math
import func_any
import func_apis
import sys
py_name = sys.argv[0]
app_name = py_name.replace('.pyc','').replace('.py','')
exec('import '+app_name)
exec('config = '+app_name+'.config')
import func_colorArea
import json
# history var['movingCoor'] {
# ID { lostTime('lt'):0 , records('r'):[{ centerCoor:(x,y) },{ centerCoor (x,y), dis, dir },...], lastCenter('lc'):(x,y) },
# ...
# }
totalMoveDebug = True
wholeMoveDebug = False
def OnObjectAppear(var,movingData,ID):
if wholeMoveDebug:
print('OnObjectAppear ',json.dumps(movingData, indent=4, sort_keys=True))
pass
def OnObjectMoving(var,movingData,ID):
if wholeMoveDebug:
print('OnObjectMoving ',json.dumps(movingData, indent=4, sort_keys=True))
pass
def OnObjectDisappearTimeout(var,movingData,ID):
if wholeMoveDebug:
print('OnObjectDisappearTimeout ',json.dumps(movingData, indent=4, sort_keys=True))
pass
def OnObjectDisappear(var,movingData,ID):
if wholeMoveDebug:
print('OnObjectDisappear ',json.dumps(movingData, indent=4, sort_keys=True))
if totalMoveDebug:
begin = movingData['r'][0][0]
last = movingData['lc']
# drawMovingCoor(begin,last)
# demoImage = drawMovingLines(var)
totalDirCode = getDirection(begin,last)
totalDir = getDirectionDes(totalDirCode)
totalDis = distanCoor(begin,last)
froArea = func_colorArea.isPointInArea(var,begin)
toArea = func_colorArea.isPointInArea(var,last)
if config['socket'] is not None:
func_apis.socket( config['socket']['ip'] , config['socket']['port'] , totalDirCode+";"+froArea+";"+toArea)
if totalDis>config['coorMinTotalDistan']:
print('OnObjectDisappear:',json.dumps(movingData, indent=4, sort_keys=True),totalDir,totalDis)
print('OnObjectDisappear Area:',' from: ',froArea,' to: ',toArea)
var['lo'] = { 'begin':str(begin[0])+','+str(begin[1]) , 'last':str(last[0])+','+str(last[1]) , 'froArea':froArea, 'toArea':toArea }
def distanCoor(p1,p2):
(x1,y1)=p1
(x2,y2)=p2
distance = math.sqrt( ((x2-x1)**2)+((y2-y1)**2) )
return distance
def getDirectionDes(des):
if des=='0':return 'stay'
if des=='10':return 'left'
if des=='20':return 'right'
if des=='03':return 'up'
if des=='04':return 'down'
if des=='13':return 'left-up'
if des=='14':return 'left-down'
if des=='23':return 'right-up'
if des=='24': return 'right-down'
def getAngle(p1,p2):
(p1x,p1y) = p1
(p2x,p2y) = p2
dy = p1y - p2y
dx = p2x - p1x
rads = math.atan2(dy,dx)
degs = math.degrees(rads)
return degs
# if degs < 0 :
# degs +=90
def getDirection(fro,to):
(x1,y1)=fro
(x2,y2)=to
di = None
if x2>x1:
di = '2'
elif x2<x1:
di = '1'
else:
di = '0'
if y2>y1:
di += '4'
elif y2<y1:
di += '3'
else:
di += '0'
return di
# history {
# ID { lostTime('lt'):0 , records('r'):[{ centerCoor:(x,y) },{ centerCoor (x,y), dis, dir },...], lastCenter('lc'):(x,y) },
# ...
# }
def getHistoryIDByNewCenter(var,centerCoor):
nearestCenter = None
nearestDistance = None
nearestDirect = None
nearestID = None
for ID in list(var['movingCoor'].keys()):
ele = var['movingCoor'][ID]
centerCur = ele['lc']
distance = distanCoor(centerCur,centerCoor)
if distance<=config['coorMaxDistan']:
dirCode = getDirection(centerCur,centerCoor)
direct = getDirectionDes(dirCode)
if nearestDistance is None:
(nearestDistance,nearestCenter,nearestDirect,nearestID) = (distance,centerCur,direct,ID)
elif distance<nearestDistance:
(nearestDistance,nearestCenter,nearestDirect,nearestID) = (distance,centerCur,direct,ID)
if nearestID is None:
return None
return (nearestID,nearestCenter,nearestDistance,nearestDirect)
# new_id=func_any.dts()
def processNewCenter(var,centerCoor):
obj = getHistoryIDByNewCenter(var,centerCoor)
if obj is None:
new_id=func_any.dts()
var['movingCoor'][new_id] = {
'lt':0,
'r':[(centerCoor,0,0)],
'lc':centerCoor
}
OnObjectAppear(var,var['movingCoor'][new_id],new_id)
return (True,new_id)
else:
(nearestID,nearestCenter,nearestDistance,nearestDirect) = obj
var['movingCoor'][nearestID]['lt']=0
var['movingCoor'][nearestID]['r'].append((centerCoor,nearestDistance,nearestDirect))
var['movingCoor'][nearestID]['lc']=centerCoor
OnObjectMoving(var,var['movingCoor'][nearestID],nearestID)
return (False,nearestID)
def XYWH2Center(xywhi):
(x,y,w,h,img) = xywhi
return ( (x+(w/2)) , (y+(h/2)) )
def getCurrentMovingIDs(var,objs_coor):
moveIDs = []
for xywhi in objs_coor:
centerCoor = XYWH2Center(xywhi)
(isNew, ID) = processNewCenter(var,centerCoor)
if not isNew:
moveIDs.append(ID)
return moveIDs
import func_sql
func_sql.query("""
CREATE TABLE IF NOT EXISTS t_coorMov
(
`begin` VARCHAR(25) NOT NULL,
`last` VARCHAR(25) NOT NULL,
`froArea` VARCHAR(20),
`toArea` VARCHAR(20),
`creation_date` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT coorPK PRIMARY KEY (creation_date)
)
""",config)
def processLostCenter(var,moveIDs):
historyIDs = list(var['movingCoor'].keys())
for ID in historyIDs:
if ID not in moveIDs:
if var['movingCoor'][ID]['lt']<3:
var['movingCoor'][ID]['lt']+=1
OnObjectDisappearTimeout(var,var['movingCoor'][ID],ID)
else:
var['lo'] = None
OnObjectDisappear(var,var['movingCoor'][ID],ID)
var['movingCoor'].pop(ID,None)
if var['lo'] is not None:
begin = var['lo']['begin']
last = var['lo']['last']
froArea = var['lo']['froArea']
toArea = var['lo']['toArea']
func_sql.query(f""" INSERT INTO t_coorMov (`begin`,`last`,`froArea`,`toArea`) VALUES ('{begin}','{last}','{froArea}','{toArea}') """,config)
pass
def drawMovingCoor(var,demoImage=None):
demoImage = (var['frame'] if demoImage is None else demoImage)
if var['movingCoor'] is not None:
historyIDs = list(var['movingCoor'].keys())
for ID in historyIDs:
movingData = var['movingCoor'][ID]
begin = movingData['r'][0][0]
(x,y)=begin
begin=(int(x),int(y))
last = movingData['lc']
(x,y)=last
last=(int(x),int(y))
# demoImage = (var['frame'] if demoImage is None else demoImage)
totalDirCode = getDirection(begin,last)
totalDir = getDirectionDes(totalDirCode)
demoImage = cv2.arrowedLine(demoImage, begin, last, (0, 255, 0), 3)
cv2.putText(demoImage, totalDir, last, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 1, cv2.LINE_AA)
return demoImage
def drawMovingLines(var,demoImage = None):
demoImage = (var['frame'] if demoImage is None else demoImage)
if var['movingCoor'] is not None:
historyIDs = list(var['movingCoor'].keys())
for ID in historyIDs:
records = var['movingCoor'][ID]['r']
for recordIndex in range(1,len(records)):
fro = records[recordIndex-1][0]
to = records[recordIndex][0]
# print(fro,to)
(frox,froy) = fro
(tox,toy) = to
fro = (int(frox),int(froy))
to = (int(tox),int(toy))
# print(fro, to)
demoImage = cv2.line(demoImage, fro, to, (0, 255, 0), thickness = 3)
return demoImage
pass
def processNewCentersFroFrame(var,objs_coor):
moveIDs = getCurrentMovingIDs(var,objs_coor)
processLostCenter(var,moveIDs)
demoImage = drawMovingLines(var)
demoImage = drawMovingCoor(var,demoImage)
func_video.WriteVideo2(var,demoImage,'CoorAna')
if demoImage is not None:
cv2.imshow('CoorAnalyse:', demoImage)
return var['movingCoor']
# pass
# history var['movingCoor'] {
# ID {
# lostTime('lt'):0,
# records('r'):[{ centerCoor:(x,y) },{ centerCoor (x,y), dis, dir },...],
# lastCenter('lc'):(x,y),
# lostObj('lo'):
# },
# ...
# }
def run(var,objs_coor):
return processNewCentersFroFrame(var,objs_coor)
# def coors2DisNDir(before,after):
# dirsDat = []
# if len(before)==0 or len(after)==0:
# return dirsDat
# relation_id=0
# for b in before:
# # minDistan = None
# # last_a = None
# # last_ID = None
# for a in after:
# distance = distanCoor(b,a)
# dirCode = getDirection(b,a)
# des = getDirectionDes(dirCode)
# dirsDat.append((str(relation_id),b,a,distance,des))
# relation_id+=1
# # return all combination of the center point (with distance and direction)
# return dirsDat
# def coorLostHandle(var,ID):
# if ID not in var['last_centerCoors']:
# if ID not in var['movingCoor_lostTime'] or var['movingCoor_lostTime'][ID] is None:
# var['movingCoor_lostTime'][ID] = 1
# print('Found ',ID,' object')
# else:
# var['movingCoor_lostTime'][ID] += 1
# print('Lost ',ID,var['movingCoor_lostTime'][ID],' time(s)')
# if var['movingCoor_lostTime'][ID]>=3:
# if var['movingCoor'].pop(ID,None) is None: print('Error remove coor')
# if var['movingCoor_lostTime'].pop(ID,None) is None: print('Error remove coor time out')
# print('Lost ',ID,' already')
# def skipLostMovObj(var):
# keys = list(var['movingCoor'].keys())
# print('movingCoor 1 ',keys)
# for ID in keys:
# coorLostHandle(var,ID)
# # input
# # coor [(x,y,w,h,img)]
# # output
# # center coor [(x,y)]
# def coors2Centers(objs_coor):
# centerCoors = []
# i=0
# for (x, y, w, h, obj_img) in objs_coor:
# center = ( (x+(w/2)) , (y+(h/2)) )
# centerCoors.append(center)
# cv2.imshow('Object '+str(i),obj_img)
# # func_video.WriteVideo2(var,obj_img,'ObjectVideo')
# i+=1
# return centerCoors
# def append2History(dirsDat,var):
# for ID in dirsDat:
# (fro,to,descript) = dirsDat[ID]
# if ID not in var['movingCoor']:
# var['movingCoor'][ID] = []
# var['movingCoor'][ID].append( (fro,to,descript) )
# def dataLabeling(var,dirsDat):
# for (relation_id,b,a,distance,des) in dirsDat:
# if minDistan is None:
# minDistan = distance
# last_a = a
# # last_ID = ID
# elif distance<minDistan:
# minDistan = distance
# last_a = a
# # last_ID = ID
# if minDistan<=config['coorMaxDistan']:
# print('Something move ',des,dirCode)
# dirsDat.append((b,last_a,des))
# print('Same Obj appeared from ',minDistan,' distance')
# else:
# new_id=func_any.dts()
# dirsDat.append((b,last_a,des))
# print('another Obj appeared from ',minDistan,' distance')
# pass
# # input
# # coor [(x,y,w,h,img)]
# # output
# # last frame coor [ID](brfore coor,after coor)
# # history [ID](brfore coor,after coor,distance,direction)
# def objsCoorAna(var,objs_coor):
# # input
# # coor [(x,y,w,h,img)]
# centerCoors = coors2Centers(objs_coor)
# # output
# # center coor [(x,y)]
# if var['last_centerCoors'] is not None: # skip first time
# skipLostMovObj(var)
# # input
# # center coor [(x,y)]
# dirsDat = coors2DisNDir(var['last_centerCoors'],centerCoors)
# # output
# # all combination of the center point (with distance and direction)
# # [(b,a,distance,des)]
# append2History(dirsDat,var)
# print('movingCoor',var['movingCoor'])
# print()
# var['last_centerCoors'] = centerCoors
# return centerCoors
| 26.231144 | 145 | 0.663853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,992 | 0.463037 |
20967a01a86741b766bf6246f14aa2afa9bcccd8 | 3,397 | py | Python | src/day11/__init__.py | CreatingNull/AoC-2021 | ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96 | [
"MIT"
] | null | null | null | src/day11/__init__.py | CreatingNull/AoC-2021 | ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96 | [
"MIT"
] | null | null | null | src/day11/__init__.py | CreatingNull/AoC-2021 | ee0aec7aa2f4d7cb2d62838d39f8c74edae8cc96 | [
"MIT"
] | null | null | null | """--- Day 11: Dumbo Octopus ---"""
from pathlib import Path
from numpy import all as all_
from numpy import array
from numpy import byte
from numpy import where
from aoc import open_utf8
def __execute_step(data: array, bounds: [[]]) -> int:
"""Recursive function to simulate a single step in time for the input
state.
:param data: Numpy array of all octopi energy states.
:param bounds: Limit the data to consider, [[y_lo, y_hi],[x_lo,x_hi]]
:return: Total number of flashes occurring in the simulated time-step.
"""
count_flashes = 0
bounded_view = data[bounds[0][0] : bounds[0][1], bounds[1][0] : bounds[1][1]]
# All octopi energy increase by 1, -1 only used to exclude already flashed.
bounded_view[bounded_view != -1] = bounded_view[bounded_view != -1] + 1
# Locate all the pending 10's for executing a flash
flashes = where(bounded_view == 10)
for index in range(len(flashes[0])):
# Only flash if recursion hasn't already flashed this point.
if bounded_view[flashes[0][index], flashes[1][index]] != -1:
count_flashes += 1
bounded_view[flashes[0][index], flashes[1][index]] = -1
# Recuse pending flash on bounded array (sub-array)
count_flashes += __execute_step(
data,
[ # values adjusted by origin change of bounded view
[
bounds[0][0] + flashes[0][index] - 1
if bounds[0][0] + flashes[0][index] > 0
else bounds[0][0] + flashes[0][index],
bounds[0][0] + flashes[0][index] + 2
if bounds[0][0] + flashes[0][index] < data.shape[1]
else bounds[0][0] + flashes[0][index] + 1,
],
[
bounds[1][0] + flashes[1][index] - 1
if bounds[1][0] + flashes[1][index] > 0
else bounds[1][0] + flashes[1][index],
bounds[1][0] + flashes[1][index] + 2
if bounds[1][0] + flashes[1][index] < data.shape[1]
else bounds[1][0] + flashes[1][index] + 1,
],
],
)
return count_flashes
def execute_steps(data: array) -> (int, int):
"""Simulates the final octopi array after num_steps and counts the flashes.
:param data: Numpy array of octopi energy states.
:return: The summation of flashes, the step at which all octopi flashed.
"""
flashes = 0
all_flashed = -1
i = 1 # AoC counts loading data as step 0.
while all_flashed == -1:
new_flashes = __execute_step(data, [[0, data.shape[0]], [0, data.shape[1]]])
data[data == -1] = 0 # reset all flashes to 0 energy
if i <= 100: # part one is up to 99 only
flashes += new_flashes
if all_(data == 0):
all_flashed = i
i += 1
return flashes, all_flashed
def load_dataset(dataset_path: Path) -> array:
"""Loads in the dataset as a numpy array of signed bytes."""
with open_utf8(dataset_path) as file:
return array(
[
[byte(cell) for cell in row.strip()]
for row in file
if len(row.strip()) > 0
],
dtype=byte,
)
| 39.045977 | 84 | 0.541066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.29055 |
209687c99cd37798d8ff94e233db49d920a9fa57 | 14,653 | py | Python | tests/test_record_parsing.py | jodal/python-netsgiro | b180ad3e6c8e4788c2dc539bdcd35b64b2bf6247 | [
"Apache-2.0"
] | null | null | null | tests/test_record_parsing.py | jodal/python-netsgiro | b180ad3e6c8e4788c2dc539bdcd35b64b2bf6247 | [
"Apache-2.0"
] | null | null | null | tests/test_record_parsing.py | jodal/python-netsgiro | b180ad3e6c8e4788c2dc539bdcd35b64b2bf6247 | [
"Apache-2.0"
] | null | null | null | from datetime import date
import pytest
import netsgiro
import netsgiro.records
def test_transmission_start():
    """A valid transmission start line parses into its identifying fields."""
    line = (
        'NY00001055555555100008100008080000000000'
        '0000000000000000000000000000000000000000'
    )
    parsed = netsgiro.records.TransmissionStart.from_string(line)

    assert parsed.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_START
    assert parsed.service_code == netsgiro.ServiceCode.NONE
    assert parsed.transmission_number == '1000081'
    assert parsed.data_transmitter == '55555555'
    assert parsed.data_recipient == '00008080'
def test_transmission_start_fails_when_invalid_format():
    """Parsing a malformed line raises ValueError with a descriptive message.

    The message embeds the offending line's repr, so the test matches on the
    full expected text.
    """
    line = 'XX' + ('0' * 78)
    # pytest.raises(match=...) interprets the pattern as a regular
    # expression; escape the expected message so it is matched literally
    # even if it ever contains regex metacharacters.
    with pytest.raises(
        ValueError,
        match=re.escape(
            '{!r} did not match TransmissionStart record format'.format(line)
        ),
    ):
        netsgiro.records.TransmissionStart.from_string(line)
def test_transmission_end():
    """A transmission end line exposes record/transaction totals and date."""
    parsed = netsgiro.records.TransmissionEnd.from_string(
        'NY00008900000006000000220000000000000060'
        '0170604000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_END
    assert parsed.service_code == netsgiro.ServiceCode.NONE
    assert parsed.num_records == 22
    assert parsed.num_transactions == 6
    assert parsed.total_amount == 600
    assert parsed.nets_date == date(2004, 6, 17)
def test_assignment_start_for_avtalegiro_payment_requests():
    """AvtaleGiro transaction assignment start parses id, number, account."""
    parsed = netsgiro.records.AssignmentStart.from_string(
        'NY21002000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert parsed.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert parsed.assignment_number == '4000086'
    assert parsed.assignment_account == '88888888888'
    assert parsed.agreement_id == '000000000'
def test_assignment_start_for_avtalegiro_agreements():
    """Agreement assignments (type 24) carry no agreement id."""
    parsed = netsgiro.records.AssignmentStart.from_string(
        'NY21242000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
    )
    assert parsed.assignment_number == '4000086'
    assert parsed.assignment_account == '88888888888'
    # The agreement id field is unused for agreement assignments.
    assert parsed.agreement_id is None
def test_assignment_start_for_avtalegiro_cancellation():
    """Cancellation assignments (type 36) carry no agreement id."""
    parsed = netsgiro.records.AssignmentStart.from_string(
        'NY21362000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.assignment_type
        == netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
    )
    assert parsed.assignment_number == '4000086'
    assert parsed.assignment_account == '88888888888'
    # The agreement id field is unused for cancellation assignments.
    assert parsed.agreement_id is None
def test_assignment_start_for_ocr_giro_transactions():
    """OCR Giro assignment start parses agreement id, number and account."""
    parsed = netsgiro.records.AssignmentStart.from_string(
        'NY09002000100856600000029999104276400000'
        '0000000000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert parsed.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert parsed.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert parsed.assignment_number == '0000002'
    assert parsed.assignment_account == '99991042764'
    assert parsed.agreement_id == '001008566'
def test_assignment_end_for_avtalegiro_payment_requests():
    """AvtaleGiro transaction assignment end parses totals and date range."""
    parsed = netsgiro.records.AssignmentEnd.from_string(
        'NY21008800000006000000200000000000000060'
        '0170604170604000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert parsed.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert parsed.num_records == 20
    assert parsed.num_transactions == 6
    assert parsed.total_amount == 600
    assert parsed.nets_date_earliest == date(2004, 6, 17)
    assert parsed.nets_date_latest == date(2004, 6, 17)
def test_assignment_end_for_avtalegiro_agreements():
    """Agreement assignment end records have no amount or date fields."""
    parsed = netsgiro.records.AssignmentEnd.from_string(
        'NY21248800000006000000200000000000000000'
        '0000000000000000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
    )
    assert parsed.num_records == 20
    assert parsed.num_transactions == 6
    # Amount and date fields are unused for agreement assignments.
    assert parsed.total_amount is None
    assert parsed.nets_date_earliest is None
    assert parsed.nets_date_latest is None
def test_assignment_end_for_avtalegiro_cancellations():
    """Cancellation assignment end parses totals and date range."""
    parsed = netsgiro.records.AssignmentEnd.from_string(
        'NY21368800000006000000200000000000000060'
        '0170604170604000000000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.assignment_type
        == netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
    )
    assert parsed.num_records == 20
    assert parsed.num_transactions == 6
    assert parsed.total_amount == 600
    assert parsed.nets_date_earliest == date(2004, 6, 17)
    assert parsed.nets_date_latest == date(2004, 6, 17)
def test_assignment_end_for_ocr_giro_transactions():
    """OCR Giro assignment end parses totals and all three date fields."""
    parsed = netsgiro.records.AssignmentEnd.from_string(
        'NY09008800000020000000420000000000514490'
        '0200192200192200192000000000000000000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert parsed.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert parsed.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert parsed.num_records == 42
    assert parsed.num_transactions == 20
    assert parsed.total_amount == 5144900
    # All three date fields are set to the same day in this fixture.
    assert parsed.nets_date == date(1992, 1, 20)
    assert parsed.nets_date_earliest == date(1992, 1, 20)
    assert parsed.nets_date_latest == date(1992, 1, 20)
def test_transaction_amount_item_1_for_avtalegiro_payment_request():
    """Amount item 1 for a payment request parses date, amount and KID."""
    parsed = netsgiro.records.TransactionAmountItem1.from_string(
        'NY2121300000001170604           00000000'
        '000000100          008000011688373000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.transaction_type
        == netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert parsed.transaction_number == 1
    assert parsed.nets_date == date(2004, 6, 17)
    assert parsed.amount == 100
    assert parsed.kid == '008000011688373'
def test_transaction_amount_item_1_for_avtalegiro_cancellation():
    """Amount item 1 for a cancellation (type 93) parses like a request."""
    parsed = netsgiro.records.TransactionAmountItem1.from_string(
        'NY2193300000001170604           00000000'
        '000000100          008000011688373000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert parsed.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert (
        parsed.transaction_type
        == netsgiro.TransactionType.AVTALEGIRO_CANCELLATION
    )
    assert parsed.transaction_number == 1
    assert parsed.nets_date == date(2004, 6, 17)
    assert parsed.amount == 100
    assert parsed.kid == '008000011688373'
def test_transaction_amount_item_1_for_ocr_giro_transactions():
    """OCR Giro amount item 1 parses settlement details, amount and KID."""
    parsed = netsgiro.records.TransactionAmountItem1.from_string(
        'NY09103000000012001921320101464000000000'
        '000102000                  0000531000000'
    )

    assert parsed.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert parsed.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert (
        parsed.transaction_type
        == netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert parsed.transaction_number == 1
    assert parsed.nets_date == date(1992, 1, 20)
    # Settlement-related fields specific to OCR Giro.
    assert parsed.centre_id == '13'
    assert parsed.day_code == 20
    assert parsed.partial_settlement_number == 1
    assert parsed.partial_settlement_serial_number == '01464'
    assert parsed.sign == '0'
    assert parsed.amount == 102000
    assert parsed.kid == '0000531'
def test_transaction_amount_item_2_for_avtalegiro_payment_request():
    """Amount item 2: AvtaleGiro request carries the payer name, no reference."""
    record = netsgiro.records.TransactionAmountItem2.from_string(
        'NY2121310000001NAVN                     '
        '                                   00000'
    )
    assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert record.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert record.transaction_number == 1
    assert record.payer_name == 'NAVN'
    assert record.reference is None
def test_transaction_amount_item_2_for_ocr_giro_transactions():
    """Amount item 2: OCR Giro carries form number, reference and debit account."""
    record = netsgiro.records.TransactionAmountItem2.from_string(
        'NY09103100000019636827194099038562000000'
        '0160192999905123410000000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert record.transaction_type == (
        netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert record.transaction_number == 1
    assert record.form_number == '9636827194'
    assert record.payer_name is None
    assert record.reference == '099038562'
    assert record.bank_date == date(1992, 1, 16)
    assert record.debit_account == '99990512341'
def test_transaction_amount_item_2_for_ocr_giro_with_data_in_filler_field():
    """Amount item 2: unexpected data in the filler field is kept on `_filler`."""
    record = netsgiro.records.TransactionAmountItem2.from_string(
        'NY09103100000029797596016097596016188320'
        '6160192999910055240000000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert record.transaction_type == (
        netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert record.transaction_number == 2
    assert record.form_number == '9797596016'
    assert record.payer_name is None
    assert record.reference == '097596016'
    assert record.bank_date == date(1992, 1, 16)
    assert record.debit_account == '99991005524'
    assert record._filler == '1883206'
def test_transaction_amount_item_3_for_ocr_giro_transactions():
    """Amount item 3: free text is parsed with the surrounding padding removed."""
    record = netsgiro.records.TransactionAmountItem3.from_string(
        'NY0921320000001Foo bar baz              '
        '               0000000000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_3
    assert record.transaction_type == (
        netsgiro.TransactionType.PURCHASE_WITH_TEXT
    )
    assert record.transaction_number == 1
    assert record.text == 'Foo bar baz'
def test_transaction_specification_for_avtalegiro_payment_request():
    """Specification record: line/column addressed free text for the payer."""
    record = netsgiro.records.TransactionSpecification.from_string(
        'NY212149000000140011 Gjelder Faktura: 16'
        '8837 Dato: 19/03/0400000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_SPECIFICATION
    assert record.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert record.transaction_number == 1
    assert record.line_number == 1
    assert record.column_number == 1
    assert record.text == ' Gjelder Faktura: 168837 Dato: 19/03/04'
def make_specification_records(num_lines, num_columns=2):
    """Build num_lines * num_columns dummy specification records.

    Helper for the to_text tests below; each record gets a unique
    (line, column) pair and a matching text payload.
    """
    return [
        netsgiro.records.TransactionSpecification(
            service_code=netsgiro.ServiceCode.AVTALEGIRO,
            transaction_type=(
                netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
            ),
            transaction_number=1,
            line_number=line,
            column_number=column,
            text='Line {}, column {}'.format(line, column),
        )
        for line in range(1, num_lines + 1)
        for column in range(1, num_columns + 1)
    ]
def test_transaction_specification_to_text_with_max_number_of_records():
    """to_text() accepts the maximum of 84 records (42 lines x 2 columns)."""
    records = make_specification_records(42)
    result = netsgiro.records.TransactionSpecification.to_text(records)
    assert len(result.splitlines()) == 42
    assert 'Line 1, column 1' in result
    assert 'Line 42, column 2' in result
def test_transaction_specification_to_text_with_too_many_records():
    """to_text() rejects more than 84 specification records."""
    records = make_specification_records(43)
    with pytest.raises(
        ValueError, match='Max 84 specification records allowed, got 86'
    ):
        netsgiro.records.TransactionSpecification.to_text(records)
def test_avtalegiro_active_agreement():
    """Agreement record: registration type 0 (active), notification enabled ('J')."""
    record = netsgiro.records.AvtaleGiroAgreement.from_string(
        'NY21947000000010          00800001168837'
        '3J00000000000000000000000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
    assert record.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
    )
    assert record.transaction_number == 1
    assert record.registration_type == (
        netsgiro.AvtaleGiroRegistrationType.ACTIVE_AGREEMENT
    )
    assert record.kid == '008000011688373'
    assert record.notify is True
def test_avtalegiro_new_or_updated_agreement():
    """Agreement record: registration type 1 (new/updated), notification off ('N')."""
    record = netsgiro.records.AvtaleGiroAgreement.from_string(
        'NY21947000000011          00800001168837'
        '3N00000000000000000000000000000000000000'
    )
    assert record.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert record.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
    assert record.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
    )
    assert record.transaction_number == 1
    assert record.registration_type == (
        netsgiro.AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
    )
    assert record.kid == '008000011688373'
    assert record.notify is False
| 34.076744 | 80 | 0.743943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,214 | 0.151095 |
2098c33dfd3aab904ba4db79b07d869e376e18df | 3,630 | py | Python | day_1/day1.py | secworks/advent_of_code_2017 | 20ea821710c388429809ca69102a164542d5d798 | [
"BSD-2-Clause"
] | null | null | null | day_1/day1.py | secworks/advent_of_code_2017 | 20ea821710c388429809ca69102a164542d5d798 | [
"BSD-2-Clause"
] | null | null | null | day_1/day1.py | secworks/advent_of_code_2017 | 20ea821710c388429809ca69102a164542d5d798 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# day_1.py
# --------
# Solution for Advent of code 2017, day 1.
# http://adventofcode.com/2017/day/1
#
# Status: Done.
#
# Joachim Strömbergson 2017
#
#=======================================================================
import sys
VERBOSE = 0
#-------------------------------------------------------------------
# get_input()
#-------------------------------------------------------------------
def get_input():
    """Read the puzzle input from 'my_input.txt', stripped of surrounding whitespace."""
    with open('my_input.txt', 'r') as input_file:
        return input_file.read().strip()
#-------------------------------------------------------------------
# parse()
#-------------------------------------------------------------------
def parse(s):
    """Sum every digit that equals the next digit, treating s as circular.

    Solves Advent of Code 2017 day 1, part one.

    Args:
        s (str): a string of decimal digits.

    Returns:
        int: the captcha sum; 0 for an empty string (the previous
        implementation raised IndexError on empty input via an unused
        `first = s[0]` assignment).
    """
    length = len(s)
    # Modular indexing makes the last digit compare against the first,
    # removing the separate wrap-around special case.
    return sum(int(ch) for i, ch in enumerate(s) if ch == s[(i + 1) % length])
#-------------------------------------------------------------------
# parse_two()
#-------------------------------------------------------------------
def parse_two(string):
    """Sum every digit that matches the digit halfway around the circular string.

    Solves Advent of Code 2017 day 1, part two. The two pointers start
    half a revolution apart and walk once around the string together.
    """
    length = len(string)
    total = 0
    i = 0
    j = int(length / 2)
    for ctr in range(length):
        if VERBOSE:
            print("ctr: %d, acc: %d, i: %d, idata: %s, j: %d, jdata: %s" %
                  (ctr, total, i, string[i], j, string[j]))
        if string[i] == string[j]:
            total += int(string[i])
        i = (i + 1) % length
        j = (j + 1) % length
    return total
#-------------------------------------------------------------------
# part_one()
#-------------------------------------------------------------------
def part_one(string):
    """Print the part one answer for the given digit string."""
    print("Result part one: ", parse(string))
    print("")
#-------------------------------------------------------------------
# part_two()
#-------------------------------------------------------------------
def part_two(string):
    """Print the part two answer for the given digit string."""
    print("Result part two: ", parse_two(string))
    print("")
#-------------------------------------------------------------------
# test_one()
#-------------------------------------------------------------------
def test_one():
    """Check parse() against the worked examples from the problem text."""
    print("Teststrings part one:")
    print(parse("1122"), "Should be 3")
    print(parse("1111"), "Should be 4")
    print(parse("1234"), "Should be 0")
    print(parse("91212129"), "Should be 9")
    print("")
#-------------------------------------------------------------------
# test_two()
#-------------------------------------------------------------------
def test_two():
    """Check parse_two() against the worked examples from the problem text."""
    print("Teststrings part two:")
    print(parse_two("1212"), "Should be 6")
    print(parse_two("1221"), "Should be 0")
    print(parse_two("123425"), "Should be 4")
    print(parse_two("123123"), "Should be 12")
    print(parse_two("12131415"), "Should be 4")
    print("")
#-------------------------------------------------------------------
# main()
#-------------------------------------------------------------------
def main():
    """Solve both parts on the real input, then print the self-checks."""
    my_string = get_input()
    part_one(my_string)
    part_two(my_string)
    test_one()
    test_two()
#-------------------------------------------------------------------
#-------------------------------------------------------------------
if __name__=="__main__":
    # Run the main function.
    sys.exit(main())
#=======================================================================
# EOF day_1.py
#=======================================================================
| 26.691176 | 75 | 0.316253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,259 | 0.622143 |
209aa10a77a0004b2e31e157cd6fb51de7960d31 | 2,718 | py | Python | floodsystem/station.py | knived/ia-flood-risk-project | de911f9a1b90d8a18af664be5af773f6dd0fc0bf | [
"MIT"
] | null | null | null | floodsystem/station.py | knived/ia-flood-risk-project | de911f9a1b90d8a18af664be5af773f6dd0fc0bf | [
"MIT"
] | null | null | null | floodsystem/station.py | knived/ia-flood-risk-project | de911f9a1b90d8a18af664be5af773f6dd0fc0bf | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides a model for a monitoring station, and tools
for manipulating/modifying station data
"""
class MonitoringStation:
    """A river level monitoring station.

    Holds the station's identifiers, location and typical water-level
    range; `latest_level` starts as None and is filled in later by the
    data-fetching layer.
    """

    def __init__(self, station_id, measure_id, label, coord, typical_range,
                 river, town):
        self.station_id = station_id
        self.measure_id = measure_id

        # Handle case of erroneous data where data system returns
        # '[label, label]' rather than 'label'
        self.name = label
        if isinstance(label, list):
            self.name = label[0]

        self.coord = coord
        self.typical_range = typical_range
        self.river = river
        self.town = town
        self.latest_level = None

    def __repr__(self):
        d = "Station name: {}\n".format(self.name)
        d += " id: {}\n".format(self.station_id)
        d += " measure id: {}\n".format(self.measure_id)
        d += " coordinate: {}\n".format(self.coord)
        d += " town: {}\n".format(self.town)
        d += " river: {}\n".format(self.river)
        d += " typical range: {}".format(self.typical_range)
        return d

    def typical_range_consistent(self):
        """Return True if the typical range data is available and
        consistent (low bound <= high bound), False otherwise."""
        # `is None` (identity) rather than `== None`; a single boolean
        # expression replaces the previous if/elif/else chain.
        if self.typical_range is None:
            return False
        return self.typical_range[0] <= self.typical_range[1]

    def relative_water_level(self):
        """Return the latest water level as a fraction of the typical range
        (0.0 at the low bound, 1.0 at the high bound).

        Returns None if the latest level is unavailable or the typical
        range is missing/inconsistent."""
        if self.latest_level is None or not self.typical_range_consistent():
            return None
        return (self.latest_level - self.typical_range[0]) / (
            self.typical_range[1] - self.typical_range[0])
def inconsistent_typical_range_stations(stations):
    """Return the names of stations whose typical range data is
    unavailable or inconsistent.

    Args:
        stations: list of MonitoringStation (or compatible) objects.

    Returns:
        list of str: station names failing typical_range_consistent().
    """
    # `not ...consistent()` instead of comparing a boolean to False.
    return [station.name for station in stations
            if not station.typical_range_consistent()]
209bc0cc626bc5c803a48b3126392e386031528c | 285 | py | Python | sokoapp/contests/admin.py | Mercy-Nekesa/sokoapp | 6c7bc4c1278b7223226124a49fc33c5b8b6b617a | [
"MIT"
] | 1 | 2019-04-01T05:52:37.000Z | 2019-04-01T05:52:37.000Z | sokoapp/contests/admin.py | Mercy-Nekesa/sokoapp | 6c7bc4c1278b7223226124a49fc33c5b8b6b617a | [
"MIT"
] | 1 | 2015-03-11T16:18:12.000Z | 2015-03-11T16:18:12.000Z | sokoapp/contests/admin.py | Mercy-Nekesa/sokoapp | 6c7bc4c1278b7223226124a49fc33c5b8b6b617a | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import TimePeriod
class TimePeriodAdminBase(object):
    # Columns shown on the Django admin changelist for time-period models.
    list_display = ('name', 'period_start', 'period_end',)
class TimePeriodAdmin(TimePeriodAdminBase, admin.ModelAdmin):
    # Concrete ModelAdmin; the display configuration lives on the shared
    # base so other admin classes can reuse it.
    pass
# Make TimePeriod editable in the Django admin with the class above.
admin.site.register(TimePeriod, TimePeriodAdmin)
209c085753176d3ce331b595b6bd600bfc4c7cbd | 4,101 | py | Python | dataset/dataset_seq2seq.py | tianyilt/PQ-NET | fa5a160d78492ecb89ba09889fa9186ba6757921 | [
"MIT"
] | 95 | 2020-03-11T11:59:00.000Z | 2022-03-21T03:51:43.000Z | dataset/dataset_seq2seq.py | tianyilt/PQ-NET | fa5a160d78492ecb89ba09889fa9186ba6757921 | [
"MIT"
] | 23 | 2020-03-29T09:57:47.000Z | 2022-03-12T00:19:17.000Z | dataset/dataset_seq2seq.py | tianyilt/PQ-NET | fa5a160d78492ecb89ba09889fa9186ba6757921 | [
"MIT"
] | 17 | 2020-03-28T06:52:21.000Z | 2022-02-22T05:10:55.000Z | from torch.utils.data import Dataset
import torch
import os
import json
from dataset.data_utils import collect_data_id, load_from_hdf5_seq
# Seq2Seq dataset
######################################################
class Seq2SeqDataset(Dataset):
    """PyTorch dataset of per-part shape sequences for seq2seq training.

    Each item is one shape, loaded from '<data_root>/<class_name>/<name>.h5'
    and returned as per-part voxel grids plus condition/affine tensors.
    """
    def __init__(self, phase, data_root, class_name, max_n_parts):
        # phase: dataset split identifier passed through to collect_data_id
        # max_n_parts: shapes with more parts than this are filtered out
        self.data_root = os.path.join(data_root, class_name)
        self.class_name = class_name
        self.max_n_parts = max_n_parts
        self.shape_names = self.load_shape_names(phase, self.max_n_parts)
        self.all_paths = [os.path.join(self.data_root, name + '.h5') for name in self.shape_names]
        self.phase = phase
    def load_shape_names(self, phase, max_n_parts, min_n_parts=2):
        """Return shape ids of the split whose part count lies in
        [min_n_parts, max_n_parts] and whose HDF5 file exists on disk."""
        shape_names = collect_data_id(self.class_name, phase)
        # 'data/<class>_info.json' maps shape id -> number of parts.
        with open('data/{}_info.json'.format(self.class_name), 'r') as fp:
            nparts_dict = json.load(fp)
        filtered_shape_names = []
        for name in shape_names:
            shape_h5_path = os.path.join(self.data_root, name + '.h5')
            if not os.path.exists(shape_h5_path): # check file existence
                continue
            if min_n_parts <= nparts_dict[name] <= max_n_parts:
                filtered_shape_names.append(name)
        return filtered_shape_names
    def __getitem__(self, index):
        """Load one shape and return its tensors as a dict.

        'sign' marks the stop step (1 on the last part, 0 elsewhere) and
        'mask' is all ones for real parts; both are padded later by the
        collate function. NOTE(review): the exact layout of 'cond' and
        'affine' comes from load_from_hdf5_seq — confirm there.
        """
        path = self.all_paths[index]
        data_dict = load_from_hdf5_seq(path, self.max_n_parts, return_numpy=True)
        n_parts = data_dict['n_parts']
        parts_vox3d = torch.tensor(data_dict['vox3d'], dtype=torch.float32).unsqueeze(1)  # (n_parts, 1, dim, dim, dim)
        stop_sign = torch.zeros((n_parts, 1), dtype=torch.float32)
        stop_sign[-1] = 1
        mask = torch.ones((n_parts, 1), dtype=torch.float32)
        cond = torch.tensor(data_dict['cond'], dtype=torch.float32)
        batch_affine = torch.tensor(data_dict['affine'], dtype=torch.float32)
        batch_affine_target = batch_affine.clone()
        return {'vox3d': parts_vox3d, 'n_parts': n_parts, 'path': path, 'sign': stop_sign,
                'mask': mask, 'cond': cond,
                'affine_input': batch_affine, 'affine_target': batch_affine_target}
    def __len__(self):
        # Number of shapes in this split after filtering.
        return len(self.shape_names)
def pad_collate_fn_for_dict(batch):
    """Collate fn: zero-pad every per-part tensor to the batch's max part count.

    Args:
        batch: list of sample dicts produced by Seq2SeqDataset.__getitem__

    Returns:
        dict with 'vox3d' stacked along dim 0 (batch-first) and the
        step-wise tensors ('sign', 'mask', 'affine_input', 'affine_target')
        stacked along dim 1 (sequence-first); 'n_parts' and 'path' stay
        plain lists and 'cond' is stacked along dim 0 unpadded.
    """
    n_parts_batch = [d['n_parts'] for d in batch]
    max_n_parts = max(n_parts_batch)
    name_batch = [d['path'] for d in batch]

    def _pad_and_stack(key, stack_dim):
        # Pad each sample's tensor along its part axis (dim 0), then stack.
        padded = [pad_tensor(d[key], pad=max_n_parts, dim=0) for d in batch]
        return torch.stack(padded, dim=stack_dim)

    vox3d_batch = _pad_and_stack('vox3d', 0)
    sign_batch = _pad_and_stack('sign', 1)
    mask_batch = _pad_and_stack('mask', 1)
    affine_input = _pad_and_stack('affine_input', 1)
    affine_target = _pad_and_stack('affine_target', 1)
    cond_batch = torch.stack([d['cond'] for d in batch], dim=0)
    return {'vox3d': vox3d_batch, 'n_parts': n_parts_batch, 'path': name_batch, 'sign': sign_batch,
            'mask': mask_batch, 'cond': cond_batch,
            'affine_input': affine_input, 'affine_target': affine_target}
def pad_tensor(vec, pad, dim):
    """Zero-pad `vec` along dimension `dim` so its size there becomes `pad`.

    Args:
        vec: tensor to pad
        pad: the target size along `dim`
        dim: dimension to pad

    Returns:
        a new tensor with float zeros appended at the end of `dim`.
    """
    filler_shape = list(vec.shape)
    filler_shape[dim] = pad - vec.size(dim)
    filler = torch.zeros(filler_shape)
    return torch.cat((vec, filler), dim=dim)
if __name__ == "__main__":
    # No standalone behaviour; this module is import-only.
    pass
| 38.688679 | 119 | 0.652524 | 2,020 | 0.492563 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.157279 |
209c796dea1b1a1905e45a4e4298e27aa24479a7 | 16,885 | py | Python | codes/qtm/base.py | vutuanhai237/QuantumTomographyProject | 78058e3faece2209e46c9f9e16a1c38cdb33e7e2 | [
"MIT"
] | null | null | null | codes/qtm/base.py | vutuanhai237/QuantumTomographyProject | 78058e3faece2209e46c9f9e16a1c38cdb33e7e2 | [
"MIT"
] | null | null | null | codes/qtm/base.py | vutuanhai237/QuantumTomographyProject | 78058e3faece2209e46c9f9e16a1c38cdb33e7e2 | [
"MIT"
] | null | null | null | import qiskit
import qtm.progress_bar
import qtm.constant
import qtm.qfim
import qtm.noise
import qtm.optimizer
import qtm.fubini_study
import numpy as np
import types, typing
def measure(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
    """Measure the quantum circuit and estimate the all-zero probability.

    Args:
        - qc (QuantumCircuit): Measured circuit
        - qubits (np.ndarray): List of measured qubit
        - cbits (list, optional): classical bits written to; defaults to
          the same indices as `qubits`.

    Returns:
        - float: frequency of the all-zero ("00...0") outcome over
          qtm.constant.num_shots shots
    """
    n = len(qubits)
    if cbits == []:
        cbits = qubits.copy()
    for i in range(0, n):
        qc.measure(qubits[i], cbits[i])
    if qtm.constant.noise_prob > 0:
        # Noisy path: execute with the project noise model, then apply
        # measurement-error mitigation to the raw counts.
        noise_model = qtm.noise.generate_noise_model(
            n, qtm.constant.noise_prob)
        results = qiskit.execute(qc, backend=qtm.constant.backend,
                                 noise_model=noise_model,
                                 shots=qtm.constant.num_shots).result()
        # Raw counts
        counts = results.get_counts()
        # Mitigating noise based on https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html
        meas_filter = qtm.noise.generate_measurement_filter(
            n, noise_model=noise_model)
        # Mitigated counts
        counts = meas_filter.apply(counts.copy())
    else:
        # Noiseless path: plain execution on the configured backend.
        counts = qiskit.execute(
            qc, backend=qtm.constant.backend,
            shots=qtm.constant.num_shots).result().get_counts()
    return counts.get("0" * len(qubits), 0) / qtm.constant.num_shots
def x_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the X basis (H, then Z-basis measure).

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits; when omitted (or empty,
            matching the old `cbits=[]` default) each qubit is measured
            into the classical bit of the same index.

    Returns:
        qiskit.QuantumCircuit: the same circuit with measure gates added
    """
    # `cbits=None` avoids the mutable-default-argument pitfall; `not cbits`
    # treats both None and [] as "mirror the qubit indices", preserving
    # the original behaviour.
    if not cbits:
        cbits = qubits.copy()
    for i, qubit in enumerate(qubits):
        qc.h(qubit)
        qc.measure(qubit, cbits[i])
    return qc
def y_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the Y basis (S-dagger, H, then measure).

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits; when omitted (or empty,
            matching the old `cbits=[]` default) each qubit is measured
            into the classical bit of the same index.

    Returns:
        qiskit.QuantumCircuit: the same circuit with measure gates added
    """
    # `cbits=None` avoids the mutable-default-argument pitfall; `not cbits`
    # treats both None and [] as "mirror the qubit indices", preserving
    # the original behaviour.
    if not cbits:
        cbits = qubits.copy()
    for i, qubit in enumerate(qubits):
        qc.sdg(qubit)
        qc.h(qubit)
        qc.measure(qubit, cbits[i])
    return qc
def z_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the computational (Z) basis.

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits; when omitted (or empty,
            matching the old `cbits=[]` default) each qubit is measured
            into the classical bit of the same index.

    Returns:
        qiskit.QuantumCircuit: the same circuit with measure gates added
    """
    # `cbits=None` avoids the mutable-default-argument pitfall; `not cbits`
    # treats both None and [] as "mirror the qubit indices", preserving
    # the original behaviour.
    if not cbits:
        cbits = qubits.copy()
    for i, qubit in enumerate(qubits):
        qc.measure(qubit, cbits[i])
    return qc
def get_u_hat(thetas: np.ndarray, create_circuit_func: types.FunctionType, num_qubits: int,
              **kwargs):
    """Return the statevector of the inverse of the reconstructed circuit.

    Args:
        - thetas (np.ndarray): Parameters
        - create_circuit_func: function that appends the ansatz to a circuit
        - num_qubits (int): number of qubits

    Returns:
        - Statevector: the state produced by the inverted ansatz circuit
    """
    base_circuit = qiskit.QuantumCircuit(num_qubits, num_qubits)
    if kwargs:
        inverted = create_circuit_func(base_circuit, thetas, **kwargs).inverse()
    else:
        inverted = create_circuit_func(base_circuit, thetas).inverse()
    return qiskit.quantum_info.Statevector.from_instruction(inverted)
def get_cry_index(create_circuit_func: types.FunctionType, thetas: np.ndarray, num_qubits, **kwargs):
    """Return a list where the i-th entry is 1 iff thetas[i] belongs to a CRY gate.

    Args:
        - create_circuit_func (types.FunctionType): The creating circuit function
        - thetas (np.ndarray): Parameters
        - num_qubits: number of qubits of the ansatz circuit

    Returns:
        - list: 0/1 flags, one per gate parameter
    """
    qc = qiskit.QuantumCircuit(num_qubits)
    qc = create_circuit_func(qc, thetas, **kwargs)
    layers = qtm.fubini_study.split_into_layers(qc)
    index_list = []
    for layer in layers:
        for gate in layer[1]:
            index_list.append(1 if gate[0] == 'cry' else 0)
            if len(index_list) == len(thetas):
                # Stop as soon as every parameter has been classified;
                # any remaining gates are ignored.
                return index_list
    return index_list
def grad_loss(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
              thetas: np.ndarray, **kwargs):
    """Return the gradient of the loss function

    L = 1 - |<psi~|psi>|^2 = 1 - P_0

    => nabla_L = - nabla_P_0 = - r (P_0(+s) - P_0(-s))

    Args:
        - qc (QuantumCircuit): The quantum circuit want to calculate the gradient
        - create_circuit_func (Function): The creating circuit function
        - thetas (np.ndarray): Parameters
        - **kwargs: additional parameters for different create_circuit_func()

    Returns:
        - np.ndarray: the gradient vector
    """
    # CRY parameters need the four-term shift rule; plain rotations use
    # the standard two-term rule, so classify each parameter first.
    index_list = get_cry_index(create_circuit_func, thetas,
                               num_qubits=qc.num_qubits, **kwargs)
    grad_loss = np.zeros(len(thetas))
    for i in range(0, len(thetas)):
        if index_list[i] == 0:
            # In equation (13): two-term parameter-shift rule
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.two_term_psr['s']
            thetas2[i] -= qtm.constant.two_term_psr['s']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            grad_loss[i] = -qtm.constant.two_term_psr['r'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits))))
        if index_list[i] == 1:
            # In equation (14): four-term parameter-shift rule for CRY
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas3, thetas4 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.four_term_psr['alpha']
            thetas2[i] -= qtm.constant.four_term_psr['alpha']
            thetas3[i] += qtm.constant.four_term_psr['beta']
            thetas4[i] -= qtm.constant.four_term_psr['beta']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            qc3 = create_circuit_func(qc.copy(), thetas3, **kwargs)
            qc4 = create_circuit_func(qc.copy(), thetas4, **kwargs)
            grad_loss[i] = - (qtm.constant.four_term_psr['d_plus'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits)))) - qtm.constant.four_term_psr['d_minus'] * (
                qtm.base.measure(qc3, list(range(qc3.num_qubits))) -
                qtm.base.measure(qc4, list(range(qc4.num_qubits)))))
    return grad_loss
def grad_psi(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
             thetas: np.ndarray, r: float, s: float, **kwargs):
    """Return the derivative of psi via the parameter-shift rule.

    For each parameter, shift it by `s`, rebuild the circuit and record
    `r` times the resulting statevector as a column vector.

    Args:
        - qc (qiskit.QuantumCircuit): circuit
        - create_circuit_func (types.FunctionType): ansatz builder
        - thetas (np.ndarray): parameters
        - r (float): scaling factor of the shift rule
        - s (float): shift amount

    Returns:
        - np.ndarray: stacked column vectors, one per parameter
    """
    derivatives = []
    for index in range(len(thetas)):
        shifted = thetas.copy()
        shifted[index] += s
        shifted_qc = create_circuit_func(qc.copy(), shifted, **kwargs)
        state = qiskit.quantum_info.Statevector.from_instruction(shifted_qc).data
        derivatives.append(r * np.expand_dims(state, 1))
    return np.array(derivatives)
def fit_state_tomography(u: qiskit.QuantumCircuit,
                         create_vdagger_func: types.FunctionType,
                         thetas: np.ndarray,
                         num_steps: int,
                         loss_func: types.FunctionType,
                         optimizer: types.FunctionType,
                         verbose: int = 0,
                         is_return_all_thetas: bool = False,
                         **kwargs):
    """Return the new thetas that fit with the circuit from create_vdagger_func function

    Args:
        - u (QuantumCircuit): fitting circuit
        - create_vdagger_func (types.FunctionType): added circuit function
        - thetas (np.ndarray): parameters
        - num_steps (Int): number of iterations
        - loss_func (types.FunctionType): loss function
        - optimizer (types.FunctionType): optimizer function
        - verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
        - is_return_all_thetas (bool): if True, return the whole optimisation
          trajectory of thetas instead of only the final value
        - **kwargs: additional parameters for create_circuit_func()

    Returns:
        - thetas (np.ndarray): the optimized parameters (or the full list
          of per-step parameters when is_return_all_thetas is True)
        - loss_values (np.ndarray): the list of loss_value
    """
    thetass = []
    loss_values = []
    if verbose == 1:
        bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
    for i in range(0, num_steps):
        # One optimisation step: gradient, parameter update, loss tracking.
        grad_loss = qtm.base.grad_loss(u, create_vdagger_func, thetas, **kwargs)
        optimizer_name = optimizer.__name__
        if optimizer_name == 'sgd':
            thetas = qtm.optimizer.sgd(thetas, grad_loss)
        elif optimizer_name == 'adam':
            # Adam moment estimates are created lazily on the first step.
            if i == 0:
                m, v = list(np.zeros(thetas.shape[0])), list(
                    np.zeros(thetas.shape[0]))
            thetas = qtm.optimizer.adam(thetas, m, v, i, grad_loss)
        elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
            # Quantum-natural-gradient variants additionally need |psi>
            # and its parameter-shift derivative.
            grad_psi1 = grad_psi(u,
                                 create_vdagger_func,
                                 thetas,
                                 r=qtm.constant.two_term_psr['s'],
                                 s=np.pi,
                                 **kwargs)
            u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
            psi = qiskit.quantum_info.Statevector.from_instruction(u_copy).data
            psi = np.expand_dims(psi, 1)
            if optimizer_name == 'qng_fubini_study':
                G = qtm.fubini_study.qng(
                    u.copy(), thetas, create_vdagger_func, **kwargs)
                thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
            if optimizer_name == 'qng_qfim':
                thetas = qtm.optimizer.qng_qfim(
                    thetas, psi, grad_psi1, grad_loss)
            if optimizer_name == 'qng_adam':
                if i == 0:
                    m, v = list(np.zeros(thetas.shape[0])), list(
                        np.zeros(thetas.shape[0]))
                thetas = qtm.optimizer.qng_adam(
                    thetas, m, v, i, psi, grad_psi1, grad_loss)
        else:
            # Unknown name: treat `optimizer` as a plain update callable.
            thetas = optimizer(thetas, grad_loss)
        u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
        loss = loss_func(
            qtm.base.measure(u_copy, list(range(u_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas.copy())
        if verbose == 1:
            bar.update(1)
        if verbose == 2 and i % 10 == 0:
            print("Step " + str(i) + ": " + str(loss))
    if verbose == 1:
        bar.close()
    if is_return_all_thetas:
        return thetass, loss_values
    else:
        return thetas, loss_values
def fit_state_preparation(create_u_func: types.FunctionType,
                          vdagger: qiskit.QuantumCircuit,
                          thetas: np.ndarray,
                          num_steps: int,
                          loss_func: types.FunctionType,
                          optimizer: types.FunctionType,
                          verbose: int = 0,
                          is_return_all_thetas: bool = False,
                          **kwargs):
    """Return the new thetas that fit with the circuit from create_u_func function

    Args:
        - create_u_func (types.FunctionType): added circuit function
        - vdagger (QuantumCircuit): fitting circuit
        - thetas (np.ndarray): parameters
        - num_steps (Int): number of iterations
        - loss_func (types.FunctionType): loss function
        - optimizer (types.FunctionType): optimizer function
        - verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
        - is_return_all_thetas (bool): if True, return the whole optimisation
          trajectory of thetas instead of only the final value
        - **kwargs: additional parameters for create_circuit_func()

    Returns:
        - thetas (np.ndarray): the optimized parameters (or the full list
          of per-step parameters when is_return_all_thetas is True)
        - loss_values (np.ndarray): the list of loss_value
    """
    if verbose == 1:
        bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
    thetass = []
    loss_values = []
    def create_circuit_func(vdagger: qiskit.QuantumCircuit, thetas: np.ndarray, **kwargs):
        # Build U(thetas) on a fresh circuit and append V^dagger after it.
        # NOTE(review): QuantumCircuit.combine is deprecated in newer qiskit
        # releases in favour of compose() — confirm the targeted version.
        return create_u_func(qiskit.QuantumCircuit(vdagger.num_qubits, vdagger.num_qubits), thetas, **kwargs).combine(vdagger)
    for i in range(0, num_steps):
        # One optimisation step: gradient, parameter update, loss tracking.
        grad_loss = qtm.base.grad_loss(vdagger, create_circuit_func, thetas, **kwargs)
        optimizer_name = optimizer.__name__
        if optimizer_name == 'sgd':
            thetas = qtm.optimizer.sgd(thetas, grad_loss)
        elif optimizer_name == 'adam':
            # Adam moment estimates are created lazily on the first step.
            if i == 0:
                m, v1 = list(np.zeros(thetas.shape[0])), list(
                    np.zeros(thetas.shape[0]))
            thetas = qtm.optimizer.adam(thetas, m, v1, i, grad_loss)
        elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
            # Quantum-natural-gradient variants additionally need |psi>
            # and its parameter-shift derivative.
            grad_psi1 = grad_psi(vdagger,
                                 create_circuit_func,
                                 thetas,
                                 r=1 / 2,
                                 s=np.pi,
                                 **kwargs)
            v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
            psi = qiskit.quantum_info.Statevector.from_instruction(
                v_copy).data
            psi = np.expand_dims(psi, 1)
            if optimizer_name == 'qng_fubini_study':
                G = qtm.fubini_study.qng(
                    vdagger.copy(), thetas, create_circuit_func, **kwargs)
                thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
            if optimizer_name == 'qng_qfim':
                thetas = qtm.optimizer.qng_qfim(
                    thetas, psi, grad_psi1, grad_loss)
            if optimizer_name == 'qng_adam':
                if i == 0:
                    m, v1 = list(np.zeros(thetas.shape[0])), list(
                        np.zeros(thetas.shape[0]))
                thetas = qtm.optimizer.qng_adam(
                    thetas, m, v1, i, psi, grad_psi1, grad_loss)
        else:
            # Unknown name: treat `optimizer` as a plain update callable.
            thetas = optimizer(thetas, grad_loss)
        v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
        loss = loss_func(
            qtm.base.measure(v_copy, list(range(v_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas.copy())
        if verbose == 1:
            bar.update(1)
        if verbose == 2 and i % 10 == 0:
            print("Step " + str(i) + ": " + str(loss))
    if verbose == 1:
        bar.close()
    if is_return_all_thetas:
        return thetass, loss_values
    else:
        return thetas, loss_values
def fit(u: typing.Union[qiskit.QuantumCircuit, types.FunctionType], v: typing.Union[qiskit.QuantumCircuit, types.FunctionType],
        thetas: np.ndarray,
        num_steps: int,
        loss_func: types.FunctionType,
        optimizer: types.FunctionType,
        verbose: int = 0,
        is_return_all_thetas: bool = False,
        **kwargs):
    """Dispatch between state preparation and state tomography.

    If `u` is callable it is treated as the ansatz builder (state
    preparation against the fixed circuit `v`); otherwise `u` is the fixed
    circuit and `v` is the V^dagger builder (state tomography). All the
    common optimisation options are forwarded unchanged.
    """
    common_options = dict(
        thetas=thetas,
        num_steps=num_steps,
        loss_func=loss_func,
        optimizer=optimizer,
        verbose=verbose,
        is_return_all_thetas=is_return_all_thetas,
    )
    if callable(u):
        # u builds the circuit; v is the fixed target V^dagger.
        return fit_state_preparation(create_u_func=u, vdagger=v,
                                     **common_options, **kwargs)
    # u is the fixed circuit; v builds V^dagger.
    return fit_state_tomography(u=u, create_vdagger_func=v,
                                **common_options, **kwargs)
| 38.816092 | 127 | 0.574178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,380 | 0.259402 |
209d4037144ecf01ba71e2b23993e11ab3d64b44 | 1,922 | py | Python | marqeta/response_models/kyc_response.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | marqeta/response_models/kyc_response.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | marqeta/response_models/kyc_response.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z | from datetime import datetime, date
from marqeta.response_models.result import Result
from marqeta.response_models.kyc_question import KycQuestion
from marqeta.response_models import datetime_object
import json
import re
class KycResponse(object):
    """Typed wrapper around a raw KYC result payload.

    Exposes the fields of the JSON response as read-only properties,
    converting nested objects to their wrapper classes on access.
    """

    def __init__(self, json_response):
        self.json_response = json_response

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    @staticmethod
    def json_serial(o):
        # json.dumps fallback: render date/datetime values via str().
        if isinstance(o, (datetime, date)):
            return o.__str__()

    @property
    def created_time(self):
        if 'created_time' in self.json_response:
            return datetime_object('created_time', self.json_response)

    @property
    def last_modified_time(self):
        if 'last_modified_time' in self.json_response:
            return datetime_object('last_modified_time', self.json_response)

    @property
    def token(self):
        return self.json_response.get('token')

    @property
    def user_token(self):
        return self.json_response.get('user_token')

    @property
    def business_token(self):
        return self.json_response.get('business_token')

    @property
    def result(self):
        # Wrap the nested result object, if present.
        if 'result' in self.json_response:
            return Result(self.json_response['result'])

    @property
    def manual_override(self):
        return self.json_response.get('manual_override')

    @property
    def notes(self):
        return self.json_response.get('notes')

    @property
    def questions(self):
        # Wrap each nested question object, if any are present.
        if 'questions' in self.json_response:
            return [KycQuestion(val) for val in self.json_response['questions']]

    @property
    def reference_id(self):
        return self.json_response.get('reference_id')

    def __repr__(self):
        return '<Marqeta.response_models.kyc_response.KycResponse>' + self.__str__()
| 25.972973 | 85 | 0.686264 | 1,699 | 0.883975 | 0 | 0 | 1,310 | 0.681582 | 0 | 0 | 231 | 0.120187 |
209da4aca789fa213327efbbc4af529de39c5ec3 | 507 | py | Python | pyexlatex/presentation/beamer/templates/control/tocsection.py | whoopnip/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 4 | 2020-06-08T07:17:12.000Z | 2021-11-04T21:39:52.000Z | pyexlatex/presentation/beamer/templates/control/tocsection.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | 24 | 2020-02-17T17:20:44.000Z | 2021-12-20T00:10:19.000Z | pyexlatex/presentation/beamer/templates/control/tocsection.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | [
"MIT"
] | null | null | null | from pyexlatex.models.template import Template
from pyexlatex.presentation import Frame
from pyexlatex.presentation.beamer.control.atbeginsection import AtBeginSection
from pyexlatex.models.toc import TableOfContents
class TableOfContentsAtBeginSection(Template):
    """Template that shows a table-of-contents frame (with the current
    section highlighted) at the beginning of every section."""

    def __init__(self):
        contents_frame = Frame(
            [TableOfContents(options=('currentsection',))],
            title='Table of Contents'
        )
        self.contents = AtBeginSection(contents_frame)
        super().__init__()
| 33.8 | 79 | 0.751479 | 287 | 0.566075 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.069034 |
209ed5edbe99a92a1f9f44f9e785aa2ea5450e94 | 6,725 | py | Python | pbc/df/ft_ao.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | pbc/df/ft_ao.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | pbc/df/ft_ao.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytic Fourier transformation AO-pair value for PBC
'''
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.gto.ft_ao import ft_ao as mol_ft_ao
libpbc = lib.load_library('libpbc')
#
# \int mu*nu*exp(-ik*r) dr
#
def ft_aopair(cell, Gv, shls_slice=None, aosym='s1',
              b=None, gxyz=None, Gvbase=None,
              kpti_kptj=numpy.zeros((2,3)), q=None, verbose=None):
    ''' FT transform AO pair for a single (kpti, kptj) pair
    \int exp(-i(G+q)r) i(r) j(r) exp(-ikr) dr^3
    '''
    kpti, kptj = kpti_kptj
    # Default momentum transfer is the difference of the two k-points.
    momentum = kptj - kpti if q is None else q
    results = _ft_aopair_kpts(cell, Gv, shls_slice, aosym, b, gxyz, Gvbase,
                              momentum, kptj.reshape(1,3))
    # _ft_aopair_kpts returns one array per kptj; exactly one was passed.
    return results[0]
# NOTE buffer out must be initialized to 0
# gxyz is the index for Gvbase
def _ft_aopair_kpts(cell, Gv, shls_slice=None, aosym='s1',
                    b=None, gxyz=None, Gvbase=None,
                    q=numpy.zeros(3), kptjs=numpy.zeros((1,3)),
                    out=None):
    ''' FT transform AO pair
    \int exp(-i(G+q)r) i(r) j(r) exp(-ikr) dr^3
    The return list holds the AO pair array
    corresponding to the kpoints given by kptjs

    Note: when ``out`` buffers are supplied by the caller they must be
    zero-initialized, because the C lattice-sum kernel accumulates into them.
    '''
    q = numpy.reshape(q, 3)
    kptjs = numpy.asarray(kptjs, order='C').reshape(-1,3)
    nGv = Gv.shape[0]
    # Transposed, C-contiguous copy of the plane waves; shifting by q makes
    # the C kernel evaluate exp(-i(G+q)r) directly.
    GvT = numpy.asarray(Gv.T, order='C')
    GvT += q.reshape(-1,1)

    # Use the general (unoptimized) Gz evaluator when the structured-grid
    # metadata (b/gxyz/Gvbase) is missing or q is nonzero.
    if (gxyz is None or b is None or Gvbase is None or (abs(q).sum() > 1e-9)
        # backward compatibility for pyscf-1.2, in which the argument Gvbase is gs
        or (Gvbase is not None and isinstance(Gvbase[0], (int, numpy.integer)))):
        p_gxyzT = lib.c_null_ptr()
        p_gs = (ctypes.c_int*3)(0,0,0)
        p_b = (ctypes.c_double*1)(0)
        eval_gz = 'GTO_Gv_general'
    else:
        # A (numerically) diagonal b selects the orthogonal-cell kernel.
        if abs(b-numpy.diag(b.diagonal())).sum() < 1e-8:
            eval_gz = 'GTO_Gv_orth'
        else:
            eval_gz = 'GTO_Gv_nonorth'
        gxyzT = numpy.asarray(gxyz.T, order='C', dtype=numpy.int32)
        p_gxyzT = gxyzT.ctypes.data_as(ctypes.c_void_p)
        Gvx = lib.cartesian_prod(Gvbase)
        # Pack b, q and the Gv basis components into one flat buffer for C.
        b = numpy.hstack((b.ravel(), q) + Gvbase)
        p_b = b.ctypes.data_as(ctypes.c_void_p)
        p_gs = (ctypes.c_int*3)(*[len(x) for x in Gvbase])

    drv = libpbc.PBC_ft_latsum_kpts
    intor = getattr(libpbc, 'GTO_ft_ovlp_sph')
    eval_gz = getattr(libpbc, eval_gz)

    # make copy of atm,bas,env because they are modified in the lattice sum
    atm, bas, env = gto.conc_env(cell._atm, cell._bas, cell._env,
                                 cell._atm, cell._bas, cell._env)
    # AO offsets for the concatenated (cell + cell) basis: bra shells index
    # the first copy, ket shells the second.
    ao_loc = cell.ao_loc_nr()
    nao = ao_loc[cell.nbas]
    ao_loc = numpy.asarray(numpy.hstack((ao_loc[:-1], ao_loc+nao)),
                           dtype=numpy.int32)
    if shls_slice is None:
        shls_slice = (0, cell.nbas, cell.nbas, cell.nbas*2)
    else:
        # Shift the ket shell indices into the second copy of the basis.
        shls_slice = (shls_slice[0], shls_slice[1],
                      cell.nbas+shls_slice[2], cell.nbas+shls_slice[3])
    ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
    nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
    shape = (nGv, ni, nj)
    fill = getattr(libpbc, 'PBC_ft_fill_'+aosym)

    # Theoretically, hermitian symmetry can be also found for kpti == kptj:
    #       f_ji(G) = \int f_ji exp(-iGr) = \int f_ij^* exp(-iGr) = [f_ij(-G)]^*
    # The hermi operation needs reordering the axis-0.  It is inefficient.
    if aosym == 's1hermi': # Symmetry for Gamma point
        assert(abs(q).sum() < 1e-9 and abs(kptjs).sum() < 1e-9)
    elif aosym == 's2':
        # Triangular-packed pair dimension for the requested shell range.
        i0 = ao_loc[shls_slice[0]]
        i1 = ao_loc[shls_slice[1]]
        nij = i1*(i1+1)//2 - i0*(i0+1)//2
        shape = (nGv, nij)

    # One output buffer per kptj; zero-filled because the kernel accumulates.
    if out is None:
        out = [numpy.zeros(shape, order='F', dtype=numpy.complex128)
               for k in range(len(kptjs))]
    else:
        out = [numpy.ndarray(shape, order='F', dtype=numpy.complex128,
                             buffer=out[k]) for k in range(len(kptjs))]
    out_ptrs = (ctypes.c_void_p*len(out))(
        *[x.ctypes.data_as(ctypes.c_void_p) for x in out])

    xyz = numpy.asarray(cell.atom_coords(), order='C')
    ptr_coord = numpy.asarray(atm[cell.natm:,gto.PTR_COORD],
                              dtype=numpy.int32, order='C')
    Ls = cell.get_lattice_Ls()
    # Phase factors exp(i L.k_j) for every lattice vector / k-point pair.
    exp_Lk = numpy.einsum('ik,jk->ij', Ls, kptjs)
    exp_Lk = numpy.exp(1j * numpy.asarray(exp_Lk, order='C'))
    drv(intor, eval_gz, fill, out_ptrs, xyz.ctypes.data_as(ctypes.c_void_p),
        ptr_coord.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(xyz)),
        Ls.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(Ls)),
        exp_Lk.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(len(kptjs)),
        (ctypes.c_int*4)(*shls_slice),
        ao_loc.ctypes.data_as(ctypes.c_void_p),
        GvT.ctypes.data_as(ctypes.c_void_p),
        p_b, p_gxyzT, p_gs, ctypes.c_int(nGv),
        atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.natm*2),
        bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(cell.nbas*2),
        env.ctypes.data_as(ctypes.c_void_p))

    if aosym == 's1hermi':
        # Mirror the strictly-lower AO triangle into the upper triangle.
        for mat in out:
            for i in range(1,ni):
                mat[:,:i,i] = mat[:,i,:i]
    return out
def ft_ao(mol, Gv, shls_slice=None, b=None,
          gxyz=None, Gvbase=None, kpt=numpy.zeros(3), verbose=None):
    """Analytic Fourier transform of AO functions.

    At (numerically) zero kpt the grid metadata (b/gxyz/Gvbase) is forwarded
    unchanged; at a general k-point it is dropped and G+k is transformed
    directly.
    """
    at_gamma_point = abs(kpt).sum() < 1e-9
    if at_gamma_point:
        return mol_ft_ao(mol, Gv, shls_slice, b, gxyz, Gvbase, verbose)
    return mol_ft_ao(mol, Gv + kpt, shls_slice, None, None, None, verbose)
if __name__ == '__main__':
    # Self-test: compare the analytic FT against a numerical FFT reference.
    import pyscf.pbc.gto as pgto
    import pyscf.dft.numint
    from pyscf.pbc import tools

    L = 5.
    n = 10
    cell = pgto.Cell()
    cell.a = numpy.diag([L,L,L])
    cell.gs = numpy.array([n,n,n])
    cell.atom = '''C 1.3 .2 .3
C .1 .1 1.1
'''
    cell.basis = 'ccpvdz'
    #cell.basis = {'C': [[0, (2.4, .1, .6), (1.0,.8, .4)], [1, (1.1, 1)]]}
    #cell.basis = {'C': [[0, (2.4, 1)]]}
    cell.unit = 'B'
    #cell.verbose = 4
    cell.build(0,0)
    #cell.nimgs = (2,2,2)

    ao2 = ft_aopair(cell, cell.Gv)
    nao = cell.nao_nr()
    # NOTE(review): pyscf.pbc.dft is accessed here but only pyscf.dft.numint
    # is imported above — presumably pyscf registers the submodule as a side
    # effect of the imports; confirm before relying on this script.
    coords = pyscf.pbc.dft.gen_grid.gen_uniform_grids(cell)
    aoR = pyscf.pbc.dft.numint.eval_ao(cell, coords)
    # Real-space AO products rho_ij(r) = ao_i*(r) ao_j(r) on the grid.
    aoR2 = numpy.einsum('ki,kj->kij', aoR.conj(), aoR)
    ngs = aoR.shape[0]

    # Compare analytic FT of each AO pair against the FFT reference.
    for i in range(nao):
        for j in range(nao):
            ao2ref = tools.fft(aoR2[:,i,j], cell.gs) * cell.vol/ngs
            print(i, j, numpy.linalg.norm(ao2ref - ao2[:,i,j]))

    # Same check for single-AO transforms.
    aoG = ft_ao(cell, cell.Gv)
    for i in range(nao):
        aoref = tools.fft(aoR[:,i], cell.gs) * cell.vol/ngs
        print(i, numpy.linalg.norm(aoref - aoG[:,i]))
| 36.950549 | 81 | 0.585576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.191078 |
20a1526fd423826377772b56d4d80939f2cee7f2 | 7,639 | py | Python | asyncpushbullet/websocket_server.py | rharder/pushbullet.py | 837fff784eaa4f2c2a1be07662d9b7dceb40c99a | [
"MIT"
] | 12 | 2017-02-21T14:53:37.000Z | 2021-04-23T03:05:48.000Z | asyncpushbullet/websocket_server.py | rharder/pushbullet.py | 837fff784eaa4f2c2a1be07662d9b7dceb40c99a | [
"MIT"
] | null | null | null | asyncpushbullet/websocket_server.py | rharder/pushbullet.py | 837fff784eaa4f2c2a1be07662d9b7dceb40c99a | [
"MIT"
] | 2 | 2021-04-11T05:23:33.000Z | 2021-04-12T01:03:38.000Z | #!/usr/bin/env python3
"""
Easy to use Websocket Server.
Source: https://github.com/rharder/handy
June 2018 - Updated for aiohttp v3.3
August 2018 - Updated for Python 3.7, made WebServer support multiple routes on one port
"""
import asyncio
import logging
import weakref
from functools import partial
from typing import Dict, Set, List
import aiohttp # pip install aiohttp
from aiohttp import web
__author__ = "Robert Harder"
__email__ = "rob@iharder.net"
__license__ = "Public Domain"
class WebServer:
    """Hosts a web/websocket server on a given port and responds to multiple routes
    (relative urls) at that address.

    Source: https://github.com/rharder/handy
    Author: Robert Harder
    License: Public Domain
    """

    def __init__(self, host: str = None, port: int = None, ssl_context=None):
        """
        Create a new WebServer that will listen on the given port.

        :param host: Interface to bind to (None binds all interfaces)
        :param port: The port on which to listen
        :param ssl_context: Optional SSLContext for HTTPS/WSS
        """
        super().__init__()
        self.log = logging.getLogger(__name__ + '.' + self.__class__.__name__)

        # Passed parameters
        self.host: str = host
        self.port: int = port
        self.ssl_context = ssl_context

        # Internal use — populated by start(), cleared by shutdown()
        self.app: web.Application = None
        self.site: web.TCPSite = None
        self.runner: web.AppRunner = None
        self.route_handlers: Dict[str, WebHandler] = {}
        self._running: bool = False
        self._shutting_down: bool = False
        self._starting_up: bool = False

    def __str__(self):
        routes = ", ".join(self.route_handlers.keys())
        # Fixed: format string previously read "{}({}:({})", which produced
        # unbalanced parentheses like "WebServer(8080:(/ws".
        return "{}({}:{})".format(self.__class__.__name__, self.port, routes)

    @property
    def running(self):
        """True once start() has completed and the server is listening."""
        return self._running

    @property
    def starting_up(self):
        """True while start() is in progress."""
        return self._starting_up

    @property
    def shutting_down(self):
        """True while shutdown() is in progress."""
        return self._shutting_down

    async def start(self):
        """
        Starts the websocket server and begins listening.  This function returns
        with the server continuing to listen (non-blocking).

        :raises RuntimeError: if the server is already running or starting up
        :return: None
        """
        if self.starting_up or self.running:
            raise RuntimeError("Cannot start server when it is already running.")

        self._starting_up = True

        self.app = web.Application()
        self.app['requests'] = []  # type: List[web.BaseRequest]
        self.app.on_shutdown.append(self._on_shutdown)

        # Connect routes — one GET handler per registered relative url.
        for route in self.route_handlers.keys():
            self.app.router.add_get(route, partial(self.incoming_http_handler, route))

        self.runner = web.AppRunner(self.app)
        await self.runner.setup()
        self.site = web.TCPSite(self.runner, port=self.port, host=self.host, ssl_context=self.ssl_context)
        await self.site.start()

        self._running = True
        self._starting_up = False

    async def shutdown(self):
        """Gracefully stops the server; idempotent while a shutdown is in flight.

        :raises RuntimeError: if the server is not running
        """
        if not self.running:
            raise RuntimeError("Cannot close server that is not running.")
        if not self.shutting_down:
            self._shutting_down = True
            await self.runner.cleanup()

    async def _on_shutdown(self, app: web.Application):
        # aiohttp on_shutdown callback: drop live connections and reset flags.
        self.close_current_connections()
        self._running = False
        self._shutting_down = False

    def close_current_connections(self):
        """Force-closes the transport of every request currently in flight."""
        for x in self.app["requests"]:
            if x is not None and x.transport is not None:
                x.transport.close()

    def add_route(self, route: str, handler):
        """Registers a handler for a relative url; must be called before start().

        :raises RuntimeError: if the server is already running
        """
        if self.running:
            raise RuntimeError("Cannot add a route after server is already running.")
        self.route_handlers[route] = handler

    async def incoming_http_handler(self, route: str, request: web.BaseRequest):
        """Tracks the in-flight request and delegates to the route's handler."""
        self.app['requests'].append(request)
        try:
            resp = await self.route_handlers[route].on_incoming_http(route, request)
        finally:
            self.app['requests'].remove(request)
        return resp
class WebHandler:
    """Base handler: answers any HTTP request with the handler's class name.
    Subclasses override on_incoming_http() to produce real responses."""

    async def on_incoming_http(self, route: str, request: web.BaseRequest):
        handler_name = type(self).__name__
        return web.Response(body=str(handler_name))
class WebsocketHandler(WebHandler):
    """WebHandler that upgrades incoming HTTP requests to websockets and
    tracks the live connections so data can be broadcast to all clients."""

    def __init__(self, *kargs, **kwargs):
        super().__init__(*kargs, **kwargs)
        # WeakSet: closed / garbage-collected sockets drop out automatically,
        # so this registry never keeps dead connections alive.
        self.websockets: Set[web.WebSocketResponse] = weakref.WeakSet()

    async def broadcast_json(self, msg):
        """ Converts msg to json and broadcasts the json data to all connected clients. """
        await self._broadcast(msg, web.WebSocketResponse.send_json)

    async def broadcast_text(self, msg: str):
        """ Broadcasts a string to all connected clients. """
        await self._broadcast(msg, web.WebSocketResponse.send_str)

    async def broadcast_bytes(self, msg: bytes):
        """ Broadcasts bytes to all connected clients. """
        await self._broadcast(msg, web.WebSocketResponse.send_bytes)

    async def _broadcast(self, msg, func: callable):
        """Sends msg to every live websocket using the given unbound send method."""
        # Snapshot the WeakSet first: it can shrink while we await each send.
        for ws in set(self.websockets):  # type: web.WebSocketResponse
            await func(ws, msg)

    async def close_websockets(self):
        """Closes all active websockets for this handler."""
        # Gather only the sockets that are still open; skip the await entirely
        # when there is nothing to close.
        ws_closers = [ws.close() for ws in set(self.websockets) if not ws.closed]
        ws_closers and await asyncio.gather(*ws_closers)

    async def on_incoming_http(self, route: str, request: web.BaseRequest):
        """Handles the incoming http(s) request and converts it to a WebSocketResponse.

        This method is not meant to be overridden when subclassed.
        """
        ws = web.WebSocketResponse()
        self.websockets.add(ws)
        try:
            await ws.prepare(request)
            await self.on_websocket(route, ws)
        finally:
            # Always deregister, even if prepare() or the handler raised.
            self.websockets.discard(ws)
        return ws

    async def on_websocket(self, route: str, ws: web.WebSocketResponse):
        """
        Override this function if you want to handle new incoming websocket clients.
        The default behavior is to listen indefinitely for incoming messages from clients
        and call on_message() with each one.

        If you override on_websocket and have your own loop to receive and process messages,
        you may also need an await asyncio.sleep(0) line to avoid an infinite loop with the
        websocket close message.

        Example:
            while not ws.closed:
                ws_msg = await ws.receive()
                await asyncio.sleep(0)
                ...
        """
        try:
            while not ws.closed:
                ws_msg = await ws.receive()  # type: aiohttp.WSMessage
                await self.on_message(route=route, ws=ws, ws_msg_from_client=ws_msg)

                # If you override on_websocket and have your own loop
                # to receive and process messages, you may also need
                # this await asyncio.sleep(0) line to avoid an infinite
                # loop with the websocket close message.
                await asyncio.sleep(0)  # Need to yield control back to event loop

        except RuntimeError as e:  # Socket closing throws RuntimeError
            print("RuntimeError - did socket close?", e, flush=True)
            pass
        finally:
            await self.on_close(route, ws)

    async def on_message(self, route: str, ws: web.WebSocketResponse, ws_msg_from_client: aiohttp.WSMessage):
        """ Override this function to handle incoming messages from websocket clients. """
        pass

    async def on_close(self, route: str, ws: web.WebSocketResponse):
        """ Override this function to handle a websocket having closed. """
        pass
| 33.951111 | 109 | 0.641445 | 7,137 | 0.934285 | 0 | 0 | 203 | 0.026574 | 5,066 | 0.663176 | 2,635 | 0.34494 |
20a2f7fe77dd649f772e82e21cbc152c7eace4a3 | 5,475 | py | Python | update/checkplay.py | dalton-lee/AutoUpdate | 5c0d03820bfa86a1b3829d9f6a211fcd00d70fed | [
"MIT"
] | 1 | 2016-09-26T15:48:06.000Z | 2016-09-26T15:48:06.000Z | update/checkplay.py | dalton-lee/AutoUpdate | 5c0d03820bfa86a1b3829d9f6a211fcd00d70fed | [
"MIT"
] | null | null | null | update/checkplay.py | dalton-lee/AutoUpdate | 5c0d03820bfa86a1b3829d9f6a211fcd00d70fed | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
import os
import sys
import json
import time
import urllib2
import platform
import ConfigParser
UPDATE_CONFIG = 0
def checkplay(remotedir,workdir):
    """Endless sync loop: compares a remote 'play-1.2.3' version file against
    the local copy and, when they differ, applies an md5-manifest diff
    (delete removed files, download new ones), then runs the service checks.

    NOTE(review): Python 2 code (urllib2); printf/checksync/checksale are
    imported at module level from the __main__ bootstrap.
    """
    global UPDATE_CONFIG
    if not remotedir.endswith('/'):
        remotedir = remotedir + '/'
    # Per-org project directory and service settings from update.conf.
    orgcode = UPDATE_CONFIG.get('global', 'orgcode')
    projectdir = remotedir + orgcode
    syncservice = UPDATE_CONFIG.get('BusSync', 'servicename')
    saleservice = UPDATE_CONFIG.get('BusSale', 'servicename')
    port = UPDATE_CONFIG.get('BusSale', 'port')
    nginx = UPDATE_CONFIG.get('BusSale', 'nginx')
    ngconf = UPDATE_CONFIG.get('BusSale', 'ngconf')
    delay = UPDATE_CONFIG.get('BusSale', 'delay')
    delay = float(delay)
    # Remote and local layout: version + filemd5 manifest beside the payload.
    remotedir = remotedir + 'play-1.2.3/'
    localdir = os.path.join(workdir,'play-1.2.3')
    play = os.path.join(localdir,'play')
    rverfile = remotedir + 'version'
    rmd5file = remotedir + 'filemd5'
    lverfile = os.path.join(localdir,'version')
    lmd5file = os.path.join(localdir,'filemd5')
    fileloop = os.path.join(localdir,'FileLoop')
    while True:
        if not os.path.isdir(localdir):
            os.makedirs(localdir)
        ls = '0'
        rs = '1';
        try:
            rs = urllib2.urlopen(rverfile).read()
        except:
            # Remote unreachable: retry on the next cycle.
            printf ('Can\'t find remote version file:%s,wait for next time!' % rverfile)
            time.sleep(60)
            continue
        try:
            ls = open(lverfile).read()
        except:
            # No local version yet: let FileLoop build the local md5 manifest.
            printf ('Can\'t find local version file:%s' % lverfile)
            printf ('开始生成本地MD5文件')
            try:
                os.chdir(localdir)
                os.system('python %s' % fileloop)
                time.sleep(10)
            except:
                printf('生成本地MD5文件失败,再说...')
        if rs.strip() == ls.strip() :
            printf (time.strftime('%Y-%m-%d %H:%M:%S')+':play暂无更新,当前版本号为:%s' % (ls))
        else:
            # Versions differ: fetch the remote manifest and apply the diff.
            printf ('发现新版本,新版本号为:%s' % rs)
            printf ('开始解析差异文件:%s' % rmd5file)
            remotestr = ''
            try:
                remotestr = urllib2.urlopen(rmd5file).read()
            except:
                printf ('无法找到远程md5文件,请检查服务端目录或通过浏览器查看文件是否存在:%s' % rmd5file)
                printf ('等待60秒后,重新尝试更新!')
                time.sleep(60)
                continue
            # Manifests map md5 -> relative path; set algebra gives the diff.
            remotedict = json.loads(remotestr)
            remotekeys = set(remotedict.keys())
            localstr = ''
            localdict = {'':''}
            try:
                localstr = open(lmd5file).read()
            except:
                printf ('Can\'t find local md5 file:%s' % lmd5file)
            try:
                localdict = json.loads(localstr)
            except:
                printf ('Can\'t load md5 file as json:%s' % lmd5file)
            localkeys = set(localdict.keys())
            printf ('同步删除中..')
            # Local-only entries: delete those files.
            localdiff = localkeys-remotekeys
            for local in localdiff:
                lpath = localdict[local].replace('/',os.path.sep)
                filepath = os.path.join(localdir,lpath)
                removefile(filepath)
                continue
            printf ('同步更新中..')
            # Remote-only entries: download those files.
            remotediff = remotekeys-localkeys
            for remote in remotediff:
                rpath = remotedict[remote]
                remotepath = remotedir + rpath
                filepath = os.path.join(localdir,rpath.replace('/',os.path.sep))
                addfile(remotepath,filepath)
                continue
            if(platform.system() == 'Linux'):
                # Make the updated play launcher executable again.
                os.system('chmod 744 %s' % play)
            printf (time.strftime('%Y-%m-%d %H:%M:%S')+':play由%s版本更新至%s版本成功!' % (ls,rs))
        # Health checks for the dependent services every cycle.
        checksync(projectdir,workdir,'BusSync',play,syncservice)
        checksale(projectdir,workdir,'BusSale',play,port,nginx,ngconf,delay,saleservice)
        time.sleep(60)
def removefile(filepath):
    """Delete filepath, then remove its parent directory if it became empty.

    Failures are logged (via the project's printf) instead of raised, so a
    missing file or a non-empty directory never aborts the sync loop.
    Narrowed the bare ``except:`` clauses to OSError so real programming
    errors (and KeyboardInterrupt) are no longer swallowed.
    """
    parentdir = os.path.dirname(filepath)
    try:
        os.remove(filepath)
        printf ('del:%s' % filepath)
    except OSError:
        # File already gone (or not removable); log and keep going.
        printf ('already del : %s' % filepath)
    try:
        filelist = os.listdir(parentdir)
        if len(filelist) == 0 :
            try:
                os.rmdir(parentdir)
                printf ('deldir:%s' % parentdir)
            except OSError:
                printf ('already deldir : %s' % parentdir)
    except OSError:
        # Parent directory missing — nothing left to clean up.
        printf ('%s not exist' % parentdir)
def addfile(remotepath,filepath):
    """Download remotepath into filepath, creating parent directories first.

    A failed download is logged (via the project's printf) and skipped; the
    next sync cycle will retry it.  Narrowed the bare ``except:`` to
    ``except Exception`` so KeyboardInterrupt/SystemExit still propagate.
    """
    dirname = os.path.dirname(filepath)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
        printf ('mkdir:%s' % dirname)
    try:
        downloadFile(remotepath,filepath)
        printf ('add:%s' % filepath)
    except Exception:
        printf ('failed:%s' % remotepath)
if __name__ == '__main__':
    if(platform.system() == 'Linux'):
        # Kill any previous instance of this script so only one updater runs.
        cmd = "ps aux|grep %s|awk '{print $2}'" % __file__
        pid = os.getpid()
        for s in os.popen(cmd).readlines():
            # NOTE(review): the ps|grep pipeline also matches the transient
            # grep/awk processes; int(s) assumes every line is a bare PID —
            # confirm this never raises on this platform.
            if pid != int(s):
                os.popen('kill %d' % int(s))
    # Load settings and stash them in the module-level UPDATE_CONFIG global
    # that checkplay() reads.
    config = ConfigParser.ConfigParser()
    with open('update.conf') as conf:
        config.readfp(conf)
    UPDATE_CONFIG = config
    remotedir = config.get('global','remotedir')
    workdir = config.get('global','workdir')
    # Helper modules (printf, downloadFile, checksync, checksale) live under
    # <workdir>/pym, so it must be on sys.path before importing them.
    pymdir = os.path.join(workdir,'pym')
    sys.path.append(pymdir)
    from pyutil import printf,downloadFile
    from checksync import checksync
    from checksale import checksale
    checkplay(remotedir,workdir)
20a31ef74c9a035d85a87d60cf973255e1f7ccbd | 349 | py | Python | backend/app/app/models/__init__.py | benlau6/fastapi-fullstack | 68a46d576310a1c846315228c1251f36ea23f056 | [
"MIT"
] | 1 | 2022-01-29T07:53:35.000Z | 2022-01-29T07:53:35.000Z | backend/app/app/models/__init__.py | benlau6/fastapi-fullstack | 68a46d576310a1c846315228c1251f36ea23f056 | [
"MIT"
] | null | null | null | backend/app/app/models/__init__.py | benlau6/fastapi-fullstack | 68a46d576310a1c846315228c1251f36ea23f056 | [
"MIT"
] | null | null | null | from .team import Team, TeamRead, TeamCreate, TeamUpdate # , TeamReadWithHeroes
from .hero import Hero, HeroRead, HeroCreate, HeroUpdate # , HeroReadWithTeam
from typing import List, Optional
class TeamReadWithHeroes(TeamRead):
heroes: List[HeroRead] = []
class HeroReadWithTeam(HeroRead):
team: Optional[TeamRead] = None
| 26.846154 | 81 | 0.733524 | 138 | 0.395415 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.126074 |
20a44fcf51e45dd5f7265e5493d7dc9c9faccbd3 | 48 | py | Python | mltools/train/__init__.py | msc5/ml-tools | 75ca504bdc0495e8a929ad73501b7de692b3089a | [
"Apache-2.0"
] | null | null | null | mltools/train/__init__.py | msc5/ml-tools | 75ca504bdc0495e8a929ad73501b7de692b3089a | [
"Apache-2.0"
] | null | null | null | mltools/train/__init__.py | msc5/ml-tools | 75ca504bdc0495e8a929ad73501b7de692b3089a | [
"Apache-2.0"
] | null | null | null | from .train import *
from .logger import Logger
| 16 | 26 | 0.770833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
20a454e64794fb3ad382db3340f6045cd10b054e | 6,655 | py | Python | HCI/Gesture Recognizer/Python/DTW.py | k-sae/HCI-Lab | ec40136907eaa9ea983efa7b4761e2bb1f5918a5 | [
"Apache-2.0"
] | 3 | 2017-07-03T12:57:36.000Z | 2017-12-11T19:49:14.000Z | HCI/Gesture Recognizer/Python/DTW.py | kareem2048/HCI-Lab | ec40136907eaa9ea983efa7b4761e2bb1f5918a5 | [
"Apache-2.0"
] | null | null | null | HCI/Gesture Recognizer/Python/DTW.py | kareem2048/HCI-Lab | ec40136907eaa9ea983efa7b4761e2bb1f5918a5 | [
"Apache-2.0"
] | 1 | 2018-07-27T11:00:08.000Z | 2018-07-27T11:00:08.000Z | from math import sin, cos, atan2, sqrt
import json
NumPoints = 64
SquareSize = 250.0
AngleRange = 45.0
AnglePrecision = 2.0
Phi = 0.5 * (-1.0 + sqrt(5.0))
class Template:
def __init__(self, name, points):
self.points = points
self.name = name
self.points = resample(self.points, NumPoints)
self.points = rotate_to_zero(self.points)
self.points = scale_to_square(self.points, SquareSize)
self.points = translate_to_origin(self.points)
class Result:
def __init__(self, name, score):
self.Name = name
self.Score = score
class DTWRecognizer:
def __init__(self):
self.templates = []
with open('Templates.json') as data_file:
data = json.load(data_file)
for i in range(len(data['Templates'])) :
templete = data['Templates'][i]['Name']
points = []
for m in range(len(data['Templates'][i]['Points'])) :
point = [data['Templates'][i]['Points'][m]['x'],data['Templates'][i]['Points'][m]['y']]
points.append(point)
self.templates.append(Template(templete,points))
#
'''
self.templates.append(Template("Zig-Zag",[[387, 192],[388, 192],[388, 190],[388, 189],[388, 188],[388, 187],[388, 186],[388, 185],[388, 184],[389, 182],[389, 180],[390, 180],[390, 178],[391, 176],[392, 172],[393, 170],[395, 168],[396, 167],[398, 164],[399, 162],[400, 161],[401, 159],[403, 157],[403, 156],[404, 155],[405, 155],[406, 154],[407, 154],[408, 155],[408, 156],[410, 157],[412, 159],[414, 164],[418, 169],[421, 176],[424, 182],[426, 188],[428, 192],[430, 197],[430, 201],[432, 203],[433, 205], [433, 206],[434, 208],[434, 209],[435, 209],[435, 210],[435, 211],[436, 211],[437, 210],[438, 207],[441, 202],[445, 193],[449, 184],[454, 177],[457, 168], [459, 162],[460, 158],[461, 154],[462, 151],[463, 149],[463, 148],[464, 146],[464, 145], [464, 144],[464, 146],[464, 147],[464, 148],[464, 150],[464, 152],[464, 153],[464, 155],[464, 156],[465, 157],[465, 159], [466, 161],[466, 164],[467, 168],[467, 170],[468, 172],[469, 174],[469, 176],[469, 178],[470, 180],[470, 181],[471, 182],[471, 183],[471, 185],[471, 186],[472, 187],[473, 188],[473, 189],[473, 191],[474, 191],[474, 192],[475, 192],[475, 192],[475, 193],[476, 193],[479, 192],[479, 192],[480, 192],[480, 191],[480, 190],[480, 189],[480, 187],[481, 184],[482, 181],[484, 179],[485, 176],[486, 173],[488, 169],[489, 168],[490, 166],[490, 165],[491, 163],[491, 162],[492, 161]]))
self.templates.append(Template("Line" ,[[430, 158],[435, 156],[440, 156],[447, 154],[456, 153],[465, 151],[476, 150],[484, 149],[495, 148],[503, 148],[510, 148],[517, 148],[520, 148],[525, 148],[530, 148],[533, 148],[535, 149],[538, 149],[539, 149],[540, 149],[541, 149],[542, 149]]))
'''
def Recognize(self, points):
points = resample(points, NumPoints)
points = rotate_to_zero(points)
points = scale_to_square(points, SquareSize)
points = translate_to_origin(points)
b = float("inf")
t = None
for i, temp in enumerate(self.templates):
Tpoints = temp.points
d = distance_at_best_angle(points, Tpoints, -AngleRange, AngleRange, AnglePrecision)
if d < b:
b = d
t = temp
score = 1 - (b / (0.5 * sqrt(SquareSize * SquareSize * 2)))
if t:
return Result(t.name, score)
else:
return Result('Unrecognized', 0.0)
def average(xs): return sum(xs) / len(xs)
def resample(points, n):
I = pathlength(points) / float(n-1)
D = 0
newPoints = [points[0]]
i = 1
while i<len(points):
p_i = points[i]
d = distance(points[i-1], p_i)
if (D + d) >= I:
qx = points[i-1][0] + ((I-D) / d) * (p_i[0] - points[i-1][0])
qy = points[i-1][1] + ((I-D) / d) * (p_i[1] - points[i-1][1])
newPoints.append([qx,qy])
points.insert(i, [qx,qy])
D = 0
else: D = D + d
i+=1
return newPoints
def pathlength(points):
d = 0
for i,p_i in enumerate(points[:len(points)-1]):
d += distance(p_i, points[i+1])
return d
def distance(p1, p2): return float(sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2))
def centroid(points): return float(average([float(i[0]) for i in points])), float(average([float(i[1]) for i in points]))
def rotate_to_zero(points):
cx, cy = centroid(points)
theta = atan2(cy - points[0][1], cx - points[0][0])
newPoints = rotate_by(points, -theta)
return newPoints
def rotate_by(points, theta):
cx, cy = centroid(points)
newpoints = []
cos_p, sin_p = cos(theta), sin(theta)
for p in points:
qx = (p[0] - cx) * cos_p - (p[1] - cy) * sin_p + cx
qy = (p[0] - cx) * sin_p + (p[1] - cy) * cos_p + cy
newpoints.append([qx,qy])
return newpoints
def bounding_box(points):
minx, maxx = min((p[0] for p in points)), max((p[0] for p in points))
miny, maxy = min((p[1] for p in points)), max((p[1] for p in points))
return minx, miny, maxx-minx, maxy - miny
def scale_to_square(points, size):
min_x, min_y, w, h = bounding_box(points)
newPoints = []
for p in points:
qx = p[0] * (float(size) / w )
qy = p[1] * (float(size) / h )
newPoints.append([qx,qy])
return newPoints
def translate_to_origin(points):
cx, cy = centroid(points)
newpoints = []
for p in points:
qx, qy = p[0] - cx , p[1] - cy
newpoints.append([qx,qy])
return newpoints
def distance_at_best_angle(points, T, ta, tb, td):
x1 = Phi * ta + (1 - Phi) * tb
f1 = distance_at_angle(points, T, x1)
x2 = (1 - Phi) * ta + Phi * tb
f2 = distance_at_angle(points, T, x2)
while abs(tb - ta) > td:
if f1 < f2:
tb,x2,f2 = x2, x1, f1
x1 = Phi * ta + (1 - Phi) * tb
f1 = distance_at_angle(points, T, x1)
else:
ta,x1,f1 = x1, x2, f2
x2 = (1 - Phi) * ta + Phi * tb
f2 = distance_at_angle(points, T, x2)
return min(f1, f2)
def distance_at_angle(points, T, theta):
newpoints = rotate_by(points, theta)
d = pathdistance(newpoints, T)
return d
def pathdistance(a,b):
d = 0
for ai, bi in zip(a,b):
d += distance(ai, bi)
return d / len(a)
| 37.8125 | 1,355 | 0.528475 | 3,481 | 0.523065 | 0 | 0 | 0 | 0 | 0 | 0 | 1,797 | 0.270023 |
20a715a70344718e9a5265f1a46c33cf8a852d19 | 549 | py | Python | course_contents/exercice_templates/npplt.py | maganoegi/python_course_autumn_2021 | 0b297040f4e1eba654f125ee60ac4fa8e06b093b | [
"MIT"
] | null | null | null | course_contents/exercice_templates/npplt.py | maganoegi/python_course_autumn_2021 | 0b297040f4e1eba654f125ee60ac4fa8e06b093b | [
"MIT"
] | null | null | null | course_contents/exercice_templates/npplt.py | maganoegi/python_course_autumn_2021 | 0b297040f4e1eba654f125ee60ac4fa8e06b093b | [
"MIT"
] | 1 | 2021-12-07T07:15:19.000Z | 2021-12-07T07:15:19.000Z |
import numpy as np
import matplotlib.pyplot as plt
import random
if __name__ == '__main__':
ones = np.ones(30, dtype=np.uint8)
print(ones)
doubled = [x * 2 for x in ones]
doubled = ones * 2
print(doubled)
negatives = ones - doubled
print(negatives)
y = np.random.rand(30)
y *= 20
print(y)
x = range(0, 30)
print(x)
plt.plot(x, y, 'r')
plt.xlabel("nombre de personnes")
plt.ylabel("une information associée")
plt.title("ma graphe")
plt.savefig("mafig.png")
plt.show()
| 16.636364 | 42 | 0.599271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.150909 |
20a7c1efa318407820ebb42f64ba8d670b3812f6 | 1,800 | py | Python | order/models.py | akashbindal91/django_ecommerce_practice | 28b882efe384f8f5312e98cd06263f095756b3a9 | [
"MIT"
] | null | null | null | order/models.py | akashbindal91/django_ecommerce_practice | 28b882efe384f8f5312e98cd06263f095756b3a9 | [
"MIT"
] | null | null | null | order/models.py | akashbindal91/django_ecommerce_practice | 28b882efe384f8f5312e98cd06263f095756b3a9 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Order(models.Model):
"""
docstring
"""
token = models.CharField(max_length=250, blank=True)
total = models.DecimalField(
verbose_name="GBP Order Total", max_digits=10, decimal_places=2)
emailAddress = models.EmailField(
verbose_name="Email Address", max_length=254, blank=True)
created = models.DateTimeField(auto_now_add=True)
billingName = models.CharField(max_length=250, blank=True)
billingAddress1 = models.CharField(max_length=250, blank=True)
billingCity = models.CharField(max_length=250, blank=True)
billingPostCode = models.CharField(max_length=250, blank=True)
billingCountry = models.CharField(max_length=250, blank=True)
shippingName = models.CharField(max_length=250, blank=True)
shippingAddress1 = models.CharField(max_length=250, blank=True)
shippingCity = models.CharField(max_length=250, blank=True)
shippingPostCode = models.CharField(max_length=250, blank=True)
shippingCountry = models.CharField(max_length=250, blank=True)
class Meta:
"""
docstring
"""
db_table = 'Order'
ordering = ['-created']
def __str__(self):
return str(self.id)
class OrderItem(models.Model):
"""
docstring
"""
product = models.CharField(max_length=254)
quantity = models.IntegerField()
price = models.DecimalField(
max_digits=10, decimal_places=2, verbose_name="GBP Order Total",)
order = models.ForeignKey(
Order, on_delete=models.CASCADE)
class Meta:
"""
docstring
"""
db_table = 'OrderItem'
def sub_total(self):
return self.quantity * self.price
def __str__(self):
return self.product
| 30 | 73 | 0.675556 | 1,737 | 0.965 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.121667 |
20a95777691d44ffde183bf2cdc61a1f00e86245 | 2,178 | py | Python | tests/test_RecordsDB.py | SamWolski/LabWeaver-analysis | 43e019987b521946ebdcd6c8385e8a1f31e81d8f | [
"MIT"
] | null | null | null | tests/test_RecordsDB.py | SamWolski/LabWeaver-analysis | 43e019987b521946ebdcd6c8385e8a1f31e81d8f | [
"MIT"
] | null | null | null | tests/test_RecordsDB.py | SamWolski/LabWeaver-analysis | 43e019987b521946ebdcd6c8385e8a1f31e81d8f | [
"MIT"
] | null | null | null | import pytest
import os
import LabWeaver_analysis as lw_ana
DB_DIR = os.path.abspath("tests/assets/db")
@pytest.fixture
def existing_records_db():
db_path = os.path.join(DB_DIR, "records_existing.db")
return lw_ana.RecordsDB(db_path)
def test_fetch(existing_records_db):
fetched_record = existing_records_db.filter_records(
("experiment = 'X2021-03-17'",
"meas_id = '0001'"))[0]
assert fetched_record == {"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"}
def test_fetch_multiple(existing_records_db):
fetched_records = existing_records_db.filter_records(
("cooldown = 'CDX1'",))
assert fetched_records == [{"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"},
{"experiment": "X2021-03-17",
"meas_id": "0002",
"cooldown": "CDX1",
"meas_type": "Qubit Ramsey"}]
@pytest.fixture(scope="function")
def new_records_db():
    """A RecordsDB on a throwaway file, removed again at teardown.

    FIX: ``scope`` used to be a *parameter* of the fixture function
    (``def new_records_db(scope="function")``); pytest ignores parameters
    with defaults, so it configured nothing.  The scope belongs on the
    ``@pytest.fixture`` decorator.
    """
    db_path = os.path.join(DB_DIR, "records_temp.db")
    ## Return as yield to allow for teardown/destructor
    yield lw_ana.RecordsDB(db_path)
    ## Teardown - delete temp file
    os.remove(db_path)
@pytest.fixture(scope="function")
def new_single_record_db(new_records_db):
    """``new_records_db`` pre-loaded with one known record.

    FIX: the former ``scope="function"`` function parameter was dead code
    (pytest skips parameters with defaults); scope is now declared on the
    decorator where it actually takes effect.
    """
    ## Add a new record to the database
    new_record = {"experiment": "X2021-03-17",
                  "meas_id": "0001",
                  "cooldown": "CDX1",
                  "meas_type": "Qubit Rabi amplitude"}
    new_records_db.add_record(new_record)
    yield new_records_db
def test_create_assign_fetch(new_single_record_db):
    """A record inserted through the fixture can be fetched back by its uids."""
    expected = {"experiment": "X2021-03-17",
                "meas_id": "0001",
                "cooldown": "CDX1",
                "meas_type": "Qubit Rabi amplitude"}
    matches = new_single_record_db.filter_records(
        ("experiment = 'X2021-03-17'",
         "meas_id = '0001'"))
    assert matches[0] == expected
def test_create_assign_delete(new_single_record_db):
    """Deleting the only record leaves the database empty."""
    new_single_record_db.delete_record(("X2021-03-17", "0001"))
    # head() lists remaining records; nothing should be left.
    assert new_single_record_db.head() == []
| 28.657895 | 60 | 0.665289 | 0 | 0 | 524 | 0.240588 | 687 | 0.315427 | 0 | 0 | 842 | 0.386593 |
20aa02e451e1768804d337782ea383cc0aaebba5 | 7,999 | py | Python | src/hooks.py | nsaphra/layer-tagger | 542f5652256d7d3a3ba50b21b034a700c4e5a5cc | [
"MIT"
] | null | null | null | src/hooks.py | nsaphra/layer-tagger | 542f5652256d7d3a3ba50b21b034a700c4e5a5cc | [
"MIT"
] | null | null | null | src/hooks.py | nsaphra/layer-tagger | 542f5652256d7d3a3ba50b21b034a700c4e5a5cc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from os.path import join, isfile
import data
class TaggerHook:
    """Forward-hook probe: trains a small MLP tagger on one layer's
    activations and tracks its evaluation loss/accuracy.

    The tagger is built lazily (``construct_model``) the first time an
    activation with a usable shape is observed, because the layer's output
    width is only known at run time.

    FIX: ``compute_loss``/``compute_accuracy`` used ``is 0`` (object
    identity) instead of ``== 0`` to test the counter; ``is`` on ints is
    implementation-dependent and a SyntaxWarning on modern CPython.
    """

    def __init__(self, analyzer, key, module, output_vocab, save_prefix, hidden_size=None, dropout=0.5):
        self.module = module
        self.output_size = len(output_vocab)
        self.hidden_size = hidden_size
        self.input_size = None  # discovered from the first activation seen
        self.dropout = dropout
        self.tagger = None      # built lazily in construct_model()
        if hidden_size is None:
            self.hidden_size = self.output_size  # as specified in belinkov et al.
        self.key = key
        self.handle = None      # handle of the registered forward hook
        self.cumulative_eval_loss = 0
        self.num_correct = 0
        self.num_labeled = 0
        self.label_targets = None
        self.checkpoint = join('{}.{}.model'.format(save_prefix, self.key))
        self.best_validation_loss = None
        self.analyzer = analyzer
        # TODO inspect individual label performance

    def construct_model(self):
        """Build the two-layer MLP tagger once ``input_size`` is known."""
        self.tagger = nn.Sequential(
            nn.Linear(self.input_size, self.hidden_size),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.output_size)
        )
        self.tagger.cuda()
        self.tagger.train()
        print(self.key)
        print(self.tagger)
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.tagger.parameters(), lr=0.001)

    def set_batch(self, label_targets):
        # Gold labels for the next forward pass through the hooked module.
        self.label_targets = label_targets

    def training_hook(self, layer, input, output):
        """Forward hook used during training: one SGD step on the tagger."""
        activation = self.process_activations(output, self.label_targets)
        if activation is None:
            return
        self.tagger.requires_grad = True
        self.optimizer.zero_grad()
        prediction = self.tagger(activation)
        prediction_flat = prediction.view(-1, self.output_size)
        loss = self.criterion(prediction_flat, self.label_targets)
        loss.backward()
        self.optimizer.step()

    def testing_hook(self, layer, input, output):
        """Forward hook used during evaluation: accumulate loss/accuracy."""
        activation = self.process_activations(output, self.label_targets)
        if activation is None:
            return
        self.tagger.requires_grad = False
        prediction = self.tagger(activation)
        prediction_flat = prediction.view(-1, self.output_size)
        self.num_labeled += self.label_targets.size(0)
        # Loss is accumulated weighted by batch size so compute_loss() can
        # divide by the total token count.
        self.cumulative_eval_loss += self.label_targets.size(0) * self.criterion(prediction_flat, self.label_targets).data
        label_predictions = torch.max(prediction_flat.data, 1)[1]
        self.num_correct += (label_predictions == self.label_targets.data).sum()

    def process_activations(self, activations, sequence):
        """Normalise a module output into a (tokens x hidden) tensor, or
        return None when it cannot be aligned with ``sequence``."""
        if activations is None:
            return None
        if type(activations) is tuple:
            if type(activations[0]) is torch.cuda.FloatTensor:
                activations = torch.stack(activations, dim=0)
            else:
                for output in activations:
                    activation = self.process_activations(output, sequence)
                    if activation is not None:
                        return activation  # use the first output that has the correct dimensions
                return None
        elif type(activations.data) is not torch.cuda.FloatTensor:
            return None
        if activations.dim() > 3:
            return None
        if activations.dim() == 3 and (activations.size(0) * activations.size(1)) == (sequence.size(0)):
            # activations: sequence_length x batch_size x hidden_size
            activations = activations.view(sequence.size(0), -1)
        if activations.size(0) != sequence.size(0):
            return None
        # activations: (sequence_length * batch_size) x hidden_size
        if self.input_size is None:
            self.input_size = activations.size(1)
            self.construct_model()
        # wrap activations in a new Variable to block backprop into the model
        return Variable(activations.data)

    def register_hook(self, module, evaluation=True):
        """(Re)attach this probe to ``module`` and reset the eval counters."""
        if self.handle is not None:
            self.handle.remove()
        self.cumulative_eval_loss = 0
        self.num_correct = 0
        self.num_labeled = 0
        self.label_targets = None
        if evaluation:
            self.tagger.eval()
            self.handle = module.register_forward_hook(self.testing_hook)
        else:
            if self.tagger is not None:
                self.tagger.train()
            self.handle = module.register_forward_hook(self.training_hook)

    def save_model(self):
        """Serialise the tagger to its checkpoint path."""
        with open(self.checkpoint, 'wb') as file:
            torch.save(self.tagger, file)

    def load_model(self):
        """Restore the tagger from its checkpoint, if one exists."""
        if not isfile(self.checkpoint):
            return
        with open(self.checkpoint, 'rb') as file:
            self.tagger = torch.load(file)

    def save_best_model(self):
        """Checkpoint the tagger whenever validation loss improves."""
        loss = self.compute_loss()
        if loss is None:
            return
        if self.best_validation_loss is None or loss < self.best_validation_loss:
            self.best_validation_loss = loss
            self.save_model()

    def compute_loss(self):
        """Mean eval loss per labeled token, or None if nothing was seen."""
        if self.num_labeled == 0:  # was `is 0`: identity test on an int
            return None
        return self.cumulative_eval_loss[0] / self.num_labeled

    def compute_accuracy(self):
        """Percentage of correctly tagged tokens, or None if nothing seen."""
        if self.num_labeled == 0:  # was `is 0`: identity test on an int
            return None
        return 100 * self.num_correct / self.num_labeled
class NetworkLayerInvestigator:
    """Attaches a TaggerHook probe to every parameterised sub-module of a
    model and aggregates per-layer probing results."""

    def __init__(self, model, output_vocab, batch_size, bptt, save_prefix):
        self.hooks = {}          # module name -> TaggerHook
        self.model = model
        self.output_vocab = output_vocab
        self.batch_size = batch_size
        self.bptt = bptt         # back-prop-through-time window length
        self.results = {}
        self.evaluation = False
        self.next_batch = 0
        self.batch_hook = None
        self.save_prefix = save_prefix

    @staticmethod
    def module_output_size(module):
        # return the size of the final parameters in the module,
        # or 0 if there are no parameters
        output_size = 0
        for key, parameter in module.named_parameters():
            if key.find('weight') < 0:
                continue
            output_size = parameter.size(-1)
        return output_size

    def get_batch(self, module, input, output):
        """Forward hook: slice the next target batch and hand it to every
        registered TaggerHook.

        NOTE(review): relies on ``self.data_source`` being assigned
        externally before the model runs — it is never set in ``__init__``;
        confirm the caller's contract.
        """
        i = self.next_batch
        self.next_batch += 1
        seq_len = min(self.bptt, len(self.data_source) - 1 - i)
        data = Variable(self.data_source[i:i+seq_len], volatile=self.evaluation)
        # Targets are the inputs shifted by one position (LM-style).
        target = Variable(self.data_source[i+1:i+1+seq_len].view(-1))
        for key, handle in self.hooks.items():
            handle.set_batch(target)

    def set_label_batch(self, labels):
        # Broadcast the gold labels for the upcoming forward pass to all hooks.
        for key, hook in self.hooks.items():
            hook.set_batch(labels)

    def add_model_hooks(self, evaluation=False):
        """Create (on first use) and (re)register a TaggerHook on every
        sub-module that owns weight parameters."""
        self.evaluation = evaluation
        for module_key, module in self.model.named_modules():
            output_size = self.module_output_size(module)
            if output_size == 0:
                continue
            if module_key not in self.hooks:
                self.hooks[module_key] = TaggerHook(self, module_key, module, self.output_vocab, self.save_prefix)
            # TODO use module.apply()
            self.hooks[module_key].register_hook(module, evaluation=self.evaluation)

    def save_best_taggers(self):
        # assumes that we have recently run a validation set, which is in the current results
        for key, tagger in self.hooks.items():
            tagger.save_best_model()

    def load_best_taggers(self):
        # run before running on official test set
        for key, tagger in self.hooks.items():
            tagger.load_model()

    def results_dict(self):
        """Collect the current loss/accuracy for every hooked layer."""
        for key, tagger in self.hooks.items():
            self.results[key] = {
                'loss': tagger.compute_loss(),
                'accuracy': tagger.compute_accuracy()
            }
        return self.results
| 34.038298 | 122 | 0.620578 | 7,821 | 0.977747 | 0 | 0 | 373 | 0.046631 | 0 | 0 | 607 | 0.075884 |
20aa2b45072a77da6b8b4f3ae6b02d4f7ca10fa0 | 61 | py | Python | django_libretto/__init__.py | ze-phyr-us/django-libretto | b19d8aa21b9579ee91e81967a44d1c40f5588b17 | [
"MIT"
] | null | null | null | django_libretto/__init__.py | ze-phyr-us/django-libretto | b19d8aa21b9579ee91e81967a44d1c40f5588b17 | [
"MIT"
] | null | null | null | django_libretto/__init__.py | ze-phyr-us/django-libretto | b19d8aa21b9579ee91e81967a44d1c40f5588b17 | [
"MIT"
] | null | null | null | from . import decorators, forms, http, models, template, url
| 30.5 | 60 | 0.754098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
20aa667a3ce1f1ef065e0ad54f6366095acc219b | 149 | py | Python | program-4/4a)Extracting substring.py | sumukhmg/PYTHON-LAB-SET-PROGRAMS | 685d11ae8dd6e4352f83288fff1e3922e84bf022 | [
"MIT"
] | 12 | 2022-02-19T14:03:41.000Z | 2022-03-16T12:52:47.000Z | program-4/4a)Extracting substring.py | sumukhmg/PYTHON-LAB-SET-PROGRAMS | 685d11ae8dd6e4352f83288fff1e3922e84bf022 | [
"MIT"
] | 1 | 2022-03-02T14:55:37.000Z | 2022-03-02T14:55:37.000Z | program-4/4a)Extracting substring.py | sumukhmg/PYTHON-LAB-SET-PROGRAMS | 685d11ae8dd6e4352f83288fff1e3922e84bf022 | [
"MIT"
] | 2 | 2022-03-02T14:36:14.000Z | 2022-03-12T08:37:25.000Z | a = input("Enter the string:")
b = a.find("@")
c = a.find("#")
print("The original string is:",a)
print("The substring between @ and # is:",a[b+1:c]) | 29.8 | 51 | 0.604027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.57047 |
20aac86b0cfe5ca3694d0752f807ee2ec5a74264 | 275 | py | Python | tests/test_ui_systemtray.py | scottwernervt/clipmanager | 34e9f45f7d9a3cef423d9d54df5d220aed5fd821 | [
"BSD-4-Clause"
] | 12 | 2016-02-11T04:14:35.000Z | 2021-12-16T08:13:05.000Z | tests/test_ui_systemtray.py | scottwernervt/clipmanager | 34e9f45f7d9a3cef423d9d54df5d220aed5fd821 | [
"BSD-4-Clause"
] | 11 | 2018-02-01T21:20:08.000Z | 2018-07-20T16:02:01.000Z | tests/test_ui_systemtray.py | scottwernervt/clipmanager | 34e9f45f7d9a3cef423d9d54df5d220aed5fd821 | [
"BSD-4-Clause"
] | null | null | null | import pytest
from clipmanager.ui.systemtray import SystemTrayIcon
@pytest.fixture()
def systemtray():
    """A SystemTrayIcon instance that has been shown."""
    icon = SystemTrayIcon()
    icon.show()
    return icon
class TestSystemTrayIcon:
    """Smoke tests for the system tray icon widget."""
    def test_is_visible(self, systemtray):
        # The fixture calls show(), so the icon must report itself visible.
        assert systemtray.isVisible()
| 17.1875 | 52 | 0.730909 | 106 | 0.385455 | 0 | 0 | 95 | 0.345455 | 0 | 0 | 0 | 0 |
20ab73fd70a43c284e00592caac6b6a270c82873 | 5,413 | py | Python | OverlapSep/lda_classify.py | PingjunChen/ChromosomeSeg | 4b5e576696e9998558478fd4ec6b74809ceea49c | [
"MIT"
] | null | null | null | OverlapSep/lda_classify.py | PingjunChen/ChromosomeSeg | 4b5e576696e9998558478fd4ec6b74809ceea49c | [
"MIT"
] | null | null | null | OverlapSep/lda_classify.py | PingjunChen/ChromosomeSeg | 4b5e576696e9998558478fd4ec6b74809ceea49c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os, sys
import json
import numpy as np
import pickle
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
from sklearn import svm
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
def load_json_data(json_path):
fea_dict = json.load(open(json_path))
fea_category_dict = {}
for key in fea_dict.keys():
cat = key[:key.find('_')]
if cat not in fea_category_dict:
fea_category_dict[cat] = [fea_dict[key]]
else:
fea_category_dict[cat].append(fea_dict[key])
return fea_category_dict
def generate_data_label(feas):
Data = []
Label = []
for iter in range(0, 24):
if iter<22:
tn = len(feas[str(iter+1)])
for jter in range(tn):
temp = feas[str(iter+1)][jter]
Data.append(np.array(temp))
Label.append([iter])
elif iter==22:
tn = len(feas['X'])
for jter in range(tn):
temp = feas['X'][jter]
Data.append(np.array(temp))
Label.append([iter])
else:
tn = len(feas['Y'])
for jter in range(tn):
temp = feas['Y'][jter]
Data.append(np.array(temp))
Label.append([iter])
Data = np.array(Data)
Label = np.squeeze(Label)
return Data, Label
def data_normalize(trainData, testData):
trainData = normalize(trainData, norm='max')
testData = normalize(testData, norm='max')
mean_data = np.mean(trainData, axis=0, keepdims=True)
trainData = trainData - np.matmul(np.ones((trainData.shape[0],1)), mean_data)
testData = testData - np.matmul(np.ones((testData.shape[0],1)), mean_data)
return trainData, testData
def construct_graph(Label):
tn = Label.shape[0]
uLabel = np.unique(Label)
W = np.zeros((tn,tn))
for iter in range(len(uLabel)):
index = np.squeeze(np.where(Label==uLabel[iter]))
W[np.ix_(index, index)]=1.0/index.size
return W
def LDA(trainData, trainLabel):
class_mean = []
uLabel = np.unique(trainLabel)
class_num = len(uLabel)
W = construct_graph(trainLabel)
Sw = np.matmul(np.matmul(np.transpose(trainData), W), trainData)
Sb = np.matmul(np.transpose(trainData),trainData)
U,D,_ = np.linalg.svd(Sb+0.0001*np.eye(Sb.shape[0]),full_matrices=True)
D = np.diag(np.sqrt(D))
uD = np.matmul(U, np.linalg.inv(D))
S = np.matmul(np.matmul(np.matmul(np.linalg.inv(D),np.transpose(U)), Sw), uD)
Y,_,_= np.linalg.svd(S)
A = np.matmul(uD, Y)
trainVector = np.matmul(trainData, A)
for iter in range(len(uLabel)):
index = np.squeeze(np.where(trainLabel==uLabel[iter]))
temp = np.mean(trainVector[index,:], axis=0, keepdims=True)
class_mean.extend(temp) # saved based on the labels
return A, np.array(class_mean)
def Classify(testData, testLabel, A, class_mean):
testVector = np.matmul(testData, A)
ten = testVector.shape[0]
class_num =class_mean.shape[0]
dist = []
for iter in range(ten):
temp = np.sum((np.matmul(np.ones((class_num,1)), np.expand_dims(testVector[iter,:], axis=0)) - class_mean)**2, axis=1)
dist.append(temp)
dist = np.array(dist)
predict = np.squeeze(np.argmin(dist, axis=1))
accuracy = accuracy_score(testLabel, predict)
CM = confusion_matrix(testLabel, predict)
return dist, accuracy, CM
def lda_pred(fea, project_mat, class_mean):
testVector = np.matmul(fea, project_mat)
class_num = class_mean.shape[0]
dist = np.sum((np.matmul(np.ones((class_num,1)), np.expand_dims(testVector, axis=0)) - class_mean)**2, axis=1)
label, min_dist = np.argmin(dist), min(dist)
return label, min_dist
if __name__ == "__main__":
train_fea_path = "../data/OverlapSep/chromosome_train_feas.json"
test_fea_path = "../data/OverlapSep/chromosome_test_feas.json"
# Both train_feas and test_feas are dictionary with 24 keys, '1', '2',...,'X','Y'
# For each key, there are a list of feature descriptor for chromosome
# Each feature descriptor is also a list with 25 elements
train_feas = load_json_data(train_fea_path)
test_feas = load_json_data(test_fea_path)
# Organize data
trainData, trainLabel = generate_data_label(train_feas)
testData, testLabel = generate_data_label(test_feas)
# LDA classification
# trainData, testData = data_normalize(trainData, testData)
project_mat, class_mean= LDA(trainData, trainLabel) # A is the projection matrix
# save lda model
lda_model_dict = {
'ProjectMat': project_mat,
'ClassMean': class_mean}
lda_model_path = os.path.join("../data/Models/LDA", "lda_model.pkl")
with open(lda_model_path, "wb") as handle:
pickle.dump(lda_model_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# # sample-by-sample prediction
# correct_num, total_num = 0, testData.shape[0]
# for it in np.arange(total_num):
# label, _ = lda_pred(testData[it], project_mat, class_mean)
# if label == testLabel[it]:
# correct_num += 1
# test_acc = correct_num * 1.0 / total_num
# print("Testing accuracy is: {:.3f}".format(test_acc))
# prediction and
dist, lda_accuracy, CM = Classify(testData, testLabel, project_mat, class_mean) # dist is with the size test number * class_num
print("The lda accuracy is: {:.3f}".format(lda_accuracy))
# plt.imshow(CM, interpolation='nearest', cmap=plt.cm.Blues)
# plt.title("Chromosome Contour Prediction Confusion Matrix")
# plt.xlabel('Predicted label')
# plt.ylabel('True label')
# labels = np.arange(24)
# tick_marks = np.arange(len(labels))
# plt.xticks(tick_marks, labels, rotation=90)
# plt.yticks(tick_marks, labels)
# plt.tight_layout()
# plt.show()
| 27.902062 | 130 | 0.706632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,344 | 0.248291 |
20ab84e3551b4b2def018eef997c7f3a722947b1 | 169 | py | Python | 01_getting_started_with_python/src/_solutions/fahrenheit_to_celsius.py | hello-world-academy/beiersdorf_05-06-2019 | 79b244ae21fa2815bf429653ccf6b51ddc468901 | [
"MIT"
] | 1 | 2020-01-17T14:51:40.000Z | 2020-01-17T14:51:40.000Z | 01_getting_started_with_python/src/_solutions/fahrenheit_to_celsius.py | hello-world-academy/beiersdorf_05-06-2019 | 79b244ae21fa2815bf429653ccf6b51ddc468901 | [
"MIT"
] | null | null | null | 01_getting_started_with_python/src/_solutions/fahrenheit_to_celsius.py | hello-world-academy/beiersdorf_05-06-2019 | 79b244ae21fa2815bf429653ccf6b51ddc468901 | [
"MIT"
] | 1 | 2020-12-04T15:37:28.000Z | 2020-12-04T15:37:28.000Z |
def fahrenheit_to_celsius(F):
    """Convert a temperature from Fahrenheit to Celsius via Kelvin."""
    kelvin = fahrenheit_to_kelvin(F)
    return kelvin_to_celsius(kelvin)
| 18.777778 | 47 | 0.656805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.349112 |
20ade6e8b171d7ee233a3173d6a3f1af222b52e9 | 16,846 | py | Python | treegen.py | murawaki/dialect-latgeo | 24ecb8bee5e489f41afb4432bf0bf17246b5c631 | [
"MIT"
] | 3 | 2020-10-30T15:28:24.000Z | 2021-03-22T19:38:47.000Z | treegen.py | murawaki/dialect-latgeo | 24ecb8bee5e489f41afb4432bf0bf17246b5c631 | [
"MIT"
] | null | null | null | treegen.py | murawaki/dialect-latgeo | 24ecb8bee5e489f41afb4432bf0bf17246b5c631 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import random
import sys
from collections import Counter
import json
from argparse import ArgumentParser
from rand_utils import rand_partition
def build_tree(num_leaves = 10, rootdate = 1000):
"""
Starting from a three-node tree, split a randomly chosen branch to insert a new child
TODO: replace this with a coalescent method
"""
def _get_target_node_by_total_time(node, r):
interval1 = (node["date"] - node["left"]["date"]) * node["left"]["stability"]
if interval1 > r:
return node, True, r
r -= interval1
if node["left"]["left"] is not None:
node2, is_left, r2 = _get_target_node_by_total_time(node["left"], r)
if node2 is not None:
return node2, is_left, r2
r = r2
interval2 = (node["date"] - node["right"]["date"]) * node["right"]["stability"]
if interval2 > r:
return node, False, r
if node["right"]["left"] is not None:
return _get_target_node_by_total_time(node["right"], r - interval2)
return None, False, r - interval2
# endef
gshape, gscale = 2.0, 0.5
tree = {
"date": rootdate,
"left": {
"date": 0,
"left": None,
"right": None,
"name": "L0",
"stability": np.random.gamma(gshape, gscale),
},
"right": {
"date": 0,
"left": None,
"right": None,
"name": "L1",
"stability": np.random.gamma(gshape, gscale),
},
"name": "I0",
"stability": 1.0,
}
cur_leafnum = 2
cur_inodenum = 1
# totaltime = rootdate * 2
totaltime = rootdate * (tree["left"]["stability"] + tree["right"]["stability"])
while cur_leafnum < num_leaves:
r = np.random.uniform(0, totaltime)
parent, is_left, r2 = _get_target_node_by_total_time(tree, r)
cnode = {
"date": 0,
"left": None,
"right": None,
"name": "L{}".format(cur_leafnum),
"stability": np.random.gamma(gshape, gscale),
}
inode = {
"left": None,
"right": None,
"name": "I{}".format(cur_inodenum),
}
if is_left:
inode["date"] = parent["date"] - r2 / parent["left"]["stability"]
assert(inode["date"] > 0)
inode["stability"] = parent["left"]["stability"]
inode["right"] = cnode
inode["left"] = parent["left"]
parent["left"] = inode
else:
inode["date"] = parent["date"] - r2 / parent["right"]["stability"]
inode["stability"] = parent["right"]["stability"]
inode["left"] = cnode
inode["right"] = parent["right"]
parent["right"] = inode
# totaltime += inode["date"]
totaltime += inode["date"] * cnode["stability"]
cur_leafnum += 1
cur_inodenum += 1
return tree
def set_locations_by_random_walk(tree, variance=1.0):
"""
Perform simple random walks to assign coordinates
"""
def _set_locations_main(parent, node, variance):
interval = parent["date"] - node["date"]
_var = variance * interval
loc = np.random.multivariate_normal([parent["x"], parent["y"]], [[_var, 0.0], [0.0, _var]])
node["x"] = loc[0]
node["y"] = loc[1]
if node["left"] is not None:
assert(node["right"] is not None)
_set_locations_main(node, node["left"], variance)
_set_locations_main(node, node["right"], variance)
# endef
tree["x"] = tree["y"] = 0.0
_set_locations_main(tree, tree["left"], variance=variance)
_set_locations_main(tree, tree["right"], variance=variance)
def gen_traits(tree, _lambda=1.0, fnum=100):
"""
At each node,
- randomly choose the number of birth events
- for each birth event, randomly decide which feature is to be updated
"""
def _gen_traits_main(parent, node, flist, vcount, _lambda):
interval = parent["date"] - node["date"]
node["catvect"] = np.copy(parent["catvect"])
# # replace features num times
# num = np.random.poisson(_lambda * interval)
# # the same feature can be updated multiple times along a branch
# target_features = np.unique(np.random.randint(0, len(flist), size=num))
target_features = {}
t = 0.0
while True:
r = np.random.exponential(scale=1.0 / _lambda)
t += r
if t >= interval:
break
# the rich gets richer
weights = list(map(lambda x: x["size"] + 1.0, flist))
fid = rand_partition(weights)
if fid in target_features:
# the same feature can be updated multiple times along a branch
# just update the time
fval = node["catvect"][fid]
fnode["annotation"]["vid2date"][fval] = parent["date"] + t
else:
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
target_features[fid] = t
# ensure that at least one event happens
if len(target_features) <= 0:
t = np.random.uniform(0.0, interval)
fid = np.random.randint(0, len(flist))
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
if node["left"] is not None:
assert(node["right"] is not None)
vcount = _gen_traits_main(node, node["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(node, node["right"], flist, vcount, _lambda)
return vcount
# endef
flist = []
for i in range(fnum):
flist.append({
"fid": i,
"size": 1,
"type": "cat",
"annotation": {
"vid2date": {
i: 0,
}
},
})
tree["catvect"] = np.arange(fnum)
vcount = fnum
vcount = _gen_traits_main(tree, tree["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(tree, tree["right"], flist, vcount, _lambda)
return flist, vcount
def update_tree_by_borrowings(tree, flist, nu=0.05):
    """Rewrite feature values by horizontal transfer between contemporaneous
    lineages; each feature borrows with probability `nu`, and donors are
    chosen with weights that decay with geographic distance."""
    def _update_nodeval(node, fid, oldv, newv):
        # Propagate the borrowed value down the subtree, but only through
        # descendants that still carry the pre-borrowing value.
        if node["catvect"][fid] != oldv:
            return 0
        node["catvect"][fid] = newv
        change = 1
        if node["left"] is not None:
            change += _update_nodeval(node["left"], fid, oldv, newv)
            change += _update_nodeval(node["right"], fid, oldv, newv)
        return change

    nodes = get_all_nodes(tree)
    nodes_by_date = sorted(nodes, key=lambda x: x["date"], reverse=True)
    for i in range(1, len(nodes_by_date)):
        node = nodes_by_date[i]
        # # # # #
        # if node["date"] == 0.0:
        #     break
        # collect branches that span this node's date (candidate donors)
        contemporary_nodes = []
        for pnode in nodes_by_date[:i]:
            if pnode["left"] is None:
                break
            if pnode["left"] is not node and pnode["left"]["date"] <= node["date"]:
                contemporary_nodes.append((pnode, pnode["left"]))
            if pnode["right"] is not node and pnode["right"]["date"] <= node["date"]:
                contemporary_nodes.append((pnode, pnode["right"]))
        assert(len(contemporary_nodes) > 0)
        # Donor weights decay with the spatial distance to the borrower.
        weights = []
        for pnode, cnode in contemporary_nodes:
            # TODO: weighted avg of the locations of pnode and cnode?
            dist = np.sqrt((node["x"] - cnode["x"]) ** 2 + (node["y"] - cnode["y"]) ** 2)
            weight = np.exp(20.0 * (max(dist / 3, 1.0) ** -0.5))
            weights.append(weight)
        weights = np.array(weights)
        # print(weights / weights.sum())
        for fid, is_borrowing in enumerate(np.random.rand(len(flist)) < nu):
            if not is_borrowing:
                continue
            cid = rand_partition(weights)
            pnode, cnode = contemporary_nodes[cid]
            # too similar, no chance to be documented separately
            if node["date"] == 0.0:
                overlap = (cnode["catvect"] == pnode["catvect"]).sum() / float(len(pnode["catvect"]))
                if overlap > 0.95:
                    sys.stderr.write("overlap {} ... skip\n".format(overlap))
                    continue
            v = cnode["catvect"][fid]
            if cnode["catvect"][fid] == pnode["catvect"][fid]:
                newval = v
            else:
                date = flist[fid]["annotation"]["vid2date"][v]
                if date > node["date"]:
                    newval = v
                else:
                    newval = pnode["catvect"][fid]
            # NOTE(review): `newval` is computed above but the update below
            # uses `v` — confirm which value was intended.
            # update only if the borrowed one is different from the original
            if node["catvect"][fid] != v:
                oldv = node["catvect"][fid]
                change = _update_nodeval(node, fid, oldv, v)
                sys.stderr.write("{} nodes updated\t{} -> {}\n".format(change, oldv, v))
def merge_leaves(tree, thres=0.98):
    """Collapse sibling leaf pairs whose trait vectors overlap by at least
    `thres`: the parent becomes a leaf carrying the left child's name."""
    stack = [tree]
    while len(stack) > 0:
        node = stack.pop(0)
        if node["left"] is not None:
            if node["left"]["left"] is None and node["right"]["left"] is None:
                # Both children are leaves; measure their trait overlap.
                assert(node["left"]["date"] == 0.0 and node["right"]["date"] == 0.0)
                overlap = (node["left"]["catvect"] == node["right"]["catvect"]).sum() / float(len(node["left"]["catvect"]))
                if overlap >= thres:
                    sys.stderr.write("overlap {} ... remove!\n".format(overlap))
                    node["name"] = node["left"]["name"]
                    node["date"] = 0.0
                    node["left"] = None
                    node["right"] = None
                    # restart the scan from the root: the merge may have
                    # created a new mergeable leaf pair higher up
                    # TODO: efficiency
                    stack = [tree]
                else:
                    sys.stderr.write("test passed {}\n".format(overlap))
            else:
                stack.append(node["left"])
                stack.append(node["right"])
def update_vids(tree, flist, keep_singletons=False):
    """Re-index feature values to dense per-feature ids (0..size-1),
    optionally dropping features that show only one value across the tree.

    Returns the compacted ``(flist, vcount)``.
    """
    nodes = get_all_nodes(tree)
    # Count, per feature, how often each value id occurs over all nodes.
    fidcounts = [Counter() for i in range(len(flist))]
    for node in nodes:
        for fid, v in enumerate(node["catvect"]):
            fidcounts[fid][v] += 1
    do_keep = np.ones(len(flist), dtype=np.bool_)
    if not keep_singletons:
        for fid in range(len(flist)):
            if len(fidcounts[fid]) <= 1:
                do_keep[fid] = 0
    num_removed = len(flist) - do_keep.sum()
    sys.stderr.write("remove {} singleton features\n".format(num_removed))
    # Boolean mask drops the removed features from every node's vector.
    for node in nodes:
        node["catvect"] = node["catvect"][do_keep]
    # Compact flist/fidcounts to the surviving features and renumber fids.
    flist2, fidcounts2 = [], []
    vcount = 0
    for is_kept, fnode, fidcount in zip(do_keep, flist, fidcounts):
        if is_kept:
            fnode["fid"] = len(flist2)
            flist2.append(fnode)
            fidcounts2.append(fidcount)
    flist = flist2
    fidcounts = fidcounts2
    vcount = 0
    for fid, (fnode, fidcount) in enumerate(zip(flist, fidcounts)):
        fnode["size"] = len(fidcount)
        vcount += fnode["size"]
        # Map surviving global value ids onto dense local ids, ordered by
        # their integer value; keep both directions of the mapping.
        labels = sorted(fidcount.keys(), key=int)
        fnode["annotation"]["label2vid"] = {}
        fnode["annotation"]["vid2label"] = []
        for vid, _label in enumerate(labels):
            fnode["annotation"]["label2vid"][_label] = vid
            fnode["annotation"]["vid2label"].append(_label)
        for node in nodes:
            node["catvect"][fid] = fnode["annotation"]["label2vid"][node["catvect"][fid]]
    return flist, vcount
def get_all_nodes(tree):
    """Return every node of ``tree`` in breadth-first order (root first).

    FIX: uses ``collections.deque`` — ``list.pop(0)`` shifts the whole
    list and made the traversal O(n^2); ``popleft`` is O(1).  The visit
    order is unchanged.
    """
    from collections import deque
    queue = deque([tree])
    nodes = []
    while queue:
        node = queue.popleft()
        nodes.append(node)
        if node["left"] is not None:
            queue.append(node["left"])
            queue.append(node["right"])
    return nodes
def get_leaves(node, leaves):
    """Append the leaves under ``node`` to ``leaves`` (left-to-right) and
    return the same list."""
    if node["left"] is None:
        # No left child means no children at all: this is a leaf.
        leaves.append(node)
    else:
        get_leaves(node["left"], leaves)
        get_leaves(node["right"], leaves)
    return leaves
def to_nexus(tree, flist, vcount, dump_tree=False):
    """Serialise the leaf trait matrix (one-hot over all value ids) as a
    NEXUS document; optionally append the tree in Newick form."""
    leaves = get_leaves(tree, [])
    # nexus header: taxa count, then a binary character matrix of vcount columns
    rv = "#NEXUS\r\nBEGIN TAXA;\r\nDIMENSIONS NTAX={};\r\nEND;\r\n".format(
        len(leaves),
    )
    rv += "\r\nBEGIN CHARACTERS;\r\nDIMENSIONS NCHAR={};\r\nFORMAT\r\n\tDATATYPE=STANDARD\r\n\tSYMBOLS=\"01\"\r\n\tMISSING=?\r\n\tGAP=-\r\n\tINTERLEAVE=NO\r\n;\r\nMATRIX\n\n".format(vcount)
    for node in leaves:
        # Names must not contain spaces or parentheses in NEXUS/Newick.
        name_normalized = node["name"].replace(" ", "_").replace("(", "").replace(")", "")
        # One-hot encode: column v is 1 iff some feature takes value id v.
        binrep = np.zeros(vcount, dtype=np.int32)
        for fid, v in enumerate(node["catvect"]):
            binrep[v] = 1
        rv += "{}\t{}\r".format(name_normalized, "".join(map(str, binrep.tolist())))
    rv += ";\r\nEND;\r\n"
    if dump_tree:
        def _dump_tree(parent, node):
            # Newick serialisation; branch length = parent date - node date.
            if node["left"] is not None:
                rv1 = _dump_tree(node, node["left"])
                rv2 = _dump_tree(node, node["right"])
                rv = "({},{})".format(rv1, rv2)
            else:
                rv = node["name"].replace(" ", "_").replace("(", "").replace(")", "")
            if parent is not None:
                rv += ":{}".format(parent["date"] - node["date"])
            return rv
        # endef
        rv += "\r\nBEGIN Trees;\r\nTree tree1 = "
        rv += _dump_tree(None, tree)
        rv += ";\r\nEND;\r\n"
    return rv
def main():
    """Command-line driver: simulate a time-tree with locations, traits and
    borrowings, then write the requested output files (JSON tree, per-leaf
    JSON records, feature list, NEXUS matrix)."""
    parser = ArgumentParser()
    parser.add_argument("-s", "--seed", metavar="INT", type=int, default=None,
                        help="random seed")
    parser.add_argument('--rootdate', type=float, default=1000.0)
    parser.add_argument('--num_leaves', type=int, default=10)
    parser.add_argument('--variance', type=float, default=5.0,
                        help="Brownian process parameter")
    parser.add_argument('--fnum', type=int, default=100,
                        help="# of features")
    parser.add_argument('--lambda', dest="_lambda", type=float, default=0.02,
                        help="parameter of a pure birth process")
    parser.add_argument('--nu', type=float, default=0.05,
                        help="borrowing parameter")
    parser.add_argument('--keep_singletons', action="store_true", default=False)
    parser.add_argument('--merge_thres', type=float, default=0.90,
                        help="merge near-identical leaves")
    parser.add_argument('--tree', type=str, default=None)
    parser.add_argument('--langs', type=str, default=None)
    parser.add_argument('--flist', type=str, default=None)
    parser.add_argument('--nexus', type=str, default=None)
    args = parser.parse_args()
    sys.stderr.write("args\t{}\n".format(args))
    if args.num_leaves <= 2:
        sys.stderr.write("# of leaves must be larger than 2\n")
        sys.exit(1)
    if args.seed is not None:
        np.random.seed(args.seed)
        # random.seed(args.seed)
    # build a time-tree
    tree = build_tree(args.num_leaves, args.rootdate)
    # assign an xy coordinate to each node
    set_locations_by_random_walk(tree, variance=args.variance)
    # generate features
    flist, vcount = gen_traits(tree, _lambda=args._lambda, fnum=args.fnum)
    sys.stderr.write("{}\n".format(tree))
    sys.stderr.write("{}\n".format(vcount))
    # sys.stderr.write("{}\n".format(flist))
    if args.nu > 0.0:
        update_tree_by_borrowings(tree, flist, nu=args.nu)
    # merge near-identical leaves
    # too similar, no chance to be documented separately
    merge_leaves(tree, thres=args.merge_thres)
    flist, vcount = update_vids(tree, flist, keep_singletons=args.keep_singletons)
    sys.stderr.write("{}\n".format(vcount))
    # JSON cannot serialise numpy arrays; convert trait vectors to lists.
    for node in get_all_nodes(tree):
        node["catvect"] = node["catvect"].tolist()
    if args.tree is not None:
        with open(args.tree, 'w') as f:
            f.write("{}\n".format(json.dumps(tree)))
    if args.langs is not None:
        with open(args.langs, 'w') as f:
            langs = get_leaves(tree, [])
            for lang in langs:
                f.write("{}\n".format(json.dumps(lang)))
    if args.flist is not None:
        with open(args.flist, 'w') as f:
            f.write("{}\n".format(json.dumps(flist, indent=4, sort_keys=True)))
    if args.nexus is not None:
        with open(args.nexus, 'w') as f:
            f.write(to_nexus(tree, flist, vcount, dump_tree=True))


if __name__ == "__main__":
    main()
| 37.941441 | 189 | 0.540841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,764 | 0.223436 |
20aee2c9b1337bdead59c82461021fc28b8c393c | 1,234 | py | Python | tools/generator/__init__.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | tools/generator/__init__.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | tools/generator/__init__.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2014 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from .business_objects import catalog, rfm_sets
VERSION = "0.2"
# T002, T002C
all_languages = {
# iso2
"ar": "AR - عربي",
"bg": "BG - Български",
"ca": "CA - Català",
"cs": "CS - Čeština",
"da": "DA - Dansk",
"de": "DE - Deutsch",
"el": "EL - Ελληνικά",
"en": "EN - English",
"es": "ES - Español",
"et": "ET - Eesti",
"fi": "FI - Suomi",
"fr": "FR - Français",
"he": "HE - עברית",
"hi": "HI - हिंदी",
"hr": "HR - Hrvatski",
"hu": "HU - Magyar",
"it": "IT - Italiano",
"ja": "JA - 日本語",
"kk": "KK - Қазақ",
"ko": "KO - 한국어",
"lt": "LT - Lietuvių",
"lv": "LV - Latviešu",
"nl": "NL - Nederlands",
"no": "NO - Norsk",
"pl": "PL - polski",
"pt": "PT - Português",
"ro": "RO - Română",
"ru": "RU - Русский",
"sh": "SH - Srpski (Lat.)",
"sk": "SK - Slovenčina",
"sl": "SL - Slovenščina",
"sv": "SV - Svenska",
"th": "TH - Thai",
"tr": "TR - Türkçe",
"uk": "UK - Українська",
"vi": "VI - Việt Nam",
"zf": "ZF - 繁體中文",
"zh": "ZH - 中文",
}
| 23.283019 | 79 | 0.468395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.702176 |
20b0e2f533dc8fad0f368de7588fa6959bfc4ef4 | 5,038 | py | Python | src/fft_from_image/Sequences.py | szymag/ZFN | d617057ee202e5ac026eb31b8a9598c95b385ba0 | [
"MIT"
] | 2 | 2016-09-13T06:19:40.000Z | 2017-02-24T21:41:07.000Z | src/fft_from_image/Sequences.py | szymag/ZFN | d617057ee202e5ac026eb31b8a9598c95b385ba0 | [
"MIT"
] | 3 | 2015-11-19T06:28:30.000Z | 2016-06-26T19:46:53.000Z | src/fft_from_image/Sequences.py | szymag/ZFN | d617057ee202e5ac026eb31b8a9598c95b385ba0 | [
"MIT"
] | 2 | 2015-11-17T14:46:58.000Z | 2017-10-31T21:35:15.000Z | from src.fft_from_image.ChainGeneration import ChainGeneration
import numpy as np
class ThueMorse(ChainGeneration):
    """Binary Thue-Morse chain: `tm_num` doubling steps, each symbol
    widened into a stripe of width `repeat`."""

    def __init__(self, repeat, tm_num):
        ChainGeneration.__init__(self, repeat)
        self.tm_num = tm_num

    @staticmethod
    def tm_construct(seq):
        # Bitwise complement of a 0/1 sequence (Thue-Morse doubling rule).
        return [(value + 1) % 2 for value in seq]

    def sequence(self):
        """Return the Thue-Morse word of 2**tm_num symbols, repeated."""
        chain = [0]
        for _ in range(self.tm_num):
            # Append the complement of everything generated so far.
            chain.extend(self.tm_construct(chain))
        return np.repeat(chain, self.repeat)
class Fibonacci(ChainGeneration):
    """Binary Fibonacci-word chain; stripes repeated `repeat` times."""
    def __init__(self, repeat, fib_num):
        ChainGeneration.__init__(self, repeat)
        self.fib_num = fib_num
    def fib_number(self):
        # Fast-doubling computation of the fib_num-th Fibonacci number
        # in O(log n) iterations; only the bits of n are walked.
        n = self.fib_num
        i = h = 1
        j = k = 0
        while n > 0:
            if n % 2 == 1:
                # Current bit set: fold (h, k) into the accumulator (i, j).
                t = j * h
                j = i * h + j * k + t
                i = i * k + t
            # Square the (h, k) doubling matrix for the next bit.
            t = h * h
            h = 2 * k * h + t
            k = k * k + t
            n = int(n / 2)
        return j
    def sequence_generator(self):
        # Build the Fibonacci word by concatenation: S(n) = S(n-1) + S(n-2),
        # seeded with the symbols [0] and [1].
        seq1 = [1]
        seq2 = [0]
        seq = seq2 + seq1
        for i in range(self.fib_num - 2):
            seq = seq2 + seq1
            seq1 = seq2
            seq2 = seq
        return np.array(seq)
    def sequence(self):
        # Widen each symbol into a stripe of width `repeat`.
        return np.repeat(self.sequence_generator(), self.repeat)
class Periodic(ChainGeneration):
    """Strictly alternating 1/0 chain of `num` symbols, each widened into a
    stripe of width `repeat`."""

    def __init__(self, repeat, num):
        ChainGeneration.__init__(self, repeat)
        self.num = num

    def sequence_generator(self):
        # Float array (as np.zeros yields): 1.0 on even positions, 0.0 on odd.
        pattern = np.zeros(self.num)
        pattern[0::2] = 1
        return pattern

    def sequence(self):
        return np.repeat(self.sequence_generator(), self.repeat)
class Random(ChainGeneration):
    """Chain of `num` symbols with `stripes1_count` ones placed at random
    positions; each symbol widened into a stripe of width `repeat`."""

    def __init__(self, repeat, num, stripes1_count):
        ChainGeneration.__init__(self, repeat)
        self.num = num
        self.stripes1_count = stripes1_count

    def sequence_generator(self):
        # Start with the ones packed at the front, then shuffle uniformly.
        base = np.zeros(self.num)
        base[:self.stripes1_count] = 1
        return np.random.permutation(base)

    def sequence(self):
        return np.repeat(self.sequence_generator(), self.repeat)
class Heated(ChainGeneration):
    """Smooth cosine profile sampled over `repeat` points."""

    def __init__(self, repeat):
        ChainGeneration.__init__(self, repeat)

    def cos_sequence(self):
        # One full cosine period rescaled from [-1, 1] to [0, 1].
        angles = np.linspace(0, 2 * np.pi, self.repeat)
        return 0.5 * (np.cos(angles) + 1)
class Custom(ChainGeneration):
    """Chain loaded from a whitespace-separated text file and min-max
    normalised to the [0, 1] interval."""
    def __init__(self, file_name, repeat=1):
        ChainGeneration.__init__(self, repeat)
        # Last row of the transposed array, i.e. the file's last column.
        self.tmp = np.transpose(np.loadtxt(file_name))[-1]
        # Min-max normalisation.  NOTE(review): divides by zero when the
        # column is constant -- confirm inputs always vary.
        self.data = (self.tmp - np.min(self.tmp))
        self.data /= np.max(self.data)
    def sequence(self):
        # `repeat` is not applied here; the raw normalised column is returned.
        return self.data
class Phason:
    """Introduce phason flips (local 1/0 swaps) into a Fibonacci ('F'),
    periodic ('P') or randomized-Fibonacci ('R') chain.

    `phason_parameter` < 1 is read as a fraction of the flippable stripes;
    >= 1 is read as an absolute number of flips.
    """
    def __init__(self, sequence_type, repeat, num, phason_parameter):
        # Build the base (unrepeated) sequence for the requested chain type.
        if sequence_type == 'F':
            self.f = Fibonacci(1, num)
            self.seq = self.f.sequence()
        elif sequence_type == 'P':
            self.p = Periodic(1, num)
            self.seq = self.p.sequence()
        elif sequence_type == 'R':
            # Randomized Fibonacci: same total length and number of ones as
            # the Fibonacci word, but with random stripe placement.
            struct = Fibonacci(1, num).fib_number()
            stripes1 = Fibonacci(1, num - 2).fib_number()
            self.p = Random(1, struct, stripes1)  # randomized Fibonacci
            self.seq = self.p.sequence()
        else:
            raise ValueError('No more types supported at the moment')
        self.repeat = repeat
        self.len = len(self.seq)
        self.where_one = self.find_all_phasons(self.seq)
        self.phason_parameter = phason_parameter
        self.sequence_type = sequence_type
        # Fractional parameter -> fraction of flippable sites; otherwise an
        # absolute flip count.
        if phason_parameter < 1:
            self.phasons_count = int(phason_parameter * len(self.where_one))
        else:
            self.phasons_count = phason_parameter
    def find_all_phasons(self, seq):
        # Positions of 1-stripes whose next 1 (cyclically) is NOT adjacent,
        # i.e. the stripes that can be shifted right by one slot.
        a = np.argwhere(seq == 1).T[0]
        b = np.concatenate((np.diff(a), np.array([(self.len - a[-1] + a[0])])))
        return np.compress(np.where(b == 1, 0, 1) == 1, a)
    def sequence_shuffling(self, seq):
        """Apply the configured number of phason flips to `seq`; return the
        flipped sequence and the flipped positions."""
        if self.sequence_type == "R":
            # Random chains are re-generated instead of flipped.
            phason_pos = np.argwhere(self.p.sequence_generator() == 1)
            print(phason_pos)
            return self.seq, phason_pos
        else:
            if self.phason_parameter < 1:
                # One batch of flips at randomly chosen flippable sites.
                phasons_pos = np.random.permutation(self.find_all_phasons(seq))[0:self.phasons_count]
                seq = self.make_shufling(phasons_pos)
            else:
                # Flip one site at a time, recomputing flippable sites after
                # each flip, and record the chosen positions.
                collect_phasons = np.zeros(self.phasons_count)
                for i in range(self.phasons_count):
                    phasons_pos = np.random.permutation(self.find_all_phasons(seq))[0]
                    seq = self.make_shufling(phasons_pos)
                    collect_phasons[i] = phasons_pos
                phasons_pos = collect_phasons
            return seq, phasons_pos
    def make_shufling(self, stripe_position):
        # Move the stripe at `stripe_position` one slot to the right
        # (cyclically).  NOTE: mutates self.seq in place.
        seq = self.seq
        seq[(stripe_position + 1) % len(seq)] = 1
        seq[stripe_position] = 0
        return seq
    def sequence(self, seq):
        # Widen each symbol into a stripe of width `repeat`.
        return np.repeat(seq, self.repeat)
if __name__ == "__main__":
pass
| 30.907975 | 101 | 0.577015 | 4,897 | 0.972013 | 0 | 0 | 82 | 0.016276 | 0 | 0 | 83 | 0.016475 |
20b1f95c01cfbfd10bb97b6e92d962f1bcbd59c0 | 3,992 | py | Python | etf_data_loader.py | xSakix/etf_data | b622064fd4c8e1c2e1d477a2731f51ff1cb08e4d | [
"Apache-2.0"
] | null | null | null | etf_data_loader.py | xSakix/etf_data | b622064fd4c8e1c2e1d477a2731f51ff1cb08e4d | [
"Apache-2.0"
] | null | null | null | etf_data_loader.py | xSakix/etf_data | b622064fd4c8e1c2e1d477a2731f51ff1cb08e4d | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import os
import sys
def load_data(assets, start_date, end_date):
    """Load the open/close/high/low/adjusted-close frames for `assets`
    between `start_date` and `end_date` (exclusive ISO-date strings)."""
    return tuple(
        load_data_from_file('etf_data_%s.csv' % kind, assets, start_date, end_date)
        for kind in ('open', 'close', 'high', 'low', 'adj_close')
    )
def load_data_from_file(file, assets, start_date, end_date):
    """Load selected asset columns from a CSV price file.

    Rows whose `Date` lies strictly between `start_date` and `end_date`
    (lexicographic comparison of ISO date strings) are kept; any row that
    holds a NaN -- or a value np.isnan cannot interpret, e.g. a string --
    in one of the requested columns is dropped.
    """
    # Walk up the directory tree until the data file is found.
    if not os.path.isfile(file):
        file = '../etf_data/' + file
    if not os.path.isfile(file):
        file = '../../etf_data/' + file
    if not os.path.isfile(file):
        file = '../../../etf_data/' + file
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.Date > start_date]
    df = df.loc[df.Date < end_date]
    df = df[assets]

    # Collect labels of invalid rows.  A set gives O(1) membership instead
    # of the former O(n) list scans via indexes.__contains__.
    bad_rows = set()
    for key in df.keys():
        for i in df[key].index:
            val = df[key][i]
            try:
                if np.isnan(val):
                    bad_rows.add(i)
            except TypeError:
                # Non-numeric cell: treat the row as invalid, as before.
                bad_rows.add(i)
    df.drop(list(bad_rows), inplace=True)
    return df
def load_data_from_file2(file, assets, start_date, end_date):
    """Same as load_data_from_file, but for CSVs whose date column is
    named `date` (lowercase) instead of `Date`.

    Rows with NaN or non-numeric values in any requested column are dropped.
    """
    # Walk up the directory tree until the data file is found.
    if not os.path.isfile(file):
        file = '../etf_data/' + file
    if not os.path.isfile(file):
        file = '../../etf_data/' + file
    if not os.path.isfile(file):
        file = '../../../etf_data/' + file
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.date > start_date]
    df = df.loc[df.date < end_date]
    df = df[assets]

    # Collect labels of invalid rows.  A set gives O(1) membership instead
    # of the former O(n) list scans via indexes.__contains__.
    bad_rows = set()
    for key in df.keys():
        for i in df[key].index:
            val = df[key][i]
            try:
                if np.isnan(val):
                    bad_rows.add(i)
            except TypeError:
                # Non-numeric cell: treat the row as invalid, as before.
                bad_rows.add(i)
    df.drop(list(bad_rows), inplace=True)
    return df
def load_all_data_from_file(file, start_date, end_date):
    """Read a full CSV (all columns kept, no NaN filtering) and keep rows
    with start_date < Date < end_date (lexicographic ISO-date comparison).

    Unlike load_data_from_file, rows containing NaN are NOT dropped.
    """
    # Walk up the directory tree until the data file is found.  Note the
    # successive '../' prefixes compose with the first one, probing
    # ../etf_data/, ../../etf_data/ and ../../../etf_data/ in turn.
    if not os.path.isfile(file):
        file = '../etf_data/' + file
    if not os.path.isfile(file):
        file = '../' + file
    if not os.path.isfile(file):
        file = '../' + file
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.Date > start_date]
    df = df.loc[df.Date < end_date]
    # The commented-out per-cell NaN scan that used to live here duplicated
    # load_data_from_file and has been removed as dead code.
    return df
def load_all_data_from_file2(file, start_date, end_date):
    """Read a full CSV (all columns) and keep rows whose lowercase `date`
    column satisfies start_date < date < end_date."""
    # Probe parent directories until the file is found; each prefix
    # composes with the previous one.
    for prefix in ('../etf_data/', '../', '../'):
        if os.path.isfile(file):
            break
        file = prefix + file
    print('Loading file ', file)
    frame = pd.read_csv(file)
    in_range = (frame.date > start_date) & (frame.date < end_date)
    return frame.loc[in_range]
def load_all_data(start_date, end_date):
    """Load the open/close/high/low/adjusted-close frames (all columns)
    between `start_date` and `end_date` (exclusive ISO-date strings)."""
    return tuple(
        load_all_data_from_file('etf_data_%s.csv' % kind, start_date, end_date)
        for kind in ('open', 'close', 'high', 'low', 'adj_close')
    )
| 29.57037 | 94 | 0.600952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.188878 |
20b2e3e32b7c66d0208f3258b3c38daa89dc0d38 | 297 | py | Python | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | 1 | 2016-05-19T13:43:21.000Z | 2016-05-19T13:43:21.000Z | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | 1 | 2020-11-23T10:21:16.000Z | 2020-11-23T10:25:16.000Z | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import sys,os,re
sys.path.append( "../lib" )
sys.path.append( "./lib" )
import Controller
import unittest
from pprint import pprint
class SysinfoTest( unittest.TestCase ):
    """Placeholder test case for the sysinfo controller; both tests are
    currently unimplemented stubs."""
    def test_configuration( self ):
        # TODO: exercise configuration handling once implemented.
        pass
    def test_information( self ):
        # TODO: exercise information retrieval once implemented.
        pass
| 13.5 | 39 | 0.666667 | 138 | 0.464646 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.111111 |
20b71d6d70dd441502d84c754b3b91d238f7e7cf | 398 | py | Python | gmmp/management/commands/write_weights_to_dict.py | digideskio/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 4 | 2020-01-05T09:14:19.000Z | 2022-02-17T03:22:09.000Z | gmmp/management/commands/write_weights_to_dict.py | digideskio/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 68 | 2019-12-23T02:19:55.000Z | 2021-04-23T06:13:36.000Z | gmmp/management/commands/write_weights_to_dict.py | OpenUpSA/gmmp | d82a4be0787c3a3a9e27dc590d7974f9f884fbb6 | [
"Apache-2.0"
] | 2 | 2019-07-25T11:53:10.000Z | 2020-06-22T02:07:40.000Z | import csv
from pprint import pprint
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command: read the CSV given as first positional argument,
    tag every row with Twitter=1 and pretty-print the resulting dicts."""

    def handle(self, *args, **options):
        with open(args[0]) as csvfile:
            rows = []
            for entry in csv.DictReader(csvfile):
                entry['Twitter'] = 1
                rows.append(entry)
            pprint(rows)
| 26.533333 | 51 | 0.582915 | 306 | 0.768844 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.022613 |
20b73422519f1d4a4cb8a1cfea73259c2ab1d3d5 | 18,613 | py | Python | src/trends.py | didier-devel/confinement | c1915c259ceb12e7163b5d7b8548e458fdcdd53f | [
"X11"
] | 1 | 2020-05-19T11:25:55.000Z | 2020-05-19T11:25:55.000Z | src/trends.py | didier-devel/confinement | c1915c259ceb12e7163b5d7b8548e458fdcdd53f | [
"X11"
] | null | null | null | src/trends.py | didier-devel/confinement | c1915c259ceb12e7163b5d7b8548e458fdcdd53f | [
"X11"
] | null | null | null | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, timedelta
from sklearn.linear_model import LinearRegression
import scipy
import math
import sys
import locator
# Directory layout, resolved relative to this script's location:
# - datagouv_path: downloaded data.gouv.fr CSVs
# - datagen_path: generated charts/CSVs for the gatsby site
file_path = os.path.dirname(os.path.realpath(__file__))
proj_path = os.path.abspath(os.path.join(file_path,".."))
datagouv_path = os.path.join(proj_path,"datagouv")
gen_path = os.path.join(proj_path,"../../gatsby/trends/generated")
datagen_path = os.path.join(gen_path,"data")
def downloadIfNeeded(fileName):
    """Download `fileName` from data.gouv.fr unless a copy at most one day
    old already exists on disk."""
    need_download = True
    if os.path.exists(fileName):
        today = date.today()
        last_modified_ts = os.path.getmtime(fileName)
        mtime = date.fromtimestamp(last_modified_ts)
        # wget preserves the server-side mtime, so this is the data's age.
        if (today-mtime).days <= 1:
            need_download = False
    if need_download:
        print("%s Needs a download"%fileName)
        # Stable data.gouv.fr resource URLs for the two known datasets.
        # NOTE(review): if fileName matches neither pattern, `command` is
        # unbound and os.system raises NameError -- confirm callers only
        # ever pass these two file kinds.
        if "department" in fileName:
            command = "/usr/bin/wget https://www.data.gouv.fr/fr/datasets/r/eceb9fb4-3ebc-4da3-828d-f5939712600a -O %s"%fileName
        elif "hospitalieres" in fileName:
            command = "/usr/bin/wget https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c -O %s"%fileName
        os.system(command)
    else:
        print("%s est à jour"%fileName)
# Emergency-room ("urgences") dataset; 'dep' forced to string to keep
# department codes like "01" intact.
urgence_data = os.path.join(datagouv_path,"department_latest.csv")
downloadIfNeeded(urgence_data)
urgence_df = pd.read_csv(urgence_data, sep=";", dtype= {'dep':'object'})
# Hospital ("hospitalières") dataset.
hosp_data = os.path.join(datagouv_path,"donnees_hospitalieres_latest.csv")
downloadIfNeeded(hosp_data)
hosp_df = pd.read_csv(hosp_data, sep=';')
# Timestamp of the data (wget keeps the website's mtime).
last_modified_ts = os.path.getmtime(urgence_data)
data_date = datetime.fromtimestamp(last_modified_ts)
# Keep only the all-ages rows (age class 0).
urgence_df = urgence_df[urgence_df["sursaud_cl_age_corona"] == 0].copy()
# Read the department-codes file, indexed by department code.
depts = pd.read_csv(os.path.join(datagouv_path,"departement2020.csv"))
depts.set_index(depts.dep, inplace=True)
depts.drop("dep",axis=1, inplace=True)
# Read the regions file, indexed by region code.
regs = pd.read_csv(os.path.join(datagouv_path,"region2020.csv"))
#regs["reg"] = regs["reg"].apply(lambda x: str(x) if len(str(x)) > 1 else '0' + str(x))
regs.set_index(regs.reg, inplace=True)
regs.drop("reg", axis=1, inplace=True)
# Add department name, region code and region name to the emergency data.
urgence_df["dep_name"] = urgence_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
urgence_df["reg"] = urgence_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
urgence_df["reg_name"] = urgence_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Add department name, region code and region name to the hospital data.
# Department codes come unpadded here: re-pad single digits with a zero.
hosp_df["dep"] = hosp_df["dep"].apply(lambda x: x if len(x) > 1 else '0'+x)
# Drop Saint Martin (no matching metropolitan department entry).
hosp_df=hosp_df[hosp_df.dep != "978"]
hosp_df["dep_name"] = hosp_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
hosp_df["reg"] = hosp_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
hosp_df["reg_name"] = hosp_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Afficher les dates au format jj/mm/yy et les mettre en index
def convertDate(isodate):
    """Convert an ISO date string "YYYY-MM-DD" to "DD/MM/YY"."""
    year, month, day = isodate.split('-')
    return "%s/%s/%s" % (day, month, year[2:])
def addDays(df, duration):
    """Extend df's "DD/MM/YY" string index with `duration` extra
    consecutive days (new rows are filled with NaN by reindex)."""
    # Parse the last index label; years are assumed to be 20YY.
    day_s, month_s, year_s = df.index[-1].split("/")
    last_known = date(2000 + int(year_s), int(month_s), int(day_s))
    # strftime("%d/%m/%y") reproduces the zero-padded DD/MM/YY format.
    extra_labels = [
        (last_known + timedelta(days=offset)).strftime("%d/%m/%y")
        for offset in range(1, duration + 1)
    ]
    return df.reindex(index=df.index.append(pd.Index(extra_labels)))
# Calcul de l'intervalle de confiance de la prédiction
# Voir http://pageperso.lif.univ-mrs.fr/~alexis.nasr/Ens/IAAAM2/SlidesModStat_C1_print.pdf
def estimateSigma(reg, X, Y):
    """Residual standard deviation of a fitted regression, with the n - 2
    degrees of freedom of simple linear regression."""
    residuals = Y - reg.predict(X)
    return math.sqrt((residuals ** 2).sum() / (len(residuals) - 2))
def plot_non_zero(ax, logScale, df, col, label):
    """Plot df[col] on `ax`; on a log scale, zeros are unplottable, so they
    are masked as NaN in a shadow column "nnz_<col>" added to df."""
    if logScale:
        masked = "nnz_%s" % col
        # Series.where keeps values where the condition holds, NaN elsewhere.
        df[masked] = df[col].where(df[col] != 0)
        ax.plot(df[masked], label=label)
    else:
        ax.plot(df[col], label=label)
def make_hosp_bars(has_reg, df_source, hosp_col, reg_index, source_label, ax):
    """Draw the daily-hospitalisation bar chart on `ax`.

    When a trend was computed (`has_reg`), the day after the regression
    window is drawn with a dashed outline: its figures are still being
    reported and it is excluded from the trend.
    """
    if has_reg:
        # Render the last day differently since not all figures have been
        # reported yet; that day is not used to compute the trend.
        df_source["valid_hosp"] = np.nan
        df_source["uncertain_hosp"] = np.nan
        df_source.loc[df_source.index[:df_source.index.get_loc(reg_index[-1])+1], "valid_hosp"] = df_source[hosp_col]
        last_day = df_source.index[df_source.index.get_loc(reg_index[-1]) + 1]
        df_source.loc[last_day,"uncertain_hosp"] = df_source.loc[last_day,hosp_col]
        ax.bar(df_source.index,
               df_source["valid_hosp"],
               label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
               alpha=0.3,
               color="blue")
        ax.bar(df_source.index,
               df_source["uncertain_hosp"],
               alpha=0.2,
               edgecolor="black",
               linestyle="--",
               color="blue")
    else:
        # The last day is hard to identify here, so it is not singled out;
        # no trend is computed in this case anyway.
        ax.bar(df_source.index,
               df_source[hosp_col],
               label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
               alpha=0.3,
               color="blue")
def make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, logScale):
    """Render one hospitalisation chart (bars, smoothed curve, trend line
    and confidence band) for one area and save it as a PNG.

    `src_urgence` selects the emergency-room data as the trend source;
    the saved file name is recorded in df_row["log_curve"/"lin_curve"].
    """
    # Plot
    fig = plt.figure(figsize=(10,6))
    ax = plt.axes()
    has_reg = df_row["reg_start"] is not None
    # Duplicate the y-axis ticks on the right for easier reading on phones.
    ax.yaxis.set_ticks_position('both')
    ax.tick_params(labeltop=False, labelright=True)
    if src_urgence:
        make_hosp_bars(has_reg, urgence, "nbre_hospit_corona", urg_index, "urgences", ax)
        ax.plot(urgence[roll_urg], label="Nouvelles hospitalisations quotidiennes lissées - données urgences", color="orange")
        if has_reg:
            ax.plot(urgence["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes -- données urgences", color="orange")
            ax.fill_between(urgence.index, urgence["pred_max"], urgence["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Shade the forecast part of the band a second time (darker).
            reg_end = urg_index[-1]
            pred_index = urgence.index[urgence.index.get_loc(reg_end) + 1 :]
            ax.fill_between(pred_index, urgence.loc[pred_index, "pred_max"], urgence.loc[pred_index, "pred_min"],color="orange",alpha=0.2)
        # Other data (not used for the trend).
        ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="red")
    else:
        make_hosp_bars(has_reg, hosp, "incid_hosp", hosp_index, "hôpitaux", ax)
        ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="orange")
        if has_reg:
            ax.plot(hosp["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes - données hôpitaux", color="orange")
            ax.fill_between(hosp.index, hosp["pred_max"], hosp["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Shade the forecast part of the band a second time (darker).
            reg_end = hosp_index[-1]
            pred_index = hosp.index[hosp.index.get_loc(reg_end) + 1 :]
            ax.fill_between(pred_index, hosp.loc[pred_index, "pred_max"], hosp.loc[pred_index,"pred_min"],color="orange",alpha=0.2)
    #ax.xaxis.set_major_locator(plt.MaxNLocator(10))
    ax.xaxis.set_major_locator(locator.FirstOfMonthLocator())
    #ax.xaxis.set_minor_locator(plt.MultipleLocator(1))
    ax.legend()
    if src_urgence:
        # To reuse this left bound with the hospital data, its index would
        # have to be extended back towards Feb 24.
        ax.set_xlim(left = "24/02/20", right=urgence.index[-1])
    if logScale:
        plt.yscale("log")
        # Same scale for all log curves.
        # Upper limit high enough to leave room for the legend.
        ax.set_ylim(0.1,50000)
    else:
        if has_reg:
            # Guard against wildly diverging forecasts blowing up the axis.
            df_source = urgence if src_urgence else hosp
            hosp_col = "nbre_hospit_corona" if src_urgence else "incid_hosp"
            if df_source.loc[df_source.index[-1], "pred_max"] > df_source[hosp_col].max()*4:
                ax.set_ylim(0, df_source[hosp_col].max()*4)
    ax.set_title("Hospitalisations COVID-19 quotidiennes en %s - échelle %s"%(label,"logarithmique" if logScale else "linéaire"))
    file_name = file_radical + ("_log" if logScale else "_lin") + ".png"
    plt.savefig(os.path.join(datagen_path,file_name))
    df_row["log_curve" if logScale else "lin_curve"] = file_name
    plt.close()
def aggregate(df_source, date_col):
    """Sum all columns per day and reindex by "DD/MM/YY" date strings.

    Dates are converted only after grouping, since lexicographic ISO
    order already matches chronological order.
    """
    grouped = df_source.groupby([date_col]).agg('sum')
    grouped["date"] = [convertDate(iso) for iso in grouped.index]
    return grouped.set_index(["date"])
def make_rolling(df_source, col):
    """Add a centered 7-day geometric rolling mean of df_source[col] as
    "rolling_<col>" and return that column name.

    Zeros are temporarily replaced by 0.1 (in "nnz_<col>") so the
    geometric mean stays defined, then windows that were all-zero are
    reset to 0.
    """
    roll_col = "rolling_%s"%col
    nnz_col = "nnz_%s"%col
    df_source[nnz_col] = df_source[col]
    df_source.loc[df_source[nnz_col]==0,nnz_col] = 0.1
    # Centered 7-day geometric mean (7th root of the window product).
    df_source[roll_col] = df_source[nnz_col].rolling(7,center=True).aggregate(lambda x: x.prod()**(1./7))
    # Windows made only of the 0.1 placeholders come out at ~0.1: reset to 0.
    df_source.loc[df_source[roll_col]<=0.101, roll_col] = 0
    return roll_col
def extract_recent(source, history, use_latest):
    """Return the `history` most recent rows of `source`; when
    `use_latest` is False, the very last row is excluded (its figures are
    typically still incomplete)."""
    return source.iloc[-history:] if use_latest else source.iloc[-history - 1:-1]
def make_trend(df_source, hosp_col, roll_col, recent_hist):
    """Fit an exponential trend (linear regression on log counts) over the
    recent window and extend df_source with 15 days of forecast.

    Returns (regression_index, time_to_double_in_days, df_source); the
    first two are None when there is not enough non-zero data.
    Adds columns: num_jour, pred_hosp, conf_log_mean, pred_max, pred_min.
    """
    recent = extract_recent(df_source, recent_hist, False)
    nullVals = len(recent[recent[hosp_col] == 0])
    if nullVals == 0:
        reg_col = hosp_col
    else:
        # Replace zero values by 0.1 (or by 0 where the rolling mean is 0)
        # so their logarithm stays defined.
        reg_col = "%s_patch"%hosp_col
        df_source[reg_col] = df_source[hosp_col]
        df_source.loc[df_source[reg_col] == 0, reg_col] = 0.1
        df_source.loc[df_source[roll_col] == 0, reg_col] = 0
        # With more than 2 zero values, also double the estimation window.
        if nullVals > 2:
            recent_hist *= 2
        else:
            recent_hist = int(recent_hist*1.5)
    # Add a day-number column (regression abscissa).
    df_source["num_jour"] = np.arange(len(df_source))
    for_regression = extract_recent(df_source, recent_hist,False)
    # Not enough data: do not produce a trend.
    if len(for_regression[for_regression[reg_col] > 0]) < recent_hist*0.5:
        return None, None, df_source
    # Drop zero or undefined values.
    for_regression = for_regression[for_regression[reg_col] > 0]
    reg = LinearRegression()
    X_train = for_regression.drop(columns = [c for c in for_regression.columns if c != "num_jour"])
    Y_train = np.log(for_regression[reg_col])
    reg.fit(X_train,Y_train)
    # The regression slope gives the doubling time of the epidemic.
    slope = reg.coef_[0]
    timeToDouble = math.log(2)/slope
    # Append two weeks of rows and refresh the day-number column.
    df_source = addDays(df_source, 15)
    df_source["num_jour"] = np.arange(len(df_source))
    # Store the prediction alongside the data.
    df_source["pred_hosp"]=np.nan
    # Prediction range: from the first regression day to the last row.
    predIndex = df_source[(df_source["num_jour"] >= X_train.iloc[0]["num_jour"])].index
    X = df_source.loc[predIndex].drop(columns = [c for c in df_source.columns if c != "num_jour"])
    df_source.loc[predIndex,"pred_hosp"]=np.exp(reg.predict(X))
    # Confidence interval of the fit.
    sigma = estimateSigma(reg,X_train,Y_train)
    X_train_mean = X_train["num_jour"].mean()
    # Confidence interval in log space (alpha = 10% -- 1 - alpha/2 = 0.95).
    df_source["conf_log_mean"] = np.nan
    # Interval on the mean, computed from the regression training range on.
    df_source.loc[predIndex,"conf_log_mean"] = np.sqrt(1./len(X_train) + \
            (df_source["num_jour"]-X_train_mean)**2 / ((X_train["num_jour"]-X_train_mean)**2).sum()) * \
            sigma*scipy.stats.t.ppf(0.95,len(X_train)-2)
    df_source["pred_max"] = df_source["pred_hosp"]*np.exp(df_source["conf_log_mean"])
    df_source["pred_min"] = df_source["pred_hosp"]/np.exp(df_source["conf_log_mean"])
    return for_regression.index, timeToDouble, df_source
def make_trend_metadata(df_row, reg_index, df_source, timeToDouble, hosp_rate_row_col):
    """Fill the summary row with the trend's date range and a confidence
    flag (0 when the forecast band at the last day spans more than a
    factor 2 around the current rate, or when no trend exists)."""
    df_row["reg_start"] = reg_index[0] if reg_index is not None else None
    df_row["reg_end"]=reg_index[-1] if reg_index is not None else None
    # cont_* shift the regression window back by 11 days -- presumably the
    # assumed contamination-to-hospitalisation lag; TODO confirm.
    cont_end_loc = df_source.index.get_loc(reg_index[-1]) - 11 if reg_index is not None else None
    cont_start_loc = df_source.index.get_loc(reg_index[0]) - 11 if reg_index is not None else None
    df_row["cont_end"]=df_source.index[cont_end_loc] if reg_index is not None else None
    df_row["cont_start"]=df_source.index[cont_start_loc] if reg_index is not None else None
    df_row["timeToDouble"] = timeToDouble
    if df_row["reg_start"] is not None:
        if df_source["pred_max"][-1] > df_row[hosp_rate_row_col]*2 and df_source["pred_min"][-1] < df_row[hosp_rate_row_col]/2.:
            df_row["trend_confidence"] = 0
        else:
            df_row["trend_confidence"] = 1
    else:
        # No trend when there was not enough data to compute one.
        df_row["trend_confidence"] = 0
def make_data(urgence, hosp, file_radical, df_row, label):
    """Aggregate, smooth, fit the trend and render both (log and linear)
    charts for one area; results go into df_row and PNG files named after
    `file_radical`."""
    urgence = aggregate(urgence, "date_de_passage")
    hosp = aggregate(hosp, "jour")
    recent_hist = 15
    # NOTE(review): the next assignment is immediately overwritten -- the
    # first `recent` line appears to be dead code.
    recent = urgence.loc[urgence.index[-recent_hist:]]
    recent = extract_recent(urgence, recent_hist, False)
    # Use the emergency-room data when at least one case was reported in
    # the recent window.
    src_urgence = len(recent[recent["nbre_hospit_corona"] > 0]) >= 1
    roll_urg = make_rolling(urgence, "nbre_hospit_corona")
    roll_hosp = make_rolling(hosp, "incid_hosp")
    # The last day of the smoothed average gives the daily rate.
    if src_urgence:
        df_row["hosp_rate_urgence"] = urgence[urgence[roll_urg] > 0 ][roll_urg][-1]
        df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
        df_row["rate_date"] = urgence[urgence[roll_urg] > 0 ].index[-1]
    else:
        df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
        df_row["rate_date"] = hosp[hosp[roll_hosp] > 0 ].index[-1]
    # make_trend modifies the dataframe (it extends the index) so we need to update the df variables
    if src_urgence:
        urg_index, urg_timeToDouble, urgence = make_trend(urgence, "nbre_hospit_corona", roll_urg, recent_hist)
    else:
        # Python interpreter complains if the value is not assigned
        urg_index = None
    # Compute the hospital-data trend in every case, even though it is not
    # used (for now) when the emergency-room data is usable.
    hosp_index, hosp_timeToDouble, hosp = make_trend(hosp, "incid_hosp", roll_hosp, recent_hist)
    if src_urgence:
        make_trend_metadata(df_row, urg_index, urgence,urg_timeToDouble, "hosp_rate_urgence")
    else:
        make_trend_metadata(df_row, hosp_index,hosp, hosp_timeToDouble, "hosp_rate_all")
    make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, True)
    make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, False)
# Columns shared by the France / region / department summary CSVs.
common_fields = ["log_curve", "lin_curve","timeToDouble", "reg_start", "reg_end", "cont_start", "cont_end", "rate_date", "hosp_rate_urgence", "hosp_rate_all", "trend_confidence"]
# National summary: one row, stamped with the source data's timestamp.
fr_summary = pd.DataFrame(index=["France"],columns=["data_date"] + common_fields)
fr_summary.loc["France","data_date"] = data_date.strftime("%d/%m/%Y %H:%M")
make_data(urgence_df, hosp_df, "france", fr_summary.loc["France"], "France")
fr_summary.to_csv(os.path.join(datagen_path, "france.csv"), index_label='id')
# Region codes > 10 are metropolitan France; < 10 the overseas territories.
metropole = [r for r in regs.index if r > 10]
drom = [r for r in regs.index if r < 10]
reg_summary = pd.DataFrame(index = metropole+drom, columns=["reg_name"] + common_fields)
dep_summary = pd.DataFrame(index = depts.index, columns=["dep_name", "reg"] + common_fields)
# Generate charts and summary rows for every region, then every department
# of that region.  Chart file radicals are "r_<reg>" / "d_<dep>".
for reg in metropole + drom:
    reg_name = regs.loc[reg]["libelle"]
    file_radical = code = "r_" + str(reg)
    print(reg, reg_name)
    reg_summary.loc[reg]["reg_name"] = reg_name
    make_data(urgence_df[urgence_df["reg"] == reg], hosp_df[hosp_df["reg"] == reg], file_radical, reg_summary.loc[reg], reg_name)
    reg_depts = depts[depts["reg"]==reg]
    for dept in reg_depts.index:
        dep_name = reg_depts.loc[dept,"libelle"]
        dep_summary.loc[dept,"reg"] = reg
        dep_summary.loc[dept,"dep_name"] = dep_name
        file_radical = code = "d_" + str(dept)
        print("\t%s %s"%(dept, dep_name))
        make_data(urgence_df[urgence_df["dep"] == dept], hosp_df[hosp_df["dep"] == dept], file_radical, dep_summary.loc[dept], dep_name)
reg_summary.to_csv(os.path.join(datagen_path, "regions.csv"), index_label="reg")
dep_summary.to_csv(os.path.join(datagen_path, "departements.csv"), index_label="dep")
| 41.733184 | 178 | 0.669908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,628 | 0.301091 |
20b8115dffa3217a7ae85b3889aa549226d9b0c6 | 5,064 | py | Python | tests/test_sorted_feed.py | andyet/thoonk.py | 4535ad05975a6410fe3448ace28d591ba1452f02 | [
"MIT"
] | 63 | 2015-01-13T09:08:19.000Z | 2021-10-05T16:52:52.000Z | tests/test_sorted_feed.py | andyet/thoonk.py | 4535ad05975a6410fe3448ace28d591ba1452f02 | [
"MIT"
] | 2 | 2015-03-19T22:32:01.000Z | 2015-03-23T19:21:24.000Z | tests/test_sorted_feed.py | andyet/thoonk.py | 4535ad05975a6410fe3448ace28d591ba1452f02 | [
"MIT"
] | 12 | 2015-03-16T16:40:38.000Z | 2022-03-28T10:56:24.000Z | import thoonk
from thoonk.feeds import SortedFeed
import unittest
from ConfigParser import ConfigParser
class TestLeaf(unittest.TestCase):
    """Integration tests for thoonk's SortedFeed against a live Redis
    instance configured in test.cfg (Python 2 code; note the print
    statement below).  Item ids are assigned sequentially from '1'."""
    def setUp(self):
        # Connect to the Redis test instance from test.cfg and start from
        # an empty database; abort the run when no configuration exists.
        conf = ConfigParser()
        conf.read('test.cfg')
        if conf.sections() == ['Test']:
            self.ps = thoonk.Thoonk(host=conf.get('Test', 'host'),
                                    port=conf.getint('Test', 'port'),
                                    db=conf.getint('Test', 'db'))
            self.ps.redis.flushdb()
        else:
            print 'No test configuration found in test.cfg'
            exit()

    def test_10_basic_sorted_feed(self):
        """Test basic sorted feed publish and retrieve."""
        l = self.ps.sorted_feed("sortedfeed")
        self.assertEqual(l.__class__, SortedFeed)
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        r = l.get_ids()
        v = l.get_items()
        items = {'1': 'hi',
                 '2': 'bye',
                 '3': 'thanks',
                 '4': "you're welcome"}
        self.assertEqual(r, ['1', '2', '3', '4'], "Sorted feed results did not match publish: %s." % r)
        self.assertEqual(v, items, "Sorted feed items don't match: %s" % v)

    def test_20_sorted_feed_before(self):
        """Test addding an item before another item"""
        l = self.ps.sorted_feed("sortedfeed")
        l.publish("hi")
        l.publish("bye")
        l.publish_before('2', 'foo')
        r = l.get_ids()
        self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)

    def test_30_sorted_feed_after(self):
        """Test adding an item after another item"""
        l = self.ps.sorted_feed("sortedfeed")
        l.publish("hi")
        l.publish("bye")
        l.publish_after('1', 'foo')
        r = l.get_ids()
        self.assertEqual(r, ['1', '3', '2'], "Sorted feed results did not match: %s." % r)

    def test_40_sorted_feed_prepend(self):
        """Test addding an item to the front of the sorted feed"""
        l = self.ps.sorted_feed("sortedfeed")
        l.publish("hi")
        l.publish("bye")
        l.prepend('bar')
        r = l.get_ids()
        self.assertEqual(r, ['3', '1', '2'],
                         "Sorted feed results don't match: %s" % r)

    def test_50_sorted_feed_edit(self):
        """Test editing an item in a sorted feed"""
        l = self.ps.sorted_feed("sortedfeed")
        l.publish("hi")
        l.publish("bye")
        l.edit('1', 'bar')
        r = l.get_ids()
        v = l.get_item('1')
        vs = l.get_items()
        items = {'1': 'bar',
                 '2': 'bye'}
        self.assertEqual(r, ['1', '2'],
                         "Sorted feed results don't match: %s" % r)
        self.assertEqual(v, 'bar', "Items don't match: %s" % v)
        self.assertEqual(vs, items, "Sorted feed items don't match: %s" % vs)

    def test_60_sorted_feed_retract(self):
        """Test retracting an item from a sorted feed"""
        l = self.ps.sorted_feed("sortedfeed")
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        l.retract('3')
        r = l.get_ids()
        self.assertEqual(r, ['1', '2', '4'],
                         "Sorted feed results don't match: %s" % r)

    def test_70_sorted_feed_move_first(self):
        """Test moving items around in the feed."""
        l = self.ps.sorted_feed('sortedfeed')
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        l.move_first('4')
        r = l.get_ids()
        self.assertEqual(r, ['4', '1', '2', '3'],
                         "Sorted feed results don't match: %s" % r)

    def test_71_sorted_feed_move_last(self):
        """Test moving items around in the feed."""
        l = self.ps.sorted_feed('sortedfeed')
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        l.move_last('2')
        r = l.get_ids()
        self.assertEqual(r, ['1', '3', '4', '2'],
                         "Sorted feed results don't match: %s" % r)

    def test_72_sorted_feed_move_before(self):
        """Test moving items around in the feed."""
        l = self.ps.sorted_feed('sortedfeed')
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        l.move_before('1', '2')
        r = l.get_ids()
        self.assertEqual(r, ['2', '1', '3', '4'],
                         "Sorted feed results don't match: %s" % r)

    def test_73_sorted_feed_move_after(self):
        """Test moving items around in the feed."""
        l = self.ps.sorted_feed('sortedfeed')
        l.publish("hi")
        l.publish("bye")
        l.publish("thanks")
        l.publish("you're welcome")
        l.move_after('1', '4')
        r = l.get_ids()
        self.assertEqual(r, ['1', '4', '2', '3'],
                         "Sorted feed results don't match: %s" % r)
suite = unittest.TestLoader().loadTestsFromTestCase(TestLeaf)
| 34.924138 | 103 | 0.529226 | 4,893 | 0.966232 | 0 | 0 | 0 | 0 | 0 | 0 | 1,607 | 0.317338 |
20b8217b44b0392c058a55680a9fa936fd77410c | 5,061 | py | Python | dolo/tests/test_solvers.py | christophe-gouel/dolo | d9aef6d78d19899e2669e49ee6b7ad9aacf0e35d | [
"BSD-2-Clause"
] | null | null | null | dolo/tests/test_solvers.py | christophe-gouel/dolo | d9aef6d78d19899e2669e49ee6b7ad9aacf0e35d | [
"BSD-2-Clause"
] | null | null | null | dolo/tests/test_solvers.py | christophe-gouel/dolo | d9aef6d78d19899e2669e49ee6b7ad9aacf0e35d | [
"BSD-2-Clause"
] | null | null | null | import unittest
import numpy as np
from dolo.numeric.ncpsolve import ncpsolve, smooth
def josephy(x):
    """NCP test function of Josephy: F(x), using the first four
    components of x (any extra entries of the result stay zero)."""
    Fx = np.zeros(len(x))
    Fx[:4] = [
        3 * x[0] ** 2 + 2 * x[0] * x[1] + 2 * x[1] ** 2 + x[2] + 3 * x[3] - 6,
        2 * x[0] ** 2 + x[0] + x[1] ** 2 + 3 * x[2] + 2 * x[3] - 2,
        3 * x[0] ** 2 + x[0] * x[1] + 2 * x[1] ** 2 + 2 * x[2] + 3 * x[3] - 1,
        x[0] ** 2 + 3 * x[1] ** 2 + 2 * x[2] + 3 * x[3] - 3,
    ]
    return Fx
def Djosephy(x):
    """Jacobian DF(x) of the Josephy NCP example function.

    Returns a ``len(x) x len(x)`` array whose top-left 4x4 block holds the
    partial derivatives of :func:`josephy`.
    """
    a, b = x[0], x[1]
    # Rows of the 4x4 Jacobian, in the same order as josephy's components.
    rows = [
        [6. * a + 2. * b, 2. * a + 4. * b, 1., 3.],
        [4. * a + 1.,     2. * b,          3., 2.],
        [6. * a + b,      a + 4. * b,      2., 3.],
        [2. * a,          6. * b,          2., 3.],
    ]
    n = len(x)
    DFx = np.zeros((n, n))
    for i, row in enumerate(rows):
        DFx[i, :4] = row
    return DFx
class SerialSolve(unittest.TestCase):
    """Exercise dolo's solver front-end on the Josephy NCP test problem."""

    def test_simple_solve(self):
        """fsolve and lmmcp should agree on the unconstrained Josephy root."""
        x0 = np.array([0.5,0.5,0.5,0.5])

        # Bound/value/Jacobian fixtures; not all of these are used below.
        lb = np.array([0.0,0.6,0.0,0.0])
        ub = np.array([1.0,1.0,1.0,0.4])

        fval = np.array([ 0.5, 0.5, 0.1,0.5 ])
        jac = np.array([
            [1.0,0.2,0.1,0.0],
            [1.0,0.2,0.1,0.0],
            [0.0,1.0,0.2,0.0],
            [0.1,1.0,0.2,0.1]
        ])

        N = 10
        d = len(fval)

        from dolo.numeric.solver import solver

        sol_fsolve = solver(josephy, x0, method='fsolve')

        sol_lmmcp = solver(josephy, x0, method='lmmcp')

        from numpy.testing import assert_almost_equal
        # Both backends must converge to the same point.
        assert_almost_equal(sol_fsolve, sol_lmmcp)

    def test_serial_problems(self):
        """Run ncpsolve on one Josephy problem, then N stacked copies of it."""
        from numpy import inf
        import numpy

        # ncpsolve consumes [value, jacobian] pairs; signs are flipped here,
        # presumably to match its complementarity convention — confirm
        # against the ncpsolve docs.
        fun = lambda x: [-josephy(x), -Djosephy(x)]

        x0=np.array( [1.25, 0.01, 0.01, 0.50] )
        lb=np.array( [0.00, 0.00, 0.00, 0.00] )
        ub=np.array( [inf, inf, inf, inf] )

        resp = ncpsolve(fun, lb, ub, x0, tol=1e-15)

        # Reference solution the bounded solve is expected to reproduce.
        sol = np.array( [ 1.22474487e+00, 0.00000000e+00, 3.60543164e-17, 5.00000000e-01])

        from numpy.testing import assert_almost_equal, assert_equal

        assert_almost_equal(sol, resp)

        N = 10
        d = len(x0)

        # Expected result of the serial solve: N copies of the single answer.
        serial_sol_check = np.zeros((d,N))
        for n in range(N):
            serial_sol_check[:,n] = resp[0]

        # N identical problems stacked column-wise.
        s_x0 = np.column_stack([x0]*N)
        s_lb = np.column_stack([lb]*N)
        s_ub = np.column_stack([ub]*N)

        def serial_fun(xvec, deriv=None):
            # Evaluate each column independently. deriv='serial' returns a
            # (d,d,N) stack of per-column Jacobians; deriv='full' embeds them
            # in a block-diagonal (d,N,d,N) array.
            resp = np.zeros( (d,N) )
            if deriv=='serial':
                dresp = np.zeros( (d,d,N) )
            elif deriv=='full':
                dresp = np.zeros( (d,N,d,N) )
            for n in range(N):
                [v, dv] = fun(xvec[:,n])
                resp[:,n] = v
                if deriv=='serial':
                    dresp[:,:,n] = dv
                elif deriv=='full':
                    dresp[:,n,:,n] = dv
            # if deriv=='full':
            #     dresp = dresp.swapaxes(0,2).swapaxes(1,3)
            if deriv is None:
                return resp
            else:
                return [resp, dresp]

        serial_fun_val = lambda x: serial_fun(x)
        serial_fun_serial_jac = lambda x: serial_fun(x,deriv='serial')[1]
        serial_fun_full_jac = lambda x: serial_fun(x,deriv='full')[1]

        from dolo.numeric.solver import solver

        # Run every solver / bound / Jacobian combination; results are only
        # printed for now (see the TODO at the end of this method).
        print("Serial Bounded solution : ncpsolve")
        serial_sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, lb=s_lb, ub=s_ub, method='ncpsolve', serial_problem=True)

        print("Serial Bounded solution (with jacobian) : ncpsolve")
        serial_sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_serial_jac, method='ncpsolve', serial_problem=True)

        print("Bounded solution : ncpsolve")
        sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, method='ncpsolve', serial_problem=False)

        print("Bounded solution (with jacobian) : ncpsolve")
        sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_full_jac, method='ncpsolve', serial_problem=False)

        print("Serial Unbounded solution : ncpsolve")
        serial_sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='newton', serial_problem=True)

        print("Unbounded solution : fsolve")
        sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='fsolve', serial_problem=False)

        print("Unbounded solution (with jacobian) : fsolve")
        sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='fsolve', serial_problem=False)

        print("Unbounded solution : lmmcp")
        sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='lmmcp', serial_problem=False)

        # TODO : check that results are equal to the benchmark
# Run this module's tests when executed directly as a script.
if __name__ == '__main__':
    unittest.main()
20b83216ca8a26bd5fed432403994272646d87b3 | 4,627 | py | Python | Solution_6.py | LukeFarrell/Google_Foo_Bar | 22538a8bc75a4eedfcadcc1491b27a710b74f39b | [
"Apache-2.0"
] | null | null | null | Solution_6.py | LukeFarrell/Google_Foo_Bar | 22538a8bc75a4eedfcadcc1491b27a710b74f39b | [
"Apache-2.0"
] | null | null | null | Solution_6.py | LukeFarrell/Google_Foo_Bar | 22538a8bc75a4eedfcadcc1491b27a710b74f39b | [
"Apache-2.0"
] | null | null | null | #Absorbing Markov Matrix Problem
from fractions import Fraction
def answer6(m):
#Initialize Matrix
Y = [[0 for x in range(len(m))] for x in range(len(m))]
terminal = []
nonTerminal = []
#Keep track of absorbing states
for i in range(len(m)):
s = float(sum(m[i]))
if s != 0:
nonTerminal.append(i)
for j in range(len(m)):
#Put fractions in the matrix
Y[i][j] = Fraction(m[i][j]/s).limit_denominator()
else:
if len(Y) <= 1:
Y[i][i] = 1
terminal.append(i)
#Use the absorbing markov method
if len(Y) > 1:
I, R, Q = rearange(Y, terminal, nonTerminal)
IQ = subtract(Q)
F = inverse(IQ)
FR = multiply(F,R)
return LCM(FR[0])
#For small matrices use Brute Force
else:
a = helper(Y)
final = LCM(a[0])
return final[len(nonTerminal):]
def rearange(old,terminal,nonTerminal):
#rearange the matrix
t = terminal+nonTerminal
new = [[0 for x in range(len(old))] for x in range(len(old))]
for i in range(len(old)):
for j in range(len(old)):
new[i][j] = old[t[i]][t[j]]
for i in range(len(terminal)):
new[i][i] =1
#Extreme Indexing to divide the matrix
I = [[new[i][j] for j in range(len(terminal))] for i in range(len(terminal))]
R = [[new[::-1][i][j] for j in range(len(terminal))] for i in range(len(nonTerminal))][::-1]
Q = [[new[::-1][i][::-1][j] for j in range(len(nonTerminal))][::-1] for i in range(len(nonTerminal))][::-1]
return I, R, Q
def subtract(Q):
#Contruct the proper identity matrix
I = [[0 for x in range(len(Q))] for x in range(len(Q))]
for i in range(len(I)):
I[i][i] = 1
#Subtract I - Q
new = [[0 for x in range(len(Q))] for x in range(len(Q))]
for i in range(len(Q)):
for j in range(len(Q)):
new[i][j] = Fraction(I[i][j] - Q[i][j]).limit_denominator()
return new
def LCM(l):
#Calculate the LCM of the while list
new = []
temp = 1
for i in range(0,len(l)):
#Calculate and check the LCM
a = lcm(temp, l[i].denominator)
if a > temp:
temp = a
for i in l:
mult = temp/i.denominator
new.append(int(i.numerator*mult))
new.append(int(temp))
return new
def lcm(x,y):
if x > y:
greater = x
else:
greater = y
while(True):
if((greater % x == 0) and (greater % y == 0)):
lcm = greater
break
greater += 1
return lcm
#Use for brute force calculation of small matricies
def helper(a):
b1 = multiply(a,a)
b2 = []
while b1 != b2:
b2 = b1
b1 = multiply(a,b1)
return b1
def multiply(a,b):
result = [[0 for x in range(len(b[0]))] for x in range(len(a))]
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(a[0])):
result[i][j] += Fraction(a[i][k] * b[k][j]).limit_denominator()
return result
def transpose(m):
t = []
for r in range(len(m)):
tRow = []
for c in range(len(m[r])):
if c == r:
tRow.append(m[r][c])
else:
tRow.append(m[c][r])
t.append(tRow)
return t
def getMatrixMinor(m,i,j):
return [row[:j] + row[j+1:] for row in (m[:i]+m[i+1:])]
def getMatrixDeternminant(m):
#base case for 2x2 matrix
if len(m) == 2:
return m[0][0]*m[1][1]-m[0][1]*m[1][0]
#Recursively call for determinant
determinant = 0
for c in range(len(m)):
determinant += ((-1)**c)*m[0][c]*getMatrixDeternminant(getMatrixMinor(m,0,c))
return determinant
def inverse(m):
determinant = getMatrixDeternminant(m)
#special case for 2x2 matrix:
if len(m) == 2:
return [[m[1][1]/determinant, -1*m[0][1]/determinant],
[-1*m[1][0]/determinant, m[0][0]/determinant]]
#find matrix of cofactors
cofactors = []
for r in range(len(m)):
cofactorRow = []
for c in range(len(m)):
minor = getMatrixMinor(m,r,c)
cofactorRow.append(((-1)**(r+c)) * getMatrixDeternminant(minor))
cofactors.append(cofactorRow)
cofactors = transpose(cofactors)
for r in range(len(cofactors)):
for c in range(len(cofactors)):
cofactors[r][c] = cofactors[r][c]/determinant
return cofactors | 31.053691 | 112 | 0.519559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.114329 |
20b8da61c88575ff4c0f907cbeb1e439d12dd556 | 1,476 | py | Python | banditpylib/learners/mnl_bandit_learner/ts_test.py | Alanthink/banditpylib | ba6dc84d87ae9e9aec48cd622ec9988dccdd18c6 | [
"MIT"
] | 20 | 2020-02-05T23:53:18.000Z | 2021-07-16T21:06:16.000Z | banditpylib/learners/mnl_bandit_learner/ts_test.py | sheelfshah/banditpylib | d455424ed74be1850ee3969b7b31f08d49339005 | [
"MIT"
] | 18 | 2020-02-06T00:23:26.000Z | 2021-07-06T16:37:10.000Z | banditpylib/learners/mnl_bandit_learner/ts_test.py | sheelfshah/banditpylib | d455424ed74be1850ee3969b7b31f08d49339005 | [
"MIT"
] | 8 | 2020-02-06T00:05:10.000Z | 2021-09-18T17:24:29.000Z | from unittest.mock import MagicMock
import google.protobuf.text_format as text_format
import numpy as np
from banditpylib.bandits import CvarReward
from banditpylib.data_pb2 import Actions, Context
from .ts import ThompsonSampling
class TestThompsonSampling:
"""Test thompson sampling policy"""
def test_simple_run(self):
revenues = np.array([0, 0.7, 0.8, 0.9, 1.0])
horizon = 100
reward = CvarReward(0.7)
learner = ThompsonSampling(revenues=revenues,
horizon=horizon,
reward=reward)
# Test warm start
learner.reset()
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
}
}
times: 1
}
""", Actions()).SerializeToString()
learner.reset()
# pylint: disable=protected-access
learner._ThompsonSampling__within_warm_start = MagicMock(
return_value=False)
mock_preference_params = np.array([1, 1, 1, 1, 1])
learner._ThompsonSampling__correlated_sampling = MagicMock(
return_value=mock_preference_params)
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
id: 2
id: 3
id: 4
}
}
times: 1
}
""", Actions()).SerializeToString()
| 25.894737 | 79 | 0.589431 | 1,239 | 0.839431 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.265583 |
20ba10bd5764ce5847e7ccb1378ad048d2c87711 | 556 | py | Python | maru/lemmatizer/pymorphy.py | chomechome/maru | dae41e250a64a8b6f6ab9647fd60221d5ede8ab6 | [
"MIT"
] | 47 | 2018-09-25T05:17:13.000Z | 2022-03-31T09:27:15.000Z | maru/lemmatizer/pymorphy.py | ojomio/maru | 7a44be7f974c0962f3023f5d064a391d2b4f20b1 | [
"MIT"
] | 2 | 2019-11-26T08:48:54.000Z | 2020-09-10T10:00:39.000Z | maru/lemmatizer/pymorphy.py | ojomio/maru | 7a44be7f974c0962f3023f5d064a391d2b4f20b1 | [
"MIT"
] | 5 | 2019-01-05T17:36:23.000Z | 2021-06-29T08:55:24.000Z | from maru import pymorphy
from maru.lemmatizer.abstract import ILemmatizer
from maru.tag import Tag
from maru.types import Word
class PymorphyLemmatizer(ILemmatizer):
def lemmatize(self, word: Word, tag: Tag) -> Word:
best_parse = max(
pymorphy.analyze(word),
key=lambda parse: (
tag.pos is pymorphy.get_part_of_speech(parse),
tag.case is pymorphy.get_case(parse),
tag.gender is pymorphy.get_gender(parse),
),
)
return best_parse.normal_form
| 30.888889 | 62 | 0.633094 | 425 | 0.764388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
20bc2480212b49d6e810222a91c0c84532ff3e85 | 230 | py | Python | scripts/read_value.py | jayantpatil/brownie-simple-storage | f501e94e88576a9a267ea451c046cedcb7100871 | [
"MIT"
] | null | null | null | scripts/read_value.py | jayantpatil/brownie-simple-storage | f501e94e88576a9a267ea451c046cedcb7100871 | [
"MIT"
] | null | null | null | scripts/read_value.py | jayantpatil/brownie-simple-storage | f501e94e88576a9a267ea451c046cedcb7100871 | [
"MIT"
] | null | null | null | from brownie import accounts, config, SimpleStorage
def read_contract():
# -1 index gets most recent transaction
simple_storage = SimpleStorage[-1]
print(simple_storage.retrieve())
def main():
read_contract()
| 19.166667 | 51 | 0.721739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.173913 |
20bd0ca933a883e637a3a9cfd0a0a77678655e62 | 586 | py | Python | freetile/helper/xcb.py | rbn42/freetile | 16a5c95650d1887b372f373c2126f96d991f3366 | [
"MIT"
] | 10 | 2017-09-09T23:24:13.000Z | 2020-04-08T17:07:59.000Z | freetile/helper/xcb.py | rbn42/kd_tree_tile | 16a5c95650d1887b372f373c2126f96d991f3366 | [
"MIT"
] | 1 | 2017-08-26T08:09:03.000Z | 2017-08-26T08:09:34.000Z | freetile/helper/xcb.py | rbn42/freetile | 16a5c95650d1887b372f373c2126f96d991f3366 | [
"MIT"
] | null | null | null | import os
import xcffib
from xcffib.testing import XvfbTest
from xcffib.xproto import Atom, ConfigWindow, EventMask, GetPropertyType
conn = xcffib.connect(os.environ['DISPLAY'])
xproto = xcffib.xproto.xprotoExtension(conn)
def arrange(layout, windowids):
for lay, winid in zip(layout, windowids):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y | ConfigWindow.Width | ConfigWindow.Height, lay)
conn.flush()
def move(winid, x, y, sync=True):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y, [x, y])
if sync:
conn.flush()
| 27.904762 | 118 | 0.732082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.015358 |
20bd53028e6f1a61201ddc99f23a3270cceb491c | 1,975 | py | Python | MusicApp/forms.py | krishnasagar14/Project | d3e5595e8572fd5d701f0a4aa051377b13967179 | [
"MIT"
] | null | null | null | MusicApp/forms.py | krishnasagar14/Project | d3e5595e8572fd5d701f0a4aa051377b13967179 | [
"MIT"
] | null | null | null | MusicApp/forms.py | krishnasagar14/Project | d3e5595e8572fd5d701f0a4aa051377b13967179 | [
"MIT"
] | null | null | null | __author__ = 'krishnasagar'
from django import forms
# Refer for forms.MultipleChoiceField always, its helpful -
# http://www.programcreek.com/python/example/58199/django.forms.MultipleChoiceField
class TrackForm(forms.Form):
    """Form for entering a music track: name, optional rating and genres.

    Pass ``choices`` — an iterable of ``(value, label)`` pairs — as a keyword
    argument to populate the genre multi-select; omit it to leave the field
    with Django's default (empty) choice list.
    """

    def __init__(self, *args, **kwargs):
        # pop() with a default replaces the 'in'-check/pop pair and keeps the
        # non-standard kwarg away from forms.Form.__init__.
        choices = kwargs.pop('choices', None)
        super(TrackForm, self).__init__(*args, **kwargs)
        self.fields['tname'] = forms.CharField(
            label='Track Name', max_length=100,
            widget=forms.TextInput(
                attrs={'title': 'Track name upto 100 characters'}))
        self.fields['rating'] = forms.DecimalField(
            label='Rating', max_value=10.0, min_value=0.0,
            decimal_places=1, max_digits=2,
            required=False)  # title tooltip intentionally omitted here
        # Build the genre field once; only pass 'choices' when supplied so
        # the no-choices call matches Django's default behaviour exactly.
        gname_kwargs = {}
        if choices is not None:
            gname_kwargs['choices'] = choices
        self.fields['gname'] = forms.MultipleChoiceField(
            label="Genre Names",
            widget=forms.SelectMultiple(
                attrs={'title': 'Multiple Select using RCtrl+Mouse Left Key'}),
            required=False, **gname_kwargs)
class GenreForm(forms.Form):
    """Single-field form for creating a genre by name."""

    gname = forms.CharField(
        label='Genre Name',
        max_length=100,
        widget=forms.TextInput(attrs={
            'title': 'Genre name upto 100 characters',
            'placeholder': 'Required',
        }),
    )
20bf7567661d1841f7671da7c9253c4e59abf9f8 | 48 | py | Python | gen_util/__init__.py | CUrW-SL/DSS-Framework | 43a39b322ffb0eb92dd116e77cf9a8479357a121 | [
"MIT"
] | null | null | null | gen_util/__init__.py | CUrW-SL/DSS-Framework | 43a39b322ffb0eb92dd116e77cf9a8479357a121 | [
"MIT"
] | null | null | null | gen_util/__init__.py | CUrW-SL/DSS-Framework | 43a39b322ffb0eb92dd116e77cf9a8479357a121 | [
"MIT"
] | null | null | null | from .controller_util import get_triggering_dags | 48 | 48 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
20c039fba739ed74ea6b3b472a04879916cf0b13 | 1,412 | py | Python | yb66/run_xoppy_crystal_YB66.py | 91902078/yb66 | ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9 | [
"CC0-1.0"
] | null | null | null | yb66/run_xoppy_crystal_YB66.py | 91902078/yb66 | ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9 | [
"CC0-1.0"
] | null | null | null | yb66/run_xoppy_crystal_YB66.py | 91902078/yb66 | ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9 | [
"CC0-1.0"
] | null | null | null | import numpy
from xoppy_dabax_util import bragg_calc2
from run_diff_pat import run_diff_pat
from srxraylib.plot.gol import plot
if __name__ == "__main__":
descriptor = 'YB66'
SCANFROM = 0 # in microradiants
SCANTO = 100 # in microradiants
MILLER_INDEX_H = 4
MILLER_INDEX_K = 0
MILLER_INDEX_L = 0
TEMPER = 1.0
ENERGY = 8040.0
SCANPOINTS = 200
print("Using crystal descriptor: ",descriptor)
bragg_dictionary = bragg_calc2(descriptor=descriptor,
hh=MILLER_INDEX_H,kk=MILLER_INDEX_K,ll=MILLER_INDEX_L,
temper=TEMPER,
emin=ENERGY-100.0,emax=ENERGY+100.0,
estep=(SCANTO-SCANFROM)/SCANPOINTS,fileout="xcrystal.bra")
run_diff_pat(
MOSAIC = 0,
GEOMETRY = 0,
SCAN = 2,
UNIT = 1,
SCANFROM = SCANFROM,
SCANTO = SCANTO,
SCANPOINTS = SCANPOINTS,
ENERGY = ENERGY,
ASYMMETRY_ANGLE = 0.0,
THICKNESS = 0.7,
MOSAIC_FWHM = 0.1,
RSAG = 125.0,
RMER = 1290.0,
ANISOTROPY = 0,
POISSON = 0.22,
CUT = "2 -1 -1 ; 1 1 1 ; 0 0 0",
FILECOMPLIANCE = "mycompliance.dat")
a = numpy.loadtxt("diff_pat.dat",skiprows=5)
#
# plot
#
plot(a[:, 0], a[:, -1])
| 24.77193 | 102 | 0.53187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.112606 |
20c08cb1cb00cfdd681c8e68fa79bcb41a100fd8 | 108 | py | Python | code/arc012_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/arc012_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/arc012_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | d=({"Saturday":0,"Sunday":0,"Monday":5,"Tuesday":4,"Wednesday":3,"Thursday":2,"Friday":1})
print(d[input()]) | 54 | 90 | 0.638889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.592593 |
20c0aad0bed3003fbe182d2df998439bf4e1d30a | 9,568 | py | Python | ooi_harvester/utils/parser.py | ooi-data/ooi_harvester | 7ddcad7b05b7ca5d2012ffd2517e1489a4ce489c | [
"MIT"
] | 4 | 2021-01-08T20:01:38.000Z | 2022-03-11T19:03:58.000Z | ooi_harvester/utils/parser.py | ooi-data/ooi_harvester | 7ddcad7b05b7ca5d2012ffd2517e1489a4ce489c | [
"MIT"
] | 7 | 2021-01-08T16:51:50.000Z | 2021-11-02T21:54:22.000Z | ooi_harvester/utils/parser.py | ooi-data/ooi_harvester | 7ddcad7b05b7ca5d2012ffd2517e1489a4ce489c | [
"MIT"
] | null | null | null | import os
import datetime
import math
import traceback
from typing import List
import requests
from loguru import logger
from lxml import etree
from siphon.catalog import TDSCatalog
from dask.utils import memory_repr
import numpy as np
from dateutil import parser
from ooi_harvester.settings import harvest_settings
def estimate_size_and_time(raw):
    """Format a UFrame request estimate (file size + wait time) for display.

    Returns an empty string when *raw* carries neither an estimate
    ('requestUUID') nor a status message ('message').
    """
    m = ""
    if "requestUUID" in raw:
        # Size: bytes -> MB, promoted to GB once it reaches 1024 MB.
        est_size = raw["sizeCalculation"] / 1024 ** 2
        size_txt = "MB"
        if est_size >= 1024.0:
            est_size = est_size / 1024
            size_txt = "GB"

        # Time: raw seconds, promoted to whole minutes or hours.
        est_time = raw["timeCalculation"]
        time_txt = "Seconds"
        minutes = est_time / 60
        if 1.0 <= minutes < 60.0:
            est_time = math.floor(minutes)
            time_txt = "Minute" if est_time == 1 else "Minutes"
        elif minutes >= 60.0:
            est_time = math.floor(est_time / 60 ** 2)
            time_txt = "Hour" if est_time == 1 else "Hours"
        m = f"""
        Estimated File size: {est_size:.4} {size_txt}
        Estimated Time: {est_time} {time_txt}
        """
    elif "message" in raw:
        m = f"""
        No estimate calculated.
        {raw['message']['status']}
        """
    return m
def parse_uframe_response(resp):
    """Normalize a UFrame async-download response into a flat dict.

    Logs the raw response and returns None when 'allURLs' is missing.
    """
    if "allURLs" not in resp:
        logger.warning(resp)
        return None
    thredds_url = resp["allURLs"][0]
    download_url = resp["allURLs"][1]
    return {
        "request_id": resp["requestUUID"],
        "thredds_catalog": thredds_url,
        "download_catalog": download_url,
        "status_url": download_url + "/status.txt",
        "data_size": resp["sizeCalculation"],
        "estimated_time": resp["timeCalculation"],
        "units": {
            "data_size": "bytes",
            "estimated_time": "seconds",
            "request_dt": "UTC",
        },
        "request_dt": datetime.datetime.utcnow().isoformat(),
    }
def param_change(name):
    """
    Map renamed stream parameters to their current names.

    https://oceanobservatories.org/renaming-data-stream-parameters/
    """
    return 'pressure' if name == 'pressure_depth' else name
def parse_param_dict(param_dict):
    """Flatten an OOI parameter record into the metadata-catalog schema."""

    def _wrapped_value(key):
        # 'unit' and 'data_product_type' arrive as {'value': ...} wrappers;
        # unwrap when present, otherwise fall back to None.
        field = param_dict.get(key)
        if isinstance(field, dict):
            return field.get("value")
        return None

    return {
        "pid": param_dict["id"],
        "reference_designator": param_change(param_dict["name"]),
        "parameter_name": param_dict["display_name"],
        "netcdf_name": param_dict["netcdf_name"],
        "standard_name": param_dict["standard_name"],
        "description": param_dict["description"],
        "unit": _wrapped_value("unit"),
        "data_level": param_dict['data_level'],
        "data_product_type": _wrapped_value("data_product_type"),
        "data_product_identifier": param_dict["data_product_identifier"],
        "last_updated": datetime.datetime.utcnow().isoformat(),
    }
def parse_global_range_dataframe(global_ranges):
    """Drop the trailing three columns and apply canonical column names."""
    canonical_columns = [
        "reference_designator",
        "parameter_id_r",
        "parameter_id_t",
        "global_range_min",
        "global_range_max",
        "data_level",
        "units",
    ]
    trimmed = global_ranges[global_ranges.columns[:-3]]
    trimmed.columns = canonical_columns
    return trimmed
def get_bytes(value, unit):
    """Convert *value* given in *unit* (bytes/Kbytes/Mbytes/Gbytes) to bytes."""
    scale = {
        'bytes': 1,
        'Kbytes': 1024,
        'Mbytes': 1024 ** 2,
        'Gbytes': 1024 ** 3,
    }[unit]
    return value * scale
def parse_dataset_element(d, namespace):
    """Collect a THREDDS ``dataset`` element's attributes, size and date.

    Parameters
    ----------
    d : Element
        The ``dataset`` XML element (lxml or ElementTree).
    namespace : str
        Namespace URI to strip from tag names while walking.

    Returns
    -------
    dict
        Element attributes plus ``data_size``/``units``/``size_bytes`` from
        the nested ``dataSize`` element and ``date_modified`` from ``date``.
    """
    dataset_dict = {}
    # iter() replaces getiterator(), which was removed from the stdlib
    # ElementTree in Python 3.9 and is deprecated in lxml.
    for i in d.iter():
        clean_tag = i.tag.replace('{' + namespace + '}', '')
        if clean_tag == 'dataset':
            dataset_dict = dict(**i.attrib)

        if clean_tag == 'dataSize':
            dataset_dict = dict(
                data_size=float(i.text), **i.attrib, **dataset_dict
            )
            dataset_dict = dict(
                size_bytes=get_bytes(
                    dataset_dict['data_size'], dataset_dict['units']
                ),
                **dataset_dict,
            )

        if clean_tag == 'date':
            dataset_dict = dict(date_modified=i.text, **dataset_dict)
    return dataset_dict
def parse_response_thredds(response):
    """Fetch and parse the THREDDS catalog referenced by a UFrame response.

    NOTE(review): assumes ``response`` nests the dict produced by
    ``parse_uframe_response`` under 'result' and a stream record under
    'stream' — confirm against callers.
    """
    stream_name = response['stream']['table_name']
    # TDSCatalog wants the XML form of the catalog, not the HTML landing page.
    catalog = TDSCatalog(
        response['result']['thredds_catalog'].replace('.html', '.xml')
    )
    catalog_dict = {
        'stream_name': stream_name,
        'catalog_url': catalog.catalog_url,
        'base_tds_url': catalog.base_tds_url,
        'async_url': response['result']['download_catalog'],
    }
    # Re-fetch the raw XML so the dataset elements can be walked with lxml.
    req = requests.get(catalog.catalog_url)
    catalog_root = etree.fromstring(req.content)

    # Map the default (None-prefixed) namespace to the 'cat' prefix, since
    # XPath cannot address an unprefixed namespace directly.
    namespaces = {}
    for k, v in catalog_root.nsmap.items():
        if k is None:
            namespaces['cat'] = v
        else:
            namespaces[k] = v
    # Second-level <dataset> elements are the individual file entries.
    dataset_elements = catalog_root.xpath(
        '/cat:catalog/cat:dataset/cat:dataset', namespaces=namespaces
    )
    datasets = [
        parse_dataset_element(i, namespaces['cat']) for i in dataset_elements
    ]
    catalog_dict['datasets'] = datasets
    return catalog_dict
def filter_and_parse_datasets(cat):
    """Split a parsed THREDDS catalog's datasets into data files and provenance.

    Data files matching ``deployment####_<stream>_<start>-<end>.nc`` are kept
    (annotated with deployment number and time span); aggregate-provenance
    JSON entries are collected separately. Totals are added to the returned
    copy of *cat*.
    """
    import re

    stream_cat = cat.copy()
    name = stream_cat['stream_name']
    # Compile once instead of rebuilding the pattern on every dataset.
    data_re = re.compile(
        r'(deployment(\d{4})_(%s)_(\d{4}\d{2}\d{2}T\d+.\d+)-(\d{4}\d{2}\d{2}T\d+.\d+).nc)'  # noqa
        % (name)
    )
    prov_re = re.compile(
        r'(deployment(\d{4})_(%s)_aggregate_provenance.json)' % (name)
    )
    provenance_files = []
    filtered_datasets = []
    for d in stream_cat['datasets']:
        m = data_re.search(str(d['name']))
        prov = prov_re.search(str(d['name']))
        if m:
            _, dep_num, _, start, end = m.groups()
            dataset = dict(
                deployment=int(dep_num), start_ts=start, end_ts=end, **d
            )
            filtered_datasets.append(dataset)
        elif prov:
            _, dep_num, _ = prov.groups()
            provenance = dict(deployment=int(dep_num), **d)
            provenance_files.append(provenance)
    total_bytes = np.sum([d['size_bytes'] for d in filtered_datasets])
    stream_cat['datasets'] = filtered_datasets
    stream_cat['provenance'] = provenance_files
    stream_cat['total_data_size'] = memory_repr(total_bytes)
    stream_cat['total_data_bytes'] = total_bytes
    return stream_cat
def filter_datasets_by_time(
    datasets: List[dict], start_dt: np.datetime64, end_dt: np.datetime64
) -> List[dict]:
    """
    Keep only datasets whose time span lies inside [start_dt, end_dt].

    Each dataset dictionary MUST carry parseable `start_ts` and `end_ts`
    keys.

    Parameters
    ----------
    datasets : list
        The datasets collection to be filtered.
    start_dt : np.datetime64
        Inclusive lower bound of the window.
    end_dt : np.datetime64
        Inclusive upper bound of the window.

    Returns
    -------
    list
        The filtered datasets collection.
    """
    selected = []
    for entry in datasets:
        first = np.datetime64(parser.parse(entry['start_ts']))
        last = np.datetime64(parser.parse(entry['end_ts']))
        # Cast the bounds to the entry's datetime64 resolution before comparing.
        lower = start_dt.astype(first.dtype)
        upper = end_dt.astype(first.dtype)
        if first >= lower and last <= upper:
            selected.append(entry)
    return selected
def setup_etl(stream, source='ooinet', target_bucket='s3://ooi-data'):
    """Prepare local scratch and S3 folder paths for harvesting *stream*.

    Creates ``~/.ooi-harvester/<stream_name>`` locally, derives the temp and
    final S3 locations, and returns the stream record augmented with those
    paths plus ``retrieved_dt``. For non-'ooinet' sources the incoming
    ``stream['retrieved_dt']`` entry is consumed (removed from *stream*).
    """
    name = stream['stream_name']
    harvest_location = os.path.expanduser('~/.ooi-harvester')
    # Local scratch folder for downloaded netCDF files.
    temp_fold = os.path.join(harvest_location, name)
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair and
    # creates the harvest root and stream folder in one call.
    os.makedirs(temp_fold, exist_ok=True)

    # S3 locations: temporary zarr store and final destination.
    temp_s3_fold = f"s3://temp-ooi-data/{name}.zarr"
    final_s3_fold = f"{target_bucket}/{name}"

    if source == 'ooinet':
        retrieved_dt = stream['result']['request_dt']
    else:
        retrieved_dt = stream['retrieved_dt']
        del stream['retrieved_dt']
    return dict(
        temp_fold=temp_fold,
        temp_bucket=temp_s3_fold,
        final_bucket=final_s3_fold,
        retrieved_dt=retrieved_dt,
        **stream,
    )
def seconds_to_date(num):
    """Convert seconds elapsed since 1900-01-01 into a datetime."""
    epoch = datetime.datetime(1900, 1, 1)
    return epoch + datetime.timedelta(seconds=num)
def get_storage_options(path):
    """Return the configured AWS storage options for ``s3://`` paths.

    For any other path this falls through and implicitly returns None.
    """
    if path.startswith("s3://"):
        return harvest_settings.storage_options.aws.dict()
def get_items(keys, orig_dict):
    """Return a new dict containing only the *orig_dict* entries whose key
    is in *keys*."""
    # Dict comprehension replaces the manual accumulate-loop.
    return {k: v for k, v in orig_dict.items() if k in keys}
def rename_item(old_key, new_key, orig_dict):
    """Return a shallow copy of *orig_dict* with *old_key* relabeled as
    *new_key*; a no-op copy when *old_key* is absent."""
    result = orig_dict.copy()
    if old_key in result:
        # Set first, delete second — preserves the original's behaviour,
        # including the old_key == new_key edge case (entry removed).
        result[new_key] = result[old_key]
        del result[old_key]
    return result
def parse_exception(exception):
    """Summarize an exception as a dict of type name, message and the
    fully formatted traceback string."""
    formatted = traceback.format_exception(
        type(exception), exception, exception.__traceback__
    )
    return {
        'type': type(exception).__name__,
        'value': str(exception),
        'traceback': "".join(formatted),
    }
| 29.259939 | 102 | 0.595004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,529 | 0.264319 |
20c0cfeb06b96a4465675ae0f8ac3dafd13c7aa2 | 537 | py | Python | oro_plugins/migrations/0002_galleryitem_gallery.py | mikeh74/orocus_djangocms | 81946daf17770e27bbe5c56b4caa0529bf3170bc | [
"MIT"
] | null | null | null | oro_plugins/migrations/0002_galleryitem_gallery.py | mikeh74/orocus_djangocms | 81946daf17770e27bbe5c56b4caa0529bf3170bc | [
"MIT"
] | null | null | null | oro_plugins/migrations/0002_galleryitem_gallery.py | mikeh74/orocus_djangocms | 81946daf17770e27bbe5c56b4caa0529bf3170bc | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-06-05 20:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oro_plugins', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='galleryitem',
name='gallery',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='oro_plugins.Gallery', verbose_name=''),
preserve_default=False,
),
]
| 25.571429 | 135 | 0.640596 | 411 | 0.765363 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.221601 |
20c103d7c3032a926b35b8724929fb67863b89e6 | 7,178 | py | Python | src/mcedit2/widgets/propertylist.py | elcarrion06/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 673 | 2015-01-02T02:08:13.000Z | 2022-03-24T19:38:14.000Z | src/mcedit2/widgets/propertylist.py | ozzhates/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 526 | 2015-01-01T02:10:53.000Z | 2022-02-06T16:24:21.000Z | src/mcedit2/widgets/propertylist.py | ozzhates/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 231 | 2015-01-01T16:47:30.000Z | 2022-03-31T21:51:55.000Z | """
propertylist
"""
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import logging
from PySide.QtCore import Qt
from mceditlib import nbt
from PySide import QtGui, QtCore
from mcedit2.util.load_ui import registerCustomWidget
log = logging.getLogger(__name__)
class PropertyListItemDelegate(QtGui.QStyledItemDelegate):
def __init__(self, *args, **kwargs):
super(PropertyListItemDelegate, self).__init__(*args, **kwargs)
def createEditor(self, parent, option, index):
model = index.model()
tagName, displayName, valueType, min, max = model.properties[index.row()]
if valueType is int:
valueWidget = QtGui.QSpinBox()
valueWidget.setMinimum(min)
valueWidget.setMaximum(max)
elif valueType is float:
valueWidget = QtGui.QDoubleSpinBox()
valueWidget.setMinimum(min)
valueWidget.setMaximum(max)
elif valueType is bool:
valueWidget = QtGui.QCheckBox()
elif isinstance(valueType, list): # Choice list
valueWidget = QtGui.QComboBox()
for value, name in valueType:
valueWidget.addItem(name, value)
elif valueType is unicode:
valueWidget = QtGui.QPlainTextEdit()
else:
raise TypeError("Can't create attribute widgets for %s yet" % valueType)
valueWidget.setParent(parent)
return valueWidget
def setEditorData(self, editor, index):
model = index.model()
rootTag = model.rootTag
tagName, displayName, valueType, min, max = model.properties[index.row()]
if valueType is int:
editor.setValue(rootTag[tagName].value)
elif valueType is float:
editor.setValue(rootTag[tagName].value)
elif valueType is bool:
editor.setChecked(rootTag[tagName].value)
elif isinstance(valueType, list): # Choice list
currentValue = rootTag[tagName].value
try:
currentIndex = [v for v, n in valueType].index(currentValue)
editor.setCurrentIndex(currentIndex)
except ValueError:
editor.addItem("Unknown value %s" % currentValue, currentValue)
elif valueType is unicode:
editor.setPlainText(rootTag[tagName].value)
else:
raise TypeError("Unknown valueType in setEditorData (check this in addNBTProperty, dummy)")
def setModelData(self, editor, model, index):
tagName, displayName, valueType, min, max = model.properties[index.row()]
rootTag = model.rootTag
if valueType is int:
value = int(editor.value())
elif valueType is float:
value = float(editor.value())
elif valueType is bool:
value = editor.isChecked()
elif isinstance(valueType, list): # Choice list
value = valueType[editor.currentIndex()][0]
elif valueType is unicode:
value = editor.plainText()
else:
raise TypeError("Unknown valueType in setModelData (check this in addNBTProperty, dummy)")
model.setData(index, value)
class PropertyListEntry(namedtuple('PropertyListEntry', 'tagName displayName valueType min max')):
pass
class PropertyListModel(QtCore.QAbstractItemModel):
    """Flat two-column Qt model exposing selected NBT tags of ``rootTag``.

    Column 0 is a display name, column 1 the tag's editable value.  Rows must
    be registered explicitly via addNBTProperty; edits are written back into
    ``rootTag`` and announced through ``propertyChanged``.
    """

    # Emitted as (tagName, newValue) whenever setData changes a tag value.
    # (``unicode`` — this is Python 2 / legacy PySide code.)
    propertyChanged = QtCore.Signal(unicode, object)

    def __init__(self, rootTag):
        super(PropertyListModel, self).__init__()
        self.rootTag = rootTag
        self.properties = []  # list of PropertyListEntry, one per row

    def addNBTProperty(self, tagName, valueType=None, min=None, max=None, displayName=None):
        """Register ``tagName`` as an editable row; no-op if absent from rootTag.

        When ``min``/``max`` are not given they default to the full numeric
        range of the tag's NBT integer type.
        NOTE(review): ``min`` and ``max`` shadow the builtins in this method.
        """
        if displayName is None:
            displayName = tagName
        if valueType is None:
            valueType = int
        if tagName not in self.rootTag:
            return

        tag = self.rootTag[tagName]
        # Pick bounds matching the tag's storage width.
        if tag.tagID == nbt.ID_BYTE:
            tagMin = -(1 << 7)
            tagMax = (1 << 7) - 1
        elif tag.tagID == nbt.ID_SHORT:
            tagMin = -(1 << 15)
            tagMax = (1 << 15) - 1
        elif tag.tagID == nbt.ID_INT:
            tagMin = -(1 << 31)
            tagMax = (1 << 31) - 1
        else:  # tag.tagID == nbt.ID_LONG, ID_FLOAT, ID_DOUBLE
            # tagMin = -(1 << 63)  # xxxx 64-bit spinbox
            # tagMax = (1 << 63) - 1
            # Clamped to 32-bit range because the editor widget cannot
            # represent 64-bit values (see commented-out lines above).
            tagMin = -(1 << 31)
            tagMax = (1 << 31) - 1

        if min is None:
            min = tagMin
        if max is None:
            max = tagMax

        self.properties.append(PropertyListEntry(tagName, displayName, valueType, min, max))

    def columnCount(self, index):
        # Fixed layout: name column + value column.
        return 2

    def data(self, index, role=Qt.DisplayRole):
        """Return the row's display name (col 0) or current tag value (col 1).

        When the entry's valueType is a sequence of (value, label) choices,
        the stored value is mapped to its label for display.
        """
        if not index.isValid():
            return None

        entry = self.properties[index.row()]
        if role in (Qt.DisplayRole, Qt.EditRole):
            if index.column() == 0:
                return entry.displayName
            else:
                value = self.rootTag[entry.tagName].value
                if isinstance(entry.valueType, (list, tuple)):
                    try:
                        return entry.valueType[value][1]
                    except IndexError:
                        return "Unknown value %s" % value
                else:
                    return value

        # if role == Qt.CheckStateRole:
        #     if entry.valueType is not bool:
        #         return -1
        #     value = self.rootTag[entry.tagName].value
        #     return bool(value)

    def flags(self, index):
        if not index.isValid():
            return 0

        # Only the value column (1) is editable.
        flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
        if index.column() == 1:
            flags |= Qt.ItemIsEditable

        entry = self.properties[index.row()]
        #if entry.valueType is bool:
        #    flags |= Qt.ItemIsUserCheckable
        return flags

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return ("Name", "Value")[section]

        return None

    def index(self, row, column, parent=QtCore.QModelIndex()):
        # Flat model: only top-level rows exist; children are invalid.
        if parent.isValid():
            return QtCore.QModelIndex()
        return self.createIndex(row, column, None)

    def parent(self, index):
        # Flat model: no item has a parent.
        return QtCore.QModelIndex()

    def rowCount(self, parent=QtCore.QModelIndex()):
        if parent.isValid():
            return 0
        return len(self.properties)

    def setData(self, index, value, role=Qt.EditRole):
        """Write ``value`` back into the NBT tag; signal only on real change."""
        row = index.row()
        entry = self.properties[row]
        if self.rootTag[entry.tagName].value != value:
            self.rootTag[entry.tagName].value = value
            self.propertyChanged.emit(entry.tagName, value)
            self.dataChanged.emit(index, index)
@registerCustomWidget
class PropertyListWidget(QtGui.QTreeView):
    """Tree view for PropertyListModel using PropertyListItemDelegate editors.

    Extends the view's default edit triggers so editing also starts whenever
    the current item changes.
    """

    def __init__(self, *args, **kwargs):
        super(PropertyListWidget, self).__init__(*args, **kwargs)
        delegate = PropertyListItemDelegate()
        self.setItemDelegate(delegate)
        self.setEditTriggers(self.CurrentChanged | self.editTriggers())
| 33.542056 | 103 | 0.59766 | 6,827 | 0.951101 | 0 | 0 | 329 | 0.045834 | 0 | 0 | 697 | 0.097102 |
20c1081ab780d304c17bb8b2c18e84dfea0280a2 | 2,810 | py | Python | src/architectures/nmp/adjacency/simple/physics.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 9 | 2017-10-09T17:01:52.000Z | 2018-06-12T18:06:05.000Z | src/architectures/nmp/adjacency/simple/physics.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 31 | 2017-11-01T14:39:02.000Z | 2018-04-18T15:34:24.000Z | src/architectures/nmp/adjacency/simple/physics.py | isaachenrion/jets | 59aeba81788d0741af448192d9dfb764fb97cf8d | [
"BSD-3-Clause"
] | 10 | 2017-10-17T19:23:14.000Z | 2020-07-05T04:44:45.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from ._adjacency import _Adjacency
def construct_physics_adjacency(alpha=None, R=None, trainable_physics=False):
    """Build a physics-motivated adjacency module.

    With ``trainable_physics`` False (the default) a FixedPhysicsAdjacency is
    built from the given ``alpha`` and ``R`` constants.  With it True, both
    constants must be left as None and a TrainablePhysicsAdjacency (learned
    parameters) is returned instead.
    """
    if not trainable_physics:
        return FixedPhysicsAdjacency(alpha=alpha, R=R)
    # Trainable variant learns its own alpha/R; passing constants is an error.
    assert R is None
    assert alpha is None
    return TrainablePhysicsAdjacency()
def compute_dij(p, alpha, R):
    """Pairwise kt-family distance d_ij = min(pt_i^2a, pt_j^2a) * dR_ij / R.

    ``p`` is a (batch, n, features) tensor whose feature axis appears to be
    ordered (pt, eta, phi, ...) -- TODO confirm against the caller.  Returns
    a (batch, n, n) tensor of pairwise distances.
    """
    # Broadcast particles against each other; the epsilon keeps pt**(2*alpha)
    # finite for zero-padded entries when alpha is negative.
    row = p.unsqueeze(1) + 1e-10
    col = p.unsqueeze(2) + 1e-10

    d_eta = row[:, :, :, 1] - col[:, :, :, 1]
    # Wrap the azimuthal difference into [-pi, pi).
    d_phi = row[:, :, :, 2] - col[:, :, :, 2]
    d_phi = torch.remainder(d_phi + math.pi, 2 * math.pi) - math.pi
    d_r = (d_phi ** 2 + d_eta ** 2) ** 0.5

    softer = torch.min(row[:, :, :, 0] ** (2. * alpha), col[:, :, :, 0] ** (2. * alpha))
    return softer * d_r / R
class _PhysicsAdjacency(_Adjacency):
    """Shared base for physics-motivated adjacency matrices.

    Subclasses supply ``alpha`` and ``R``; the raw adjacency score is the
    negated pairwise distance from compute_dij, so closer particle pairs
    receive larger values.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def alpha(self):
        """Distance exponent; overridden by subclasses."""
        return None

    @property
    def R(self):
        """Radius scale; overridden by subclasses."""
        return None

    def raw_matrix(self, p, mask=None, **kwargs):
        """Return the (batch, n, n) matrix of negated pairwise distances."""
        return -compute_dij(p, self.alpha, self.R)
class FixedPhysicsAdjacency(_PhysicsAdjacency):
    """Physics adjacency with constant (non-trainable) ``alpha`` and ``R``."""

    def __init__(self, alpha=None, R=None, index='', **kwargs):
        super().__init__(name='phy' + index, **kwargs)
        # Stored as 1-element tensors so they broadcast inside compute_dij.
        self._alpha = Variable(torch.FloatTensor([alpha]))
        self._R = Variable(torch.FloatTensor([R]))
        if torch.cuda.is_available():
            self._alpha = self._alpha.cuda()
            self._R = self._R.cuda()

    @property
    def alpha(self):
        return self._alpha

    @property
    def R(self):
        return self._R
class TrainablePhysicsAdjacency(_PhysicsAdjacency):
    """Physics adjacency whose ``alpha`` and ``R`` are learned.

    Raw parameters are unconstrained scalars; ``alpha`` is squashed into
    (-1, 1) with tanh and ``R`` is kept positive via exp.
    """

    def __init__(self, alpha_init=0, R_init=0, index='', **kwargs):
        super().__init__(name='tphy' + index, **kwargs)
        # NOTE(review): alpha_init / R_init are accepted but currently unused;
        # the raw parameters always start at 0 (alpha=tanh(0)=0, R=exp(0)=1).
        base_alpha_init = 0
        base_R_init = 0
        self._base_alpha = nn.Parameter(torch.Tensor([base_alpha_init]))
        self._base_R = nn.Parameter(torch.Tensor([base_R_init]))

    @property
    def alpha(self):
        """Learned exponent, constrained to (-1, 1)."""
        return F.tanh(self._base_alpha)

    @property
    def R(self):
        """Learned radius, constrained to be positive."""
        return torch.exp(self._base_R)
# Registry mapping short config keys to the physics adjacency classes above.
PHYSICS_ADJACENCIES = dict(
    tphy=TrainablePhysicsAdjacency,
    phy=FixedPhysicsAdjacency
)
| 26.261682 | 83 | 0.607117 | 1,879 | 0.668683 | 0 | 0 | 474 | 0.168683 | 0 | 0 | 376 | 0.133808 |
20c10ea10de758aaf068262804a73e854e7eae25 | 1,527 | py | Python | code/ui.py | Navi-d/Survive | ed8bae508c057a753f5cbb3e33499024bbaaab29 | [
"Apache-2.0"
] | null | null | null | code/ui.py | Navi-d/Survive | ed8bae508c057a753f5cbb3e33499024bbaaab29 | [
"Apache-2.0"
] | null | null | null | code/ui.py | Navi-d/Survive | ed8bae508c057a753f5cbb3e33499024bbaaab29 | [
"Apache-2.0"
] | null | null | null | import pygame
class UI:
    """Heads-up display: draws a health bar and a coin counter on a surface."""

    def __init__(self, surface):
        self.display_surface = surface

        # Health bar graphics and geometry.
        self.health_border = pygame.image.load(
            'graphics/ui/Border_0.png').convert_alpha()
        self.health_bar = pygame.image.load(
            'graphics/ui/Health_0.png').convert_alpha()
        self.health_bar_topleft = (50, 41)
        self.bar_max_width = 152
        self.bar_height = 3

        # Coin icon and label font.
        self.coin = pygame.image.load('graphics/ui/coin.png').convert_alpha()
        self.coin_rect = self.coin.get_rect(topleft=(50, 61))
        self.font = pygame.font.Font(None, 30)

    def show_health(self, current, full):
        """Draw the health bar filled proportionally to current / full."""
        self.display_surface.blit(self.health_bar, (25, 10))
        self.display_surface.blit(self.health_border, (25, 10))
        fill_width = self.bar_max_width * (current / full)
        fill_rect = pygame.Rect(
            self.health_bar_topleft, (fill_width, self.bar_height))
        pygame.draw.rect(self.display_surface, '#dc4949', fill_rect)

    def show_coins(self, amount):
        """Draw the coin icon with ``amount`` rendered to its right."""
        self.display_surface.blit(self.coin, self.coin_rect)
        amount_surf = self.font.render(str(amount), False, 'black')
        amount_rect = amount_surf.get_rect(
            midleft=(self.coin_rect.right + 4, self.coin_rect.centery))
        self.display_surface.blit(amount_surf, amount_rect)
| 39.153846 | 78 | 0.644401 | 1,506 | 0.986248 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.075311 |
20c121357230b7a557b1c97b8dff117f08572b36 | 464 | py | Python | Buy/migrations/0003_singlecardpurchase_initial_sell_price.py | eliilek/TCGProject | ca1f4e89a8b93ec1073526953d1ca3fab21902b0 | [
"MIT"
] | null | null | null | Buy/migrations/0003_singlecardpurchase_initial_sell_price.py | eliilek/TCGProject | ca1f4e89a8b93ec1073526953d1ca3fab21902b0 | [
"MIT"
] | 3 | 2020-02-11T21:16:54.000Z | 2021-06-10T17:30:26.000Z | Buy/migrations/0003_singlecardpurchase_initial_sell_price.py | eliilek/TCGProject | ca1f4e89a8b93ec1073526953d1ca3fab21902b0 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-04-18 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds ``initial_sell_price`` to SingleCardPurchase."""

    dependencies = [
        ('Buy', '0002_standardset'),
    ]

    operations = [
        migrations.AddField(
            model_name='singlecardpurchase',
            name='initial_sell_price',
            # Existing rows are backfilled with 0; preserve_default=False
            # means the default is not kept on the model afterwards.
            field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
            preserve_default=False,
        ),
    ]
| 23.2 | 81 | 0.62069 | 371 | 0.799569 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.237069 |
20c4dac7c3aa2eaf1c0b401897ed8058cc13195c | 763 | py | Python | ksp_login/__init__.py | trojsten/ksp_login | 681288d5fc355e0e1a04f41c47ff045e96a143ef | [
"BSD-3-Clause"
] | 2 | 2016-06-27T12:45:35.000Z | 2018-06-19T17:21:44.000Z | ksp_login/__init__.py | koniiiik/ksp_login | 681288d5fc355e0e1a04f41c47ff045e96a143ef | [
"BSD-3-Clause"
] | 7 | 2015-12-10T15:32:57.000Z | 2019-05-12T12:28:59.000Z | ksp_login/__init__.py | trojsten/ksp_login | 681288d5fc355e0e1a04f41c47ff045e96a143ef | [
"BSD-3-Clause"
] | 2 | 2016-04-07T08:44:08.000Z | 2019-02-10T12:52:13.000Z | __version__ = '0.6.2'
__version_info__ = tuple(map(int, __version__.split('.')))
from django.utils.translation import ugettext_lazy as _
def __activate_social_auth_monkeypatch():
from social_core.backends.base import BaseAuth
from social_core.backends.open_id import (OPENID_ID_FIELD, OpenIdAuth)
from social_core.backends.livejournal import LiveJournalOpenId
BaseAuth.REQUIRED_FIELD_NAME = None
BaseAuth.REQUIRED_FIELD_VERBOSE_NAME = None
OpenIdAuth.REQUIRED_FIELD_NAME = OPENID_ID_FIELD
OpenIdAuth.REQUIRED_FIELD_VERBOSE_NAME = _('OpenID identity')
LiveJournalOpenId.REQUIRED_FIELD_NAME = 'openid_lj_user'
LiveJournalOpenId.REQUIRED_FIELD_VERBOSE_NAME = _('LiveJournal username')
__activate_social_auth_monkeypatch()
| 33.173913 | 77 | 0.806029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.08519 |
20c7757584285b39043a17c5228fbc30607e0f7e | 1,709 | py | Python | agents/rule_based/keywords.py | didi/MEEP | eb668fe598e40d244f204363d360babbe1fe0dc2 | [
"Apache-2.0"
] | 17 | 2020-09-09T02:32:14.000Z | 2021-10-01T09:46:40.000Z | agents/rule_based/keywords.py | didi/MEEP | eb668fe598e40d244f204363d360babbe1fe0dc2 | [
"Apache-2.0"
] | 2 | 2020-12-02T09:10:03.000Z | 2020-12-02T20:31:05.000Z | agents/rule_based/keywords.py | didi/MEEP | eb668fe598e40d244f204363d360babbe1fe0dc2 | [
"Apache-2.0"
] | 3 | 2020-10-10T09:14:43.000Z | 2022-01-18T02:36:31.000Z | from enum import Enum
class IndicatorType(Enum):
CLEAR_CONTEXT = 1
YES = 2
NO = 3
PLACES_NEARBY = 4
RELATIVE_LANDMARK = 5
EMPTY_AFTER_FILTERING = 6
INDICATORS = {
IndicatorType.CLEAR_CONTEXT:
set(['hm', 'hmm', 'hrm', 'oops', 'sorry', 'actually']),
IndicatorType.YES: {
'yes': [[]], 'sure': [[]], 'alright': [[]], 'definitely': [[]],
'ok': [[]], 'okay': [[]], 'yep': [[]], 'yeah': [[]], 'yah': [[]],
'perfect': [[]], 'great': [[]],
'lets': [['roll'], ['go'], ['leave']],
'sounds': [['good']],
'thats': [['it'], ['right'], ['the', 'one']],
},
IndicatorType.NO:
set(['not', 'no']),
IndicatorType.PLACES_NEARBY:
set([
'any',
'anything',
'anywhere',
'nearby',
'nearer',
'nearest',
'closer',
'closest',
'farther',
'farthest',
'further',
'furthest',
'another',
'other',
'others',
'places',
'around',
'option',
'options',
'someplace',
'suggest',
'suggestion',
'suggestions',
'recommend',
'recommendation',
'recommendations',
]),
IndicatorType.RELATIVE_LANDMARK: {
'across': [['the', 'street']],
'next': [['to']],
'near': [[]],
'by': [[]],
'close': [['to']],
'from': [[]],
},
}
PLACES_NEARBY_WORD_TO_INDEX_MAP = {
"first": 0,
"second": 1,
"third": 2,
"1st": 0,
"2nd": 1,
"3rd": 2
}
| 23.736111 | 73 | 0.392627 | 149 | 0.087185 | 0 | 0 | 0 | 0 | 0 | 0 | 531 | 0.310708 |
20c77eabbc6690e3c5bb68e42dc26beb132dc547 | 1,549 | py | Python | tensordata/utils/conda/_conda.py | Hourout/tensordata | cbef6742ee0d3bfc4b886358fc01618bb5b63603 | [
"Apache-2.0"
] | 13 | 2019-01-08T10:22:39.000Z | 2020-06-17T10:02:47.000Z | tensordata/utils/conda/_conda.py | Hourout/tensordata | cbef6742ee0d3bfc4b886358fc01618bb5b63603 | [
"Apache-2.0"
] | null | null | null | tensordata/utils/conda/_conda.py | Hourout/tensordata | cbef6742ee0d3bfc4b886358fc01618bb5b63603 | [
"Apache-2.0"
] | 1 | 2020-06-17T10:02:49.000Z | 2020-06-17T10:02:49.000Z | import subprocess
__all__ = ['view_env', 'create_env', 'remove_env']
def view_env():
    """Return conda virtual environments as a ``{name: path}`` dict.

    Runs ``conda info -e``, drops the two header lines, splits each remaining
    row on spaces and keeps the first field (environment name) and the last
    (its path).
    """
    # Plain string: this command has no interpolated values (was an f-string
    # with no placeholders).
    cmd = "conda info -e"
    s = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
    s = s.decode('utf-8').strip().split('\n')[2:]
    s = [i.split(' ') for i in s]
    return {i[0]: i[-1] for i in s}
def create_env(name, version):
    """Create a conda virtual environment.

    Args:
        name: virtual environment.
        version: python version.
    Return:
        log info.
    """
    # Bring conda itself up to date before creating anything.
    cmd = 'conda update -n base -c defaults conda'
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]

    if name in view_env():
        return 'Virtual environment already exists.'

    cmd = f"conda create -n {name} python={version} -y"
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]

    # Confirm creation by re-listing the environments.
    if name in view_env():
        return 'Virtual environment successfully created.'
    return 'Virtual environment failed created.'
def remove_env(name):
    """Remove a conda virtual environment.

    Args:
        name: virtual environment.
    Return:
        log info.
    """
    if name not in view_env():
        return 'Virtual environment not exists.'

    cmd = f'conda remove -n {name} --all'
    subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]

    # Confirm removal by re-listing the environments.
    if name not in view_env():
        return 'Virtual environment successfully removed.'
    return 'Virtual environment failed removed.'
20ca2e167b593c60175aa9358fd246b00b8a7d20 | 382 | py | Python | ebrains_drive/__init__.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 5 | 2020-12-20T16:12:18.000Z | 2022-03-29T09:56:34.000Z | ebrains_drive/__init__.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 9 | 2020-09-25T08:27:41.000Z | 2022-01-03T08:41:48.000Z | ebrains_drive/__init__.py | apdavison/ebrains-drive | 8ff11fc60e77c1b605485f550efc350c1a5f443a | [
"Apache-2.0"
] | 2 | 2020-09-18T17:32:38.000Z | 2020-10-13T13:34:38.000Z | """
A Python package for working with the Human Brain Project Model Validation Framework.
Andrew Davison and Shailesh Appukuttan, CNRS, 2017-2020
License: BSD 3-clause, see LICENSE.txt
"""
from ebrains_drive.client import DriveApiClient
def connect(username=None, password=None, token=None, env=""):
    """Return a DriveApiClient built from the given credentials/environment."""
    return DriveApiClient(username, password, token, env)
20cc6c6f266cf22a4841a420e28fb03f3c12ecb1 | 1,290 | py | Python | test/mock/mock_lens.py | AshKelly/PyAutoLens | 043795966338a655339e61782253ad67cc3c14e6 | [
"MIT"
] | null | null | null | test/mock/mock_lens.py | AshKelly/PyAutoLens | 043795966338a655339e61782253ad67cc3c14e6 | [
"MIT"
] | null | null | null | test/mock/mock_lens.py | AshKelly/PyAutoLens | 043795966338a655339e61782253ad67cc3c14e6 | [
"MIT"
] | null | null | null | from test.mock.mock_inversion import MockMapper
from test.mock.mock_galaxy import MockHyperGalaxy
class MockTracer(object):
    """Test stand-in for a ray tracer.

    Stores precomputed 1D images plus capability flags, and exposes them
    through the same property names as the real tracer: the singular
    ``image_plane_*`` properties return the stored arrays directly, the
    plural ones wrap them in single-element lists, and the mapper /
    regularization / hyper-galaxy properties return fresh mock objects.
    """

    def __init__(self, unblurred_image_1d, blurring_image_1d, has_light_profile, has_pixelization, has_hyper_galaxy,
                 has_grid_mappers=False):
        self.unblurred_image_1d = unblurred_image_1d
        self.blurring_image_1d = blurring_image_1d
        self.has_light_profile = has_light_profile
        self.has_pixelization = has_pixelization
        self.has_hyper_galaxy = has_hyper_galaxy
        self.has_grid_mappers = has_grid_mappers

    @property
    def all_planes(self):
        # The mock has no plane structure.
        return []

    @property
    def image_plane_image_1d(self):
        return self.unblurred_image_1d

    @property
    def image_plane_blurring_image_1d(self):
        return self.blurring_image_1d

    @property
    def image_plane_images_1d(self):
        return [self.unblurred_image_1d]

    @property
    def image_plane_blurring_images_1d(self):
        return [self.blurring_image_1d]

    @property
    def mappers_of_planes(self):
        # New MockMapper on every access.
        return [MockMapper()]

    @property
    def regularization_of_planes(self):
        return [MockMapper()]

    @property
    def hyper_galaxies(self):
        return [MockHyperGalaxy(), MockHyperGalaxy()]
20cf19c220c9e244648b1599487f047cd24ebb59 | 4,623 | py | Python | blog/models.py | florimondmanca/personal-api | 6300f965d3f51d1bf5f10cf1eb15d673bd627631 | [
"MIT"
] | 4 | 2018-08-17T08:06:06.000Z | 2020-02-20T15:15:56.000Z | blog/models.py | florimondmanca/personal-api | 6300f965d3f51d1bf5f10cf1eb15d673bd627631 | [
"MIT"
] | 2 | 2018-10-08T15:59:58.000Z | 2018-10-20T16:50:13.000Z | blog/models.py | florimondmanca/personal-api | 6300f965d3f51d1bf5f10cf1eb15d673bd627631 | [
"MIT"
] | 1 | 2019-09-14T23:15:10.000Z | 2019-09-14T23:15:10.000Z | """Blog models."""
from typing import Union
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
from django.utils.text import Truncator, slugify
from markdownx.models import MarkdownxField
from .dbfunctions import Unnest
from .signals import post_published
from .utils import markdown_unformatted
class PostManager(models.Manager):
    """Manager adding convenience filters for blog posts."""

    def published(self) -> models.QuerySet:
        """Return only posts whose ``published`` timestamp is set."""
        queryset = self.get_queryset()
        return queryset.filter(published__isnull=False)
class Post(models.Model):
    """Represents a blog post."""

    objects = PostManager()

    SLUG_MAX_LENGTH = 80

    title = models.CharField(max_length=300)
    # Unique URL fragment; auto-derived from the title on first save (see save()).
    slug = models.SlugField(max_length=SLUG_MAX_LENGTH, unique=True)
    description = models.TextField(
        default="", blank=True, help_text="Used for social cards and RSS."
    )
    content = MarkdownxField(blank=True, default="")
    image_url = models.URLField(blank=True, null=True)
    image_caption = models.TextField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # None while the post is a draft; set by publish().
    published = models.DateTimeField(blank=True, null=True)

    class Meta:  # noqa
        ordering = ("-published",)
        # NOTE: Django uses B-Tree indexes, enough for small datasets.
        indexes = [
            # `created` is used for ordering, which can be sped up by an index.
            models.Index(fields=["created"]),
            # `published` is filtered on a lot (to retrieve drafts)
            # and does not change very often.
            models.Index(fields=(["published"])),
        ]

    def save(self, *args, **kwargs):
        """Set slug when creating a post."""
        # Only on first save, and only if no slug was supplied; truncated to
        # fit the field.
        if not self.pk and not self.slug:
            self.slug = slugify(self.title)[: self.SLUG_MAX_LENGTH]
        return super().save(*args, **kwargs)

    def __str__(self) -> str:
        """Represent by its title."""
        return str(self.title)

    def publish(self, request=None):
        """Publish a blog post by setting its published date."""
        self.published = timezone.now()
        self.save()
        # Notify listeners (e.g. webmention/ping handlers) of the publication.
        post_published.send(sender=Post, instance=self, request=request)

    @property
    def is_draft(self) -> bool:
        """Return whether the post is a draft."""
        return self.published is None

    @property
    def preview(self) -> str:
        """Return an unformatted preview of the post contents."""
        return Truncator(markdown_unformatted(self.content)).chars(200)

    def _find_published(self, order_by, **kwargs):
        """Filter and get the first published item in the queryset, or None."""
        if not self.published:
            return None
        qs = Post.objects.published().order_by(order_by).filter(**kwargs)
        # Empty querysets are falsy, so this yields the first match or None.
        return qs and qs[0] or None

    @property
    def previous(self) -> Union["Post", None]:
        """Return the previous published post.

        If the post is not published or there is no previous published post,
        returns None.
        """
        return self._find_published("-published", published__lt=self.published)

    @property
    def next(self) -> Union["Post", None]:
        """Return the next published post.

        If the post is not published or there is no next published post,
        returns None.
        """
        return self._find_published("published", published__gt=self.published)

    def get_absolute_url(self) -> str:
        """Return the absolute URL path of a blog post."""
        return f"/{self.slug}/"

    @classmethod
    def list_absolute_url(cls) -> str:
        """Return the absolute URL path for the list of posts."""
        return "/"
class TagManager(models.Manager):
    """Manager that can annotate tags with how many posts carry them."""

    def with_post_counts(self, published_only: bool = False):
        """Annotate each tag with a ``post_count`` attribute.

        When ``published_only`` is true, only published posts are counted.
        """
        post_filter = (
            models.Q(posts__published__isnull=False) if published_only else None
        )
        return self.get_queryset().annotate(
            post_count=models.Count("posts", filter=post_filter)
        )
class Tag(models.Model):
    """Represents a group of posts related to similar content."""

    # Custom manager so Tag.objects.with_post_counts() is available.
    objects = TagManager()

    name = models.CharField(max_length=20)
    posts = models.ManyToManyField(to=Post, related_name="tags")

    def __str__(self) -> str:
        """Represent the tag by its name."""
        return str(self.name)
| 33.021429 | 79 | 0.653472 | 4,245 | 0.918235 | 0 | 0 | 1,019 | 0.22042 | 0 | 0 | 1,358 | 0.293749 |
20cf365aa273b16e874ac91e02b64fabdf860834 | 148 | py | Python | backend/app/db/base.py | jnana-cetana/XMeme | cb7d5e31c455dc3c7e751dff9e7c8e067090936b | [
"MIT"
] | 19 | 2021-02-15T19:55:25.000Z | 2022-02-01T09:05:07.000Z | backend/app/db/base.py | jnana-cetana/XMeme | cb7d5e31c455dc3c7e751dff9e7c8e067090936b | [
"MIT"
] | null | null | null | backend/app/db/base.py | jnana-cetana/XMeme | cb7d5e31c455dc3c7e751dff9e7c8e067090936b | [
"MIT"
] | null | null | null | # Import all the models, so that Base has them before being imported by Alembic
from app.db.base_class import Base
from app.models.meme import Meme
| 37 | 79 | 0.804054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.533784 |
20cf807d2352e8fd6b3d5ef73df7e12b1b93cfcd | 6,506 | py | Python | viseron/watchdog/thread_watchdog.py | magicmonkey/viseron | 01733e6185b3467d6336c4d3f93f367bfebf13e8 | [
"MIT"
] | null | null | null | viseron/watchdog/thread_watchdog.py | magicmonkey/viseron | 01733e6185b3467d6336c4d3f93f367bfebf13e8 | [
"MIT"
] | null | null | null | viseron/watchdog/thread_watchdog.py | magicmonkey/viseron | 01733e6185b3467d6336c4d3f93f367bfebf13e8 | [
"MIT"
] | null | null | null | """Watchdog for long-running threads."""
import datetime
import logging
import threading
from typing import Callable, Dict, List, Optional
from viseron.watchdog import WatchDog
LOGGER = logging.getLogger(__name__)
class RestartableThread(threading.Thread):
"""Thread which can be reinstantiated with the clone method.
Arguments are the same as a standard Thread, with a few additions:
:param stop_target: (default=None)
A callable which is called when stop method is called.
:param poll_timer: (default=None)
A mutable list which contains a single element with a timestamp
:param poll_timeout: (default=None)
A timeout in seconds. If poll_timer has not been updated in poll_timeout seconds
the thread is considered stuck and is restarted
:param poll_target: (default=None)
A callable which is called when a timeout occurs.
:param thread_store_category: (default=None)
Thread will be stored in a RestartableThread.thread_store with
thread_store_category as key.
:param register: (default=True)
If true, threads will be registered in the ThreadWatchDog and automatically
restart incase of an exception.
"""
thread_store: Dict[str, List[threading.Thread]] = {}
def __init__(
self,
group=None,
target=None,
name=None,
args=(),
kwargs=None,
*,
daemon=None,
stop_target=None,
poll_timer: Optional[List[float]] = None,
poll_timeout=None,
poll_target=None,
thread_store_category=None,
register=True,
base_class=None,
base_class_args=(),
):
super().__init__(
group=group,
target=target,
name=name,
args=args,
kwargs=kwargs,
daemon=daemon,
)
self._restartable_group = group
self._restartable_target = target
self._restartable_name = name
self._restartable_args = args
self._restartable_kwargs = None
self._restartable_daemon = daemon
self._stop_target = stop_target
if any([poll_timer, poll_timeout, poll_target]) and not all(
[poll_timer, poll_timeout, poll_target]
):
LOGGER.error("poll_timer, poll_timeout, poll_target are mutually inclusive")
if poll_timer:
if not isinstance(poll_timer, list) and len(poll_timer) != 1:
LOGGER.error(
"poll_timer needs to be a list with a single element "
"to keep it mutable"
)
self._poll_timer = poll_timer
self._poll_timeout = poll_timeout
self._poll_target = poll_target
self._thread_store_category = thread_store_category
if thread_store_category:
self.thread_store.setdefault(thread_store_category, []).append(self)
self._register = register
if register:
ThreadWatchDog.register(self)
self._base_class = base_class
self._base_class_args = base_class_args
@property
def started(self):
"""Return if thread has started."""
return self._started.is_set()
@property
def poll_timer(self):
"""Return if thread has started."""
return self._poll_timer
@property
def poll_timeout(self) -> Optional[int]:
"""Return max duration of inactivity for poll timer."""
return self._poll_timeout
@property
def poll_target(self) -> Optional[Callable]:
"""Return target poll method."""
return self._poll_target
@property
def thread_store_category(self) -> Optional[str]:
"""Return given thread store category."""
return self._thread_store_category
def stop(self) -> bool:
"""Calls given stop target method."""
if self._thread_store_category:
self.thread_store[self._thread_store_category].remove(self)
ThreadWatchDog.unregister(self)
return self._stop_target() if self._stop_target else True
def clone(self):
"""Return a clone of the thread to restart it."""
if self._base_class:
return self._base_class(*self._base_class_args, register=False)
return RestartableThread(
group=self._restartable_group,
target=self._restartable_target,
name=self._restartable_name,
args=self._restartable_args,
kwargs=self._restartable_kwargs,
daemon=self._restartable_daemon,
stop_target=self._stop_target,
poll_timer=self._poll_timer,
poll_timeout=self._poll_timeout,
poll_target=self._poll_target,
thread_store_category=self._thread_store_category,
register=False,
base_class=self._base_class,
base_class_args=self._base_class_args,
)
class ThreadWatchDog(WatchDog):
"""A watchdog for long running threads."""
registered_items: List[RestartableThread] = []
def __init__(self):
super().__init__()
self._scheduler.add_job(self.watchdog, "interval", seconds=15)
def watchdog(self):
"""Check for stopped threads and restart them."""
for index, registered_thread in enumerate(self.registered_items):
if not registered_thread.started:
continue
if registered_thread.poll_timer and registered_thread.poll_timer[0]:
now = datetime.datetime.now().timestamp()
if (
now - registered_thread.poll_timer[0]
> registered_thread.poll_timeout
):
LOGGER.debug("Thread {} is stuck".format(registered_thread.name))
registered_thread.poll_target()
registered_thread.join()
else:
continue
elif registered_thread.is_alive():
continue
LOGGER.debug("Thread {} is dead, restarting".format(registered_thread.name))
if registered_thread.thread_store_category:
RestartableThread.thread_store[
registered_thread.thread_store_category
].remove(registered_thread)
self.registered_items[index] = registered_thread.clone()
if not self.registered_items[index].started:
self.registered_items[index].start()
| 35.747253 | 88 | 0.628497 | 6,284 | 0.965878 | 0 | 0 | 665 | 0.102213 | 0 | 0 | 1,563 | 0.24024 |
20cfdc7ab6f4e1e82e2f218fc1e668d19a86e472 | 1,441 | py | Python | day3/main.py | jaydom28/Advent-Of-Code-2021 | 9f846a6b95786c2bc8f6031120495f3985cbe4bf | [
"MIT"
] | null | null | null | day3/main.py | jaydom28/Advent-Of-Code-2021 | 9f846a6b95786c2bc8f6031120495f3985cbe4bf | [
"MIT"
] | null | null | null | day3/main.py | jaydom28/Advent-Of-Code-2021 | 9f846a6b95786c2bc8f6031120495f3985cbe4bf | [
"MIT"
] | null | null | null | def read_lines(file_path):
with open(file_path, 'r') as handle:
return [line.strip() for line in handle]
def count_bits(numbers):
    """Return, per position, how many strings in ``numbers`` have a '1' there.

    All strings are expected to share the length of the first one.
    """
    width = len(numbers[0])
    return [sum(int(bits[pos]) for bits in numbers) for pos in range(width)]
# Advent of Code 2021 day 3 driver script.
lines = read_lines('test.txt')
counts = count_bits(lines)
total_lines = len(lines)

# Part 1: gamma uses the most common bit per position, epsilon its complement.
gamma = [int(c > total_lines/2) for c in counts]
epsilon = [int(not bit) for bit in gamma]
gamma = int(''.join(str(bit) for bit in gamma), 2)
epsilon = int(''.join(str(bit) for bit in epsilon), 2)
print(f'Part 1:\n  {gamma * epsilon}\n')

# Part 2: repeatedly keep only numbers matching the most common bit at the
# current position (oxygen) or the least common bit (CO2), until one remains.
oxygen_rating = lines
co2_rating = lines
for i in range(len(oxygen_rating[0])):
    counts = count_bits(oxygen_rating)
    total = len(oxygen_rating)
    more_common_bit = int(counts[i] >= total/2)
    oxygen_rating = [num for num in oxygen_rating if int(num[i]) == more_common_bit]
    if len(oxygen_rating) == 1:
        break

for i in range(len(co2_rating[0])):
    counts = count_bits(co2_rating)
    total = len(co2_rating)
    more_common_bit = int(counts[i] >= total/2)
    co2_rating = [num for num in co2_rating if int(num[i]) != more_common_bit]
    if len(co2_rating) == 1:
        break

# Convert the surviving bit strings to integers and report their product.
oxygen_rating = int(oxygen_rating[0], 2)
co2_rating = int(co2_rating[0], 2)

print('Part 2')
print(f'  Oxygen rating: {oxygen_rating}')
print(f'  CO2 rating: {co2_rating}')
print('  ' + str(oxygen_rating * co2_rating))
20d03fc5918a1b799cd098fc0deb127b9278e5cc | 8,679 | py | Python | tests/torch_api/test_multi_models.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
] | 635 | 2021-06-11T03:03:11.000Z | 2022-03-31T14:52:57.000Z | tests/torch_api/test_multi_models.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
] | 181 | 2021-06-10T12:27:19.000Z | 2022-03-31T04:08:19.000Z | tests/torch_api/test_multi_models.py | shjwudp/bagua | 7e1b438e27e3119b23e472f5b9217a9862932bef | [
"MIT"
] | 71 | 2021-06-10T13:16:53.000Z | 2022-03-22T09:26:22.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from tests.internal.common_utils import find_free_port
import unittest
import multiprocessing
import os
from bagua.torch_api.utils import flatten
import bagua.torch_api as bagua
from tests import skip_if_cuda_not_available
N_EPOCHS = 10
class Net1(nn.Module):
    """Small 2->10->50->4 MLP with ReLU activations and a softmax output."""

    def __init__(self):
        super(Net1, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False)
        self.fc2 = nn.Linear(10, 50, bias=True)
        self.fc3 = nn.Linear(50, 4, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        logits = self.fc3(hidden)
        # Per-sample probability distribution over the 4 outputs.
        return F.softmax(logits, dim=1)
class Net2(nn.Module):
    """Deeper 2->10->30->20->4 MLP with ReLU activations and softmax output."""

    def __init__(self):
        super(Net2, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False)
        self.fc2 = nn.Linear(10, 30, bias=True)
        self.fc3 = nn.Linear(30, 20, bias=True)
        self.fc4 = nn.Linear(20, 4, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        hidden = self.relu(self.fc3(hidden))
        logits = self.fc4(hidden)
        # Per-sample probability distribution over the 4 outputs.
        return F.softmax(logits, dim=1)
def _init_bagua_env(rank, env):
    """Initialize one spawned worker process for a Bagua run.

    Seeds the RNG deterministically per rank, exports the distributed
    environment variables Bagua expects, binds the process to GPU ``rank``,
    and joins the Bagua process group.
    """
    # set deterministic
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(rank)
    # initialize subprocess env
    os.environ["WORLD_SIZE"] = env["WORLD_SIZE"]
    os.environ["LOCAL_WORLD_SIZE"] = env["LOCAL_WORLD_SIZE"]
    os.environ["MASTER_ADDR"] = env["MASTER_ADDR"]
    os.environ["MASTER_PORT"] = env["MASTER_PORT"]
    os.environ["BAGUA_SERVICE_PORT"] = env["BAGUA_SERVICE_PORT"]
    os.environ["RANK"] = str(rank)
    os.environ["LOCAL_RANK"] = str(rank)

    # init bagua distributed process group
    torch.cuda.set_device(rank)
    bagua.init_process_group()
def _init_torch_env(rank, nprocs, backend):
    """Initialize one worker for the reference torch.distributed (DDP) run.

    Uses the same per-rank seeding as _init_bagua_env and a shared-file
    rendezvous for process-group initialization.
    """
    # set deterministic
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(rank)

    # init torch distributed process group
    torch.cuda.set_device(rank)
    torch.distributed.init_process_group(
        world_size=nprocs,
        rank=rank,
        backend=backend,
        init_method="file:///tmp/.bagua.test.filestore",
    )
def run_model(
    rank,
    results,
    env,
):
    """Train two independent models with Bagua GradientAllReduce on one rank.

    Flattened weights are copied into ``results[rank]`` before and after
    training so the parent process can compare runs.
    """
    _init_bagua_env(rank, env)

    # construct model and optimizer, etc.
    model_1 = Net1().cuda()
    optimizer_1 = torch.optim.SGD(model_1.parameters(), lr=0.01)
    loss_fn_1 = nn.MSELoss()

    model_2 = Net2().cuda()
    optimizer_2 = torch.optim.SGD(model_2.parameters(), lr=0.01)
    loss_fn_2 = nn.MSELoss()

    # wrap model
    from bagua.torch_api.algorithms import gradient_allreduce

    algorithm = gradient_allreduce.GradientAllReduceAlgorithm()
    model_1 = model_1.with_bagua([optimizer_1], algorithm)
    model_2 = model_2.with_bagua([optimizer_2], algorithm)

    ret = results[rank]
    ret.init_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
    ret.init_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))

    for epoch in range(N_EPOCHS):
        # NOTE(review): data/target come from the per-rank seeded RNG, so the
        # draw order here presumably mirrors run_torch_model — confirm before
        # reordering anything in this loop.
        data_1 = torch.randn(8, 2).cuda()
        target_1 = torch.randn(8, 4).cuda()

        optimizer_1.zero_grad()
        output_1 = model_1(data_1)
        loss_1 = loss_fn_1(output_1, target_1)
        loss_1.backward()
        optimizer_1.step()

        data_2 = torch.randn(8, 2).cuda()
        target_2 = torch.randn(8, 4).cuda()

        optimizer_2.zero_grad()
        output_2 = model_2(data_2)
        loss_2 = loss_fn_2(output_2, target_2)
        loss_2.backward()
        optimizer_2.step()

    ret.end_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
    ret.end_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
def run_torch_model(
    rank,
    nprocs,
    results,
    backend,
    env,
):
    """Worker entry point: train Net1 and Net2 under vanilla torch DDP.

    Mirrors run_model step-for-step (same models, optimizer settings and
    batch shapes) so the resulting weights can be compared with the Bagua
    run. ``env`` is accepted for signature parity but unused here.
    """
    _init_torch_env(rank, nprocs, backend)
    # construct model and optimizer, etc.
    model_1 = Net1().cuda()
    optimizer_1 = torch.optim.SGD(model_1.parameters(), lr=0.01)
    loss_fn_1 = nn.MSELoss()
    model_2 = Net2().cuda()
    optimizer_2 = torch.optim.SGD(model_2.parameters(), lr=0.01)
    loss_fn_2 = nn.MSELoss()
    # wrap model
    model_1 = torch.nn.parallel.DistributedDataParallel(model_1, device_ids=[rank])
    model_2 = torch.nn.parallel.DistributedDataParallel(model_2, device_ids=[rank])
    ret = results[rank]
    # Snapshot weights before any training step.
    ret.init_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
    ret.init_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
    for epoch in range(N_EPOCHS):
        data_1 = torch.randn(8, 2).cuda()
        target_1 = torch.randn(8, 4).cuda()
        optimizer_1.zero_grad()
        output_1 = model_1(data_1)
        loss_1 = loss_fn_1(output_1, target_1)
        loss_1.backward()
        optimizer_1.step()
        data_2 = torch.randn(8, 2).cuda()
        target_2 = torch.randn(8, 4).cuda()
        optimizer_2.zero_grad()
        output_2 = model_2(data_2)
        loss_2 = loss_fn_2(output_2, target_2)
        loss_2.backward()
        optimizer_2.step()
    # Snapshot weights after training.
    ret.end_weight_1.copy_(flatten([param.data for param in model_1.parameters()]))
    ret.end_weight_2.copy_(flatten([param.data for param in model_2.parameters()]))
class Result(object):
    """Pre-allocated flat weight buffers for one worker rank.

    A worker copies the flattened parameters of Net1/Net2 into these tensors
    before and after training; the parent then compares the torch-DDP and
    Bagua runs. The original repeated the same buffer-construction expression
    four times; it is factored into a helper here.
    """
    @staticmethod
    def _flat_zeros(model):
        # One flat zero tensor with the same layout/size that `flatten`
        # produces for the model's real parameters.
        return flatten(
            [torch.zeros_like(param.data) for param in model.parameters()]
        )

    def __init__(self):
        # Throwaway CPU models are built only to size the buffers.
        model_1 = Net1()
        model_2 = Net2()
        self.init_weight_1 = self._flat_zeros(model_1)
        self.end_weight_1 = self._flat_zeros(model_1)
        self.init_weight_2 = self._flat_zeros(model_2)
        self.end_weight_2 = self._flat_zeros(model_2)
class TestMultiModels(unittest.TestCase):
    @skip_if_cuda_not_available()
    def test_multi_models(self):
        """Train Net1/Net2 under plain torch DDP and under Bagua, then check
        that every rank's initial and final weights match for both models.

        The original duplicated the spawn/join boilerplate for the two runs
        and spelled out the four weight comparisons per rank; both are
        factored into loops here (behavior unchanged).
        """
        nprocs = torch.cuda.device_count()
        mp = multiprocessing.get_context("spawn")

        def _spawn_and_join(target, make_args):
            # Start one worker per GPU and require every worker to exit cleanly.
            processes = []
            for i in range(nprocs):
                p = mp.Process(target=target, args=make_args(i))
                p.start()
                processes.append(p)
            for p in processes:
                p.join(timeout=60)
                self.assertTrue(p.exitcode == 0)

        # Reference run: vanilla torch DistributedDataParallel over gloo.
        # (run_torch_model accepts but ignores the env argument.)
        torch_results = [Result() for _ in range(nprocs)]
        _spawn_and_join(
            run_torch_model, lambda i: (i, nprocs, torch_results, "gloo", {})
        )

        # Bagua run: needs rendezvous and service ports in the environment.
        env = {
            "WORLD_SIZE": str(nprocs),
            "LOCAL_WORLD_SIZE": str(nprocs),
            "MASTER_ADDR": "127.0.0.1",
            "MASTER_PORT": str(find_free_port(8000, 8100)),
            "BAGUA_SERVICE_PORT": str(find_free_port(9000, 9100)),
        }
        bagua_results = [Result() for _ in range(nprocs)]
        _spawn_and_join(run_model, lambda i: (i, bagua_results, env))

        # Both frameworks must produce the same weights before and after training.
        for rank in range(nprocs):
            for attr in (
                "init_weight_1",
                "end_weight_1",
                "init_weight_2",
                "end_weight_2",
            ):
                self.assertTrue(
                    torch.all(
                        torch.isclose(
                            getattr(bagua_results[rank], attr),
                            getattr(torch_results[rank], attr),
                        )
                    ).item()
                )
# Run the test suite when this module is invoked directly.
if __name__ == "__main__":
    unittest.main()
| 28.833887 | 84 | 0.577486 | 4,148 | 0.477935 | 0 | 0 | 2,599 | 0.299458 | 0 | 0 | 554 | 0.063832 |
20d0d67fe1712516c0256a25ca26beffb42631c7 | 187 | py | Python | 2-6 motto2.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | [
"MIT"
] | 1 | 2019-02-06T01:49:18.000Z | 2019-02-06T01:49:18.000Z | 2-6 motto2.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | [
"MIT"
] | null | null | null | 2-6 motto2.py | Holaplace/path_to_python | 8fae2aca8d6da04c39a67514948fdf50e883750a | [
"MIT"
famous_name = "albert einstein"
motto = "A person who never made a mistake never tried anything new."
# BUGFIX: the original concatenated the name and "once said" with no
# separating space, printing 'Albert Einsteinonce said, ...'.
message = famous_name.title() + " once said, " + '"' + motto + '"'
print(message)
20d1c04ac4717063c357eb5f6d023fd7588ec86e | 279 | py | Python | dataset/classification_datasets/__init__.py | JetBrains-Research/contrastive-learning-framework | f213ba2928677e46f7c3938afc96cbbdc64ed77c | [
"MIT"
] | 4 | 2020-12-25T10:00:01.000Z | 2022-03-17T21:09:37.000Z | dataset/classification_datasets/__init__.py | JetBrains-Research/contrastive-learning-framework | f213ba2928677e46f7c3938afc96cbbdc64ed77c | [
"MIT"
] | 4 | 2021-03-12T09:50:20.000Z | 2021-05-07T17:59:20.000Z | dataset/classification_datasets/__init__.py | JetBrains-Research/contrastive-learning-framework | f213ba2928677e46f7c3938afc96cbbdc64ed77c | [
"MIT"
] | 1 | 2022-03-17T21:09:41.000Z | 2022-03-17T21:09:41.000Z | from .code_transformer_dataset import CodeTransformerDataset
from .graph_dataset import GraphDataset
from .path_dataset import PathDataset
from .text_dataset import TextDataset
# Public API of this package: the names re-exported on `import *`, mirroring
# the dataset classes imported above.
__all__ = [
    "TextDataset",
    "PathDataset",
    "GraphDataset",
    "CodeTransformerDataset"
]
| 23.25 | 60 | 0.78853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.229391 |
20d1ef885fc67554016c8b52e8e905e73ffbdc1a | 2,826 | py | Python | sovtokenfees/sovtokenfees/test/conftest.py | brentzundel/plugin | 357ab27b0f1a33263b6fefd98e1652cfbc982006 | [
"Apache-2.0"
] | null | null | null | sovtokenfees/sovtokenfees/test/conftest.py | brentzundel/plugin | 357ab27b0f1a33263b6fefd98e1652cfbc982006 | [
"Apache-2.0"
] | null | null | null | sovtokenfees/sovtokenfees/test/conftest.py | brentzundel/plugin | 357ab27b0f1a33263b6fefd98e1652cfbc982006 | [
"Apache-2.0"
] | null | null | null | from sovtoken.constants import XFER_PUBLIC, RESULT
from sovtoken.main import integrate_plugin_in_node as enable_token
from sovtokenfees.main import integrate_plugin_in_node as enable_fees
from plenum.common.util import randomString
from plenum.common.constants import TARGET_NYM, TRUSTEE_STRING, VERKEY
# fixtures, do not remove
from plenum.test.conftest import *
from plenum import PLUGIN_CLIENT_REQUEST_FIELDS
from sovtokenfees import CLIENT_REQUEST_FIELDS
from sovtoken.test.conftest import trustee_wallets, steward_wallets
from sovtoken.test.helper import user1_token_wallet
from sovtokenfees.test.helpers import form_helpers
@pytest.fixture(scope="module")
def do_post_node_creation():
    """Return the node-creation hook that wires the token and fees plugins.

    NOTE: updating PLUGIN_CLIENT_REQUEST_FIELDS is a process-wide side
    effect (registered once for the whole module), not per-node.
    """
    # Integrate plugin into each node.
    PLUGIN_CLIENT_REQUEST_FIELDS.update(CLIENT_REQUEST_FIELDS)
    def _post_node_creation(node):
        # Invoked for every created node: token plugin first, then fees.
        enable_token(node)
        enable_fees(node)
    return _post_node_creation
@pytest.fixture(scope="module")
def nodeSetWithIntegratedTokenPlugin(do_post_node_creation, tconf, txnPoolNodeSet):
    # Pass-through fixture: depending on do_post_node_creation guarantees the
    # plugins are wired in before the pool is handed to tests.
    return txnPoolNodeSet
@pytest.fixture(scope="module")
def fees(request):
    """Fee schedule for the module; overridable via a module-level TXN_FEES."""
    return getValueFromModule(request, "TXN_FEES", {NYM: 4, XFER_PUBLIC: 8})
@pytest.fixture()
def fees_set(helpers, fees):
    """Apply the fee schedule to the pool and return the request payload data."""
    return get_payload_data(helpers.general.do_set_fees(fees))
@pytest.fixture(scope="module")
def helpers(
    nodeSetWithIntegratedTokenPlugin,
    looper,
    sdk_pool_handle,
    trustee_wallets,
    steward_wallets,
    sdk_wallet_client,
    sdk_wallet_steward
):
    # Bundle the pool/looper/wallet fixtures into the single helper facade
    # the sovtokenfees tests use (see sovtokenfees.test.helpers.form_helpers).
    return form_helpers(
        nodeSetWithIntegratedTokenPlugin,
        looper,
        sdk_pool_handle,
        trustee_wallets,
        steward_wallets,
        sdk_wallet_client,
        sdk_wallet_steward
    )
@pytest.fixture(autouse=True)
def reset_fees(helpers):
    # autouse: clear any fee schedule left behind by a previous test.
    helpers.node.reset_fees()
@pytest.fixture()
def increased_trustees(helpers, trustee_wallets, sdk_wallet_trustee):
    """Yield the existing trustee wallets plus three freshly created trustees.

    Everything after the ``yield`` runs at teardown: the three new NYMs are
    re-sent with role=None, i.e. the extra trustees are demoted again.
    """
    seeds = [randomString(32) for _ in range(3)]
    requests = [
        helpers.request.nym(seed=seed, role=TRUSTEE_STRING)
        for seed in seeds
    ]
    responses = helpers.sdk.send_and_check_request_objects(requests)
    wallets = [helpers.wallet.create_client_wallet(seed) for seed in seeds]
    yield trustee_wallets + wallets
    # TODO: Not certain if this is actually changing the role.
    def _update_nym_standard_user(response):
        # Build a NYM request that rewrites the target with role=None.
        data = get_payload_data(response[RESULT])
        request = helpers.request.nym(
            dest=data[TARGET_NYM],
            verkey=data[VERKEY],
            role=None
        )
        return request
    requests = [
        _update_nym_standard_user(response)
        for _, response in responses
    ]
    helpers.sdk.send_and_check_request_objects(requests)
| 26.166667 | 83 | 0.734607 | 0 | 0 | 896 | 0.317056 | 2,172 | 0.768577 | 0 | 0 | 159 | 0.056263 |
20d20589a9943742a40657aa7ecca6a92ff061ab | 10,688 | py | Python | peregrinearb/utils/single_exchange.py | kecheon/peregrine | 3d308ff3134bc00900421b248f9f93d7ad31ddb6 | [
"MIT"
] | 954 | 2018-02-19T23:20:08.000Z | 2022-03-28T16:37:43.000Z | peregrinearb/utils/single_exchange.py | edouardkombo/peregrine | a3346e937d417acd91468884ee1fc14586cf317d | [
"MIT"
] | 55 | 2018-02-17T00:12:03.000Z | 2021-11-09T03:57:34.000Z | peregrinearb/utils/single_exchange.py | edouardkombo/peregrine | a3346e937d417acd91468884ee1fc14586cf317d | [
"MIT"
] | 307 | 2018-02-24T06:00:13.000Z | 2022-03-30T01:28:32.000Z | import asyncio
import math
import networkx as nx
import ccxt.async_support as ccxt
import datetime
import logging
from .logging_utils import FormatForLogAdapter
__all__ = [
'FeesNotAvailable',
'create_exchange_graph',
'load_exchange_graph',
]
adapter = FormatForLogAdapter(logging.getLogger('peregrinearb.utils.single_exchange'))
class FeesNotAvailable(Exception):
    """Raised when taker fees for a market are not exposed via ccxt's uniform API."""
    pass
def create_exchange_graph(exchange: ccxt.Exchange):
    """
    Build an undirected graph of *exchange*'s markets.

    Nodes are currencies; each edge is one market and carries its
    ``market_name``. ``exchange.load_markets()`` must already have been
    called — a ccxt error propagates otherwise.
    """
    graph = nx.Graph()
    for market_name in exchange.symbols:
        parts = market_name.split('/')
        # Skip symbols ccxt reports in a non 'BASE/QUOTE' shape
        # (e.g. FX_BTC_JPY on BitFlyer).
        if len(parts) != 2:
            continue
        base_currency, quote_currency = parts
        graph.add_edge(base_currency, quote_currency, market_name=market_name)
    return graph
async def load_exchange_graph(exchange, name=True, fees=True, suppress=None, depth=False, tickers=None) -> nx.DiGraph:
    """
    Returns a networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
    edges). If depth, also adds an attribute 'depth' to each edge which represents the current volume of orders
    available at the price represented by the 'weight' attribute of each edge.

    When ``name`` is True, *exchange* is an exchange id string and the ccxt
    class of that name is instantiated here; otherwise it is an already
    constructed ccxt Exchange. ``tickers`` may be pre-fetched to skip the
    fetch_tickers call. The exchange connection is closed before returning.
    """
    # Default built here (not in the signature) to avoid a shared mutable default.
    if suppress is None:
        suppress = ['markets']
    if name:
        exchange = getattr(ccxt, exchange)()
    if tickers is None:
        adapter.info('Fetching tickers')
        tickers = await exchange.fetch_tickers()
        adapter.info('Fetched tickers')
    market_count = len(tickers)
    adapter.info('Loading exchange graph', marketCount=market_count)
    adapter.debug('Initializing empty graph with exchange_name and timestamp attributes')
    graph = nx.DiGraph()
    # todo: get exchange's server time?
    graph.graph['exchange_name'] = exchange.id
    graph.graph['datetime'] = datetime.datetime.now(tz=datetime.timezone.utc)
    adapter.debug('Initialized empty graph with exchange_name and timestamp attributes')
    async def add_edges():
        # One edge-adding task per market, run concurrently.
        # NOTE(review): asyncio.wait on bare coroutines is deprecated in newer
        # Python versions — confirm the supported interpreter range.
        tasks = [_add_weighted_edge_to_graph(exchange, market_name, graph, log=True, fees=fees,
                                             suppress=suppress, ticker=ticker, depth=depth, )
                 for market_name, ticker in tickers.items()]
        await asyncio.wait(tasks)
    if fees:
        # Retry load_markets up to 20 times on rate limiting / availability
        # errors; the final (20th) failure is re-raised.
        for i in range(20):
            try:
                adapter.info('Loading fees', iteration=i)
                # must load markets to get fees
                await exchange.load_markets()
            except (ccxt.DDoSProtection, ccxt.RequestTimeout) as e:
                if i == 19:
                    adapter.warning('Rate limited on final iteration, raising error', iteration=i)
                    raise e
                adapter.warning('Rate limited when loading markets', iteration=i)
                await asyncio.sleep(0.1)
            except ccxt.ExchangeNotAvailable as e:
                if i == 19:
                    adapter.warning('Cannot load markets due to ExchangeNotAvailable error, '
                                    'graph will not be loaded.', iteration=i)
                    raise e
                adapter.warning('Received ExchangeNotAvailable error when loading markets', iteration=i)
            else:
                break
        adapter.info('Loaded fees', iteration=i, marketCount=market_count)
        currency_count = len(exchange.currencies)
        adapter.info('Adding data to graph', marketCount=market_count, currencyCount=currency_count)
        await add_edges()
        adapter.info('Added data to graph', marketCount=market_count, currencyCount=currency_count)
    else:
        adapter.info('Adding data to graph', marketCount=market_count)
        await add_edges()
        adapter.info('Added data to graph', marketCount=market_count)
    adapter.debug('Closing connection')
    await exchange.close()
    adapter.debug('Closed connection')
    adapter.info('Loaded exchange graph')
    return graph
async def _add_weighted_edge_to_graph(exchange: ccxt.Exchange, market_name: str, graph: nx.DiGraph, log=True,
                                      fees=False, suppress=None, ticker=None, depth=False, ):
    """
    Add the two directed edges (SELL base->quote at the bid, BUY quote->base at
    1/ask) for one market to *graph*, in place. Nothing is returned; the
    function returns early (adding nothing) when the market is unusable.

    :param exchange: A ccxt Exchange object
    :param market_name: A string representing a cryptocurrency market formatted like so:
    '{base_currency}/{quote_currency}'
    :param graph: The networkx DiGraph the edges are added to (mutated in place)
    :param log: If the edge weights given to the graph should be the negative logarithm of the ask and bid prices. This
    is necessary to calculate arbitrage opportunities.
    :param fees: If fees should be taken into account for prices.
    :param suppress: A list or set which tells which types of warnings to not throw. Accepted elements are 'markets'
    and 'fees'.
    :param ticker: A dictionary representing a market as returned by ccxt's Exchange's fetch_ticker method
    :param depth: If True, also adds an attribute 'depth' to each edge which represents the current volume of orders
    available at the price represented by the 'weight' attribute of each edge.
    todo: add global variable to bid_volume/ask_volume to see if all tickers (for a given exchange) have value == None
    """
    adapter.debug('Adding edge to graph', market=market_name)
    if ticker is None:
        try:
            adapter.info('Fetching ticker', market=market_name)
            ticker = await exchange.fetch_ticker(market_name)
            adapter.info('Fetched ticker', market=market_name)
        # any error is solely because of fetch_ticker
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit,
        # KeyboardInterrupt and task cancellation; catch Exception instead.
        except Exception:
            if 'markets' not in suppress:
                adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
                                market=market_name)
            return
    if fees:
        if 'taker' in exchange.markets[market_name]:
            # we always take the taker side because arbitrage depends on filling orders
            # sell_fee_dict = exchange.calculate_fee(market_name, 'limit', 'sell', 0, 0, 'taker')
            # buy_fee_dict = exchange.calculate_fee(market_name, 'limit', 'buy', 0, 0, 'taker')
            fee = exchange.markets[market_name]['taker']
        else:
            if 'fees' not in suppress:
                adapter.warning("The fees for {} have not yet been implemented into ccxt's uniform API."
                                .format(exchange))
                raise FeesNotAvailable('Fees are not available for {} on {}'.format(market_name, exchange.id))
            else:
                # Fee warnings suppressed: fall back to a default taker fee.
                fee = 0.002
    else:
        fee = 0
    # Fraction of a trade that survives the fee.
    fee_scalar = 1 - fee
    try:
        bid_rate = ticker['bid']
        ask_rate = ticker['ask']
        if depth:
            bid_volume = ticker['bidVolume']
            ask_volume = ticker['askVolume']
            if bid_volume is None:
                adapter.warning('Market is unavailable because its bid volume was given as None. '
                                'It will not be included in the graph.', market=market_name)
                return
            if ask_volume is None:
                adapter.warning('Market is unavailable because its ask volume was given as None. '
                                'It will not be included in the graph.', market=market_name)
                return
    # ask and bid == None if this market is non existent.
    except TypeError:
        adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
                        market=market_name)
        return
    # Exchanges give asks and bids as either 0 or None when they do not exist.
    # todo: should we account for exchanges upon which an ask exists but a bid does not (and vice versa)? Would this
    # cause bugs?
    if ask_rate == 0 or bid_rate == 0 or ask_rate is None or bid_rate is None:
        adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
                        market=market_name)
        return
    try:
        base_currency, quote_currency = market_name.split('/')
    # if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
    except ValueError:
        if 'markets' not in suppress:
            adapter.warning('Market is unavailable at this time due to incorrect formatting. '
                            'It will not be included in the graph.', market=market_name)
        return
    if log:
        # -log weights turn multiplicative rates into additive path weights.
        if depth:
            graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
                           depth=-math.log(bid_volume), market_name=market_name, trade_type='SELL',
                           fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
            graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
                           depth=-math.log(ask_volume * ask_rate), market_name=market_name, trade_type='BUY',
                           fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
        else:
            graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
                           market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
            graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
                           market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
    else:
        if depth:
            graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate, depth=bid_volume,
                           market_name=market_name, trade_type='SELL', fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
            graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate, depth=ask_volume,
                           market_name=market_name, trade_type='BUY', fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
        else:
            graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate,
                           market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
            graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate,
                           market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
    adapter.debug('Added edge to graph', market=market_name)
| 47.292035 | 120 | 0.644835 | 43 | 0.004023 | 0 | 0 | 0 | 0 | 9,676 | 0.905314 | 3,971 | 0.371538 |
20d3448de95befb1c3e21efe82264d8a693a5b30 | 1,223 | py | Python | week3/utilities/foo_synonyms.py | fredriko/search_with_machine_learning_course | 85670d7adf337fede418fa5665b3c5ee80e42b2b | [
"Apache-2.0"
] | null | null | null | week3/utilities/foo_synonyms.py | fredriko/search_with_machine_learning_course | 85670d7adf337fede418fa5665b3c5ee80e42b2b | [
"Apache-2.0"
] | null | null | null | week3/utilities/foo_synonyms.py | fredriko/search_with_machine_learning_course | 85670d7adf337fede418fa5665b3c5ee80e42b2b | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
import fasttext
if __name__ == "__main__":
    # Probe words, grouped by rough category, used to eyeball the quality of
    # nearest-neighbour lookups after training.
    targets = [
        "red", "black", "orange", "white", "gray/black", # colors
        "sony", "apple", "canon", "nikon", "dell", # brands
        "32", "an", "the", "4", "to", # numbers and stopwords
        "inch", "cm", "oz", "gb", "mb", # measurements
        "camera", "gps", "mp3", "iphone", "playstation" # products
    ]
    # TODO: hard-coded absolute path; parameterize before running elsewhere.
    base = Path("/Users/fredriko/PycharmProjects/search_with_machine_learning_course/workspace/titles")
    training_data = base / "titles.txt"
    model_file = base / "model_file"
    # fastText skip-gram hyperparameters; minn/maxn of 0 presumably disable
    # character n-grams — confirm against the fastText documentation.
    kwargs = {
        "input": str(training_data),
        "epoch": 100,
        "ws": 6,
        "minn": 0,
        "maxn": 0,
        "dim": 150,
        "model": "skipgram"
    }
    # Train one model per candidate minCount and print the top-10 neighbours
    # of every probe word. model_file is overwritten on each iteration.
    for min_count in [25]:
        print(f"Training with min_count: {min_count}")
        kwargs["minCount"] = min_count
        model = fasttext.train_unsupervised(**kwargs)
        model.save_model(str(model_file))
        for target in targets:
            print(f"Target: {target}")
            nns = model.get_nearest_neighbors(target, 10)
            for nn in nns:
                print(f"{nn[1]} -- {round(nn[0], 3)}")
            print("\n")
| 33.054054 | 103 | 0.54211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 491 | 0.401472 |
20d57e1a8a831c6f94414a23cecef0df0294572f | 269 | py | Python | tests/conftest.py | BookOps-CAT/cat-maintenance | 264d43bf8a87b80ad2fa439ae4c5bd8f719f02da | [
"MIT"
] | null | null | null | tests/conftest.py | BookOps-CAT/cat-maintenance | 264d43bf8a87b80ad2fa439ae4c5bd8f719f02da | [
"MIT"
] | null | null | null | tests/conftest.py | BookOps-CAT/cat-maintenance | 264d43bf8a87b80ad2fa439ae4c5bd8f719f02da | [
"MIT"
] | null | null | null | import json
import pytest
@pytest.fixture
def test_bib():
    """Load the plain bib record fixture from test_files/bib.json."""
    from pathlib import Path  # local import keeps module-level deps unchanged
    # BUGFIX: the original literal ".\\test_files\\bib.json" only resolves on
    # Windows; build the relative path portably instead.
    with open(Path("test_files") / "bib.json") as file:
        return json.load(file)
@pytest.fixture
def test_mixed_bib():
    """Load the mixed bib record fixture from test_files/mixed_bib.json."""
    from pathlib import Path  # local import keeps module-level deps unchanged
    # BUGFIX: the original literal ".\\test_files\\mixed_bib.json" only
    # resolves on Windows; build the relative path portably instead.
    with open(Path("test_files") / "mixed_bib.json") as file:
        return json.load(file)
| 16.8125 | 55 | 0.669145 | 0 | 0 | 0 | 0 | 236 | 0.877323 | 0 | 0 | 56 | 0.208178 |
20d64d6fa5008c4a2703a9b2d7e8670ca7b957a0 | 3,474 | py | Python | PROJECTS/CHALLENGE_BAXTER_PY/denavit_hartenberg.py | san99tiago/MY_ROBOTICS | 871ddbedd0b3fb4292facfa7a0cdf190a6df7f88 | [
"MIT"
] | 1 | 2021-03-26T16:39:15.000Z | 2021-03-26T16:39:15.000Z | PROJECTS/CHALLENGE_BAXTER_PY/denavit_hartenberg.py | san99tiago/MY_ROBOTICS | 871ddbedd0b3fb4292facfa7a0cdf190a6df7f88 | [
"MIT"
] | 1 | 2021-03-21T22:32:12.000Z | 2021-03-21T22:32:12.000Z | PROJECTS/CHALLENGE_BAXTER_PY/denavit_hartenberg.py | san99tiago/MY_ROBOTICS | 871ddbedd0b3fb4292facfa7a0cdf190a6df7f88 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# DENAVIT HARTENBERG FOR NUMERICAL INPUTS
# SANTIAGO GARCIA AND ELKIN GUERRA
import numpy as np
import math
import transformation as transf
def denavit_hartenberg(dh_table, show_info):
    """
    Compose the homogeneous transformation described by a Denavit-Hartenberg table.

    :param dh_table: array of shape (N, 4); row i is consumed as
        (rotation, x-translation, z-translation, rotation) — conventionally
        the DH parameters (alpha, a, d, theta), to be confirmed against
        transformation.Transformation.
    :param show_info: print every intermediate matrix when "yes" or 1
    :returns: the accumulated 4x4 homogeneous transformation matrix
    """
    TM = np.identity(4)
    for i in range(np.size(dh_table, 0)):
        # The four elementary transforms for this row, then their product.
        m0 = transf.Transformation(dh_table[i, 0], 0, 0, [0, 0, 0]).TM
        m1 = transf.Transformation(0, 0, 0, [dh_table[i, 1], 0, 0]).TM
        m2 = transf.Transformation(0, 0, 0, [0, 0, dh_table[i, 2]]).TM
        m3 = transf.Transformation(0, 0, dh_table[i, 3], [0, 0, 0]).TM
        row_tm = m0.dot(m1).dot(m2).dot(m3)
        TM = TM.dot(row_tm)
        if show_info in ("yes", 1):
            print("TMi_0 -", i, "\n", m0)
            print("TMi_1 -", i, "\n", m1)
            print("TMi_2 -", i, "\n", m2)
            print("TMi_3 -", i, "\n", m3)
            print("TM_current -", i, "\n", row_tm)
            print("TM -", i, "\n", TM, "\n\n")
    return TM
# TESTS
if __name__ == "__main__":
    # Get all constants for the distances and transformations for Baxter
    import constants_baxter
    Constants = constants_baxter.GetConstants()
    L0 = Constants.L0
    L1 = Constants.L1
    L2 = Constants.L2
    L3 = Constants.L3
    L4 = Constants.L4
    L5 = Constants.L5
    L6 = Constants.L6
    L = Constants.L
    h = Constants.h
    H = Constants.H
    LH = Constants.LH
    TM_W0_BL = Constants.TM_W0_BL
    TM_W0_BR = Constants.TM_W0_BR
    TM_BL_0 = Constants.TM_BL_0
    TM_BR_0 = Constants.TM_BR_0
    TM_7_GL = Constants.TM_7_GL
    TM_7_GR = Constants.TM_7_GR

    def run_test(label, angles_deg):
        """Build the DH table (BL/GL transforms) for the given joint angles
        in degrees, compose it, and print TM_0_6 and the world->gripper TM."""
        theta1, theta2, theta4, theta5, theta6, theta7 = (
            math.radians(a) for a in angles_deg
        )
        DH = np.array([[0, 0, 0, theta1],
                       [-math.pi / 2, L1, 0, theta2],
                       [0, LH, 0, theta4 + math.pi / 2],
                       [math.pi / 2, 0, L4, theta5],
                       [-math.pi / 2, L5, 0, theta6],
                       [math.pi / 2, 0, 0, theta7]])
        TM_0_6 = denavit_hartenberg(DH, "no")
        TM_W0_GL = np.dot(np.dot(np.dot(TM_W0_BL, TM_BL_0), TM_0_6), TM_7_GL)
        print("\n --------- {} ----------------".format(label))
        print("\n TM_06: \n", TM_0_6)
        print("\n TM_W0_GL: \n", TM_W0_GL)

    # Both cases use real Baxter joint values; previously each was a
    # copy-pasted ~25-line block (factored into run_test, output unchanged).
    run_test("TEST 1", [0, 0, 0, 0, 0, 0])
    run_test("TEST 2", [10, 20, 40, 50, 60, 70])
| 32.166667 | 73 | 0.560161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 846 | 0.243523 |
20d6ff4cc9bc454128516dd3b71191a6e4996eac | 6,089 | py | Python | reid.py | vyzboy92/Accelerated-Face-Reidentification-and-Emotion-Recognition | f653f6fbcc82f39667c3c8fa2025a8c26825c92a | [
"MIT"
] | 2 | 2019-05-01T23:17:46.000Z | 2019-12-15T18:20:45.000Z | reid.py | vyzboy92/Accelerated-Face-Reidentification-and-Emotion-Recognition | f653f6fbcc82f39667c3c8fa2025a8c26825c92a | [
"MIT"
] | null | null | null | reid.py | vyzboy92/Accelerated-Face-Reidentification-and-Emotion-Recognition | f653f6fbcc82f39667c3c8fa2025a8c26825c92a | [
"MIT"
] | 2 | 2019-04-17T08:02:20.000Z | 2019-04-24T03:08:46.000Z | from __future__ import print_function
import face_recognition
import cv2
import numpy as np
import pymongo
import sys
import time
import logging as log
from imutils.video import WebcamVideoStream
from openvino.inference_engine import IENetwork, IEPlugin
# Connect to the local MongoDB instance holding the enrolled face data.
em_client = pymongo.MongoClient("mongodb://localhost:27017/")
dblist = em_client.list_database_names()
if "Main_DB" in dblist:
    print("========================")
    print("Main_db found in Mongo")
    print("========================")
# Accessed unconditionally: MongoDB creates databases/collections lazily,
# so this works even if "Main_DB" was not listed above.
em_db = em_client["Main_DB"]
em_col = em_db["face_info"]
def init_emotion():
    """Load the OpenVINO emotion-recognition network on CPU.

    Returns (exec_net, n, c, w, h, input_blob, out_blob, plugin) — note the
    return order is (w, h), swapped relative to the NCHW shape unpacked below.
    Exits the process if the network contains unsupported CPU layers.
    """
    model_xml = "utils/emotions-recognition-retail-0003.xml"
    model_bin = "utils/emotions-recognition-retail-0003.bin"
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(
        '/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so')
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    if plugin.device == "CPU":
        # Bail out early if any layer cannot run on the CPU plugin.
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    # Two inference requests so frames can be processed asynchronously.
    exec_net = plugin.load(network=net, num_requests=2)
    n, c, h, w = net.inputs[input_blob].shape
    del net
    return exec_net, n, c, w, h, input_blob, out_blob, plugin
def main():
    """Webcam loop: re-identify known faces from MongoDB and overlay an
    emotion label per face. Runs until 'q' is pressed.

    Face matching runs on every other frame, at quarter resolution, to save
    time; emotion inference runs per detected face via the OpenVINO network
    loaded by init_emotion().
    """
    # Labels indexed by argmax of the network output — assumed to match the
    # model's output order (confirm against the model documentation).
    emotion_list = ['neutral', 'happy', 'sad', 'surprise', 'anger']
    exec_net, n, c, w, h, input_blob, out_blob, plugin = init_emotion()
    faces = list(em_col.find({}))
    # Get a reference to webcam #0 (the default one)
    fvs = WebcamVideoStream(src=0).start()
    time.sleep(0.5)
    known_face_encodings = []
    known_face_names = []
    # Create arrays of known face encodings and their names
    # (each DB document may hold several encodings for one person).
    for face in faces:
        for face_encods in face['encoding']:
            known_face_encodings.append(np.asarray(face_encods))
            known_face_names.append(face['name'])
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    cur_request_id = 0
    next_request_id = 1
    # NOTE(review): if the first inference request never completes, `emotion`
    # stays None and the cv2.putText call below would fail — confirm intended.
    emotion = None
    while True:
        # Grab a single frame of video
        frame = fvs.read()
        if frame is None:
            break
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                face_names.append(name)
        process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            face = frame[top:bottom, left:right]
            cv2.imshow('face', face)
            in_frame = cv2.resize(face, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                res = exec_net.requests[cur_request_id].outputs[out_blob]
                emo_pred = np.argmax(res)
                emotion = emotion_list[emo_pred]
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.rectangle(frame, (left, bottom + 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            cv2.putText(frame, emotion, (left + 6, bottom + 12), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', cv2.resize(frame, (1280, 720)))
        # Alternate between the two pre-allocated inference requests.
        cur_request_id, next_request_id = next_request_id, cur_request_id
        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam
    # video_capture.release()
    fvs.stop()
    cv2.destroyAllWindows()
    del exec_net
    del plugin
# Entry point: start the webcam re-identification + emotion loop.
if __name__ == '__main__':
    main()
| 40.865772 | 117 | 0.628018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,648 | 0.270652 |
20d70585f5c3c218441a7284e243ac4d8d8a6342 | 1,158 | py | Python | ch02/q2-1.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | null | null | null | ch02/q2-1.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | 3 | 2021-03-19T14:35:27.000Z | 2021-03-20T16:12:34.000Z | ch02/q2-1.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | null | null | null | from linked_list import linked_list
# O(N^2) time, O(1) extra space (no auxiliary buffer).
def remove_dups(linked_list):
    """Remove nodes with duplicate values from *linked_list*, in place.

    Keeps the first occurrence of each value and keeps ``count`` consistent
    with the number of remaining nodes. The parameter name shadows the
    imported ``linked_list``; kept for interface compatibility.
    """
    # Preserved quirk: the empty-list fast path returns True, other paths None.
    if linked_list.count < 1: return True
    anchor = linked_list.head
    while anchor is not None:
        cursor = anchor
        while cursor is not None and cursor.next is not None:
            if anchor.item == cursor.next.item:
                # Unlink the duplicate node.
                cursor.next = cursor.next.next
                # BUGFIX: was `linked_list.count -= linked_list.count`, which
                # zeroed the count on the first removal; each unlink removes
                # exactly one node.
                linked_list.count -= 1
                # BUGFIX: do NOT advance here — the new cursor.next (the node
                # that replaced the removed one) must be checked too, or runs
                # of 3+ equal values keep duplicates.
            else:
                cursor = cursor.next
        anchor = anchor.next
#################################### manual smoke tests: empty list
myll = linked_list()
remove_dups(myll)
myll.print_list()
##### single element
myll = linked_list()
myll.add(2)
remove_dups(myll)
myll.print_list()
##### one duplicate pair
myll = linked_list()
myll.add(2)
myll.add(2) #
remove_dups(myll)
myll.print_list()
##### all distinct values
myll = linked_list()
myll.add(2)
myll.add(4)
myll.add(0)
myll.add(10)
remove_dups(myll)
myll.print_list()
#### duplicates interleaved with distinct values
myll = linked_list()
myll.add(4)
myll.add(2)
myll.add(0)
myll.add(4)
myll.add(10)
myll.add(0)
myll.add(10)
remove_dups(myll)
myll.print_list()
20d7a4da173d2411bdd31ec724e75ad823385ca2 | 1,600 | py | Python | lib/test_collect_args.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 10 | 2019-12-19T02:37:33.000Z | 2021-12-07T04:47:08.000Z | lib/test_collect_args.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 5 | 2019-10-27T23:22:52.000Z | 2020-02-13T23:08:15.000Z | lib/test_collect_args.py | agnes-yang/firecam | 9282d1b5b83be3abf6a137f7a72c090a9eca05f6 | [
"Apache-2.0"
] | 13 | 2019-09-24T18:53:24.000Z | 2021-07-16T05:57:18.000Z | # Copyright 2018 The Fuego Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test collect_args
"""
import collect_args
import pytest
def testReqStr():
requiredArgs = [
["n", "name", "some string"],
]
args = collect_args.collectArgsInt(['-n', 'abc'], requiredArgs, [], None, False)
assert args.name == 'abc'
def testOptStr():
optionalArgs = [
["o", "name", "some string"],
]
args = collect_args.collectArgsInt(['-o', 'bcd'], [], optionalArgs, None, False)
assert args.name == 'bcd'
def testReqInt():
requiredArgs = [
["v", "value", "some integer", int],
]
args = collect_args.collectArgsInt(['-v', '121'], requiredArgs, [], None, False)
assert args.value == 121
def testMissingReq():
requiredArgs = [
["n", "name", "some string"],
]
# expecting OSError: reading from stdin while output is captured
with pytest.raises(OSError):
args = collect_args.collectArgsInt([], requiredArgs, [], None, False)
| 29.090909 | 84 | 0.626875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 867 | 0.541875 |
20d874cf234ddb191d66611c5aac626a8862662e | 668 | py | Python | python/test/grammar_translator/testCallEnd.py | ROCmSoftwarePlatform/gpufort | b3d392cf28200cd9b3b2f77689d5a81176b3ec42 | [
"MIT"
] | 57 | 2021-10-04T19:52:55.000Z | 2022-03-29T17:41:36.000Z | python/test/grammar_translator/testCallEnd.py | mjklemm/gpufort | b3d392cf28200cd9b3b2f77689d5a81176b3ec42 | [
"MIT"
] | 12 | 2021-09-29T11:32:59.000Z | 2021-12-09T11:39:54.000Z | python/test/grammar_translator/testCallEnd.py | ROCmSoftwarePlatform/gpufort | b3d392cf28200cd9b3b2f77689d5a81176b3ec42 | [
"MIT"
] | 5 | 2021-10-05T06:16:28.000Z | 2022-02-24T14:32:24.000Z | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
import addtoplevelpath
import sys
import test
import translator.translator
import grammar as translator
testdata = """
1 )
a_d )
psi_d )
2 * lda, ps_d, 1, 1.D0, psi_d, 1 )
spsi_d )
a_d )
1, spsi_d )
1, 1, spsi_d )
lda, ps_d, 1, 1, spsi_d )
lda, ps_d )
lda, ps_d, 1, 1, spsi_d, 1 )
2 * lda, ps_d, 1, 1, spsi_d, 1 )
2 * lda, ps_d, 1, 1.D0, spsi_d, 1 )
""".strip("\n").strip(" ").strip("\n").splitlines()
test.run(
expression = translator.call_end,
testdata = testdata,
tag = "call_end",
raiseException = True
) | 21.548387 | 70 | 0.636228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.570359 |
20d9b17508efc8213294cb535823ea481dd4acf0 | 7,020 | py | Python | nnef/fold_os.py | lahplover/nnef | dcabf31337e5849593f343e6502fe0b8dc20452e | [
"MIT"
] | 2 | 2021-04-30T06:07:49.000Z | 2021-06-30T06:47:00.000Z | nnef/fold_os.py | lahplover/nnef | dcabf31337e5849593f343e6502fe0b8dc20452e | [
"MIT"
] | null | null | null | nnef/fold_os.py | lahplover/nnef | dcabf31337e5849593f343e6502fe0b8dc20452e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import torch
from physics.protein_os import Protein
import options
from utils import write_pdb, write_pdb_sample, transform_profile, load_protein
from physics.anneal import AnnealCoords, AnnealFrag
# from physics.move import SampleICNext
from physics.grad_minimizer import *
from physics.dynamics import *
import os
import mdtraj as md
from utils import test_setup
import h5py
#################################################
parser = options.get_fold_parser()
args = options.parse_args_and_arch(parser)
device, model, energy_fn, ProteinBase = test_setup(args)
# position_weights = torch.zeros((1, args.seq_len + 1), device=device)
# position_weights[:, 0:5] = 1
# energy_fn.energy_fn.position_weights = position_weights
#################################################
data_path = 'data/fold/cullpdb_val_deep'
protein_sample = pd.read_csv(f'{data_path}/sample.csv')
pdb_selected = protein_sample['pdb'].values
np.random.shuffle(pdb_selected)
fold_engine = args.fold_engine
mode = args.mode
# sample_ic = SampleICNext(mode)
exp_id = args.load_exp[-5:]
save_dir = args.save_dir
# if not os.path.exists(f'data/fold/{exp_id}'):
# os.mkdir(f'data/fold/{exp_id}')
if not os.path.exists(f'data/fold/{save_dir}'):
os.mkdir(f'data/fold/{save_dir}')
for pdb_id in pdb_selected:
seq, coords_native, profile = load_protein(data_path, pdb_id, mode, device, args)
protein_native = Protein(seq, coords_native, profile)
energy_native = protein_native.get_energy(energy_fn).item()
print('energy_native:', energy_native)
rg2, collision = protein_native.get_rad_gyration(coords_native)
print('native radius of gyration square:', rg2.item())
# residue_energy = protein_native.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_native, pdb_id, 'native', exp_id)
protein = Protein(seq, coords_native.clone(), profile.clone())
if args.random_init:
# random_coords_int = sample_ic.random_coords_int(len(seq)-3).to(device)
# protein.update_coords_internal(random_coords_int)
# extend_coords_int = torch.tensor([[5.367, 1.6, 0.0]], device=device).repeat((len(seq)-3, 1))
extend_coords_int = torch.tensor([[5.367, 0.1, 0.0]], device=device).repeat((len(seq)-3, 1))
protein.update_coords_internal(extend_coords_int)
protein.update_cartesian_from_internal()
coords_init = protein.coords
energy_init = protein.get_energy(energy_fn).item()
print('energy_init:', energy_init)
# write_pdb(seq, coords_init, pdb_id, f'init_{mode}', exp_id)
if fold_engine == 'anneal':
# simulated annealing
torch.set_grad_enabled(False)
if args.anneal_type == 'int_one':
annealer = AnnealCoords(energy_fn, protein, mode=mode, ic_move_std=args.ic_move_std,
T_max=args.T_max, T_min=args.T_min, L=args.L)
elif args.anneal_type == 'frag':
frag_file = h5py.File(f'data/fragment/{pdb_id}/{pdb_id}_int.h5', 'r')
query_pos = torch.tensor(frag_file['query_pos'][()], device=device)
frag_int = torch.tensor(frag_file['coords_int'][()], device=device)
annealer = AnnealFrag(energy_fn, protein, frag=(query_pos, frag_int), use_rg=args.use_rg,
T_max=args.T_max, T_min=args.T_min, L=args.L)
else:
raise ValueError('anneal_type should be int_one / frag.')
annealer.run()
coords_best = annealer.x_best
energy_best = annealer.energy_best
sample = annealer.sample
sample_energy = annealer.sample_energy
elif fold_engine == 'grad':
if args.x_type == 'cart':
minimizer = GradMinimizerCartesian(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'internal':
minimizer = GradMinimizerInternal(energy_fn, protein, lr=args.lr, num_steps=args.L, momentum=0.0)
elif args.x_type == 'int_fast':
minimizer = GradMinimizerIntFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mixed':
minimizer = GradMinimizerMixed(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mix_fast':
minimizer = GradMinimizerMixFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
elif fold_engine == 'dynamics':
if args.x_type == 'cart':
minimizer = Dynamics(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'internal':
minimizer = DynamicsInternal(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'int_fast':
minimizer = DynamicsIntFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mixed':
minimizer = DynamicsMixed(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mix_fast':
minimizer = DynamicsMixFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
else:
raise ValueError('fold_engine should be anneal / grad / dynamics')
# protein.update_coords(coords_best)
# residue_energy = protein.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_best, pdb_id, f'best_{mode}', exp_id)
# save sampled structures
sample = [coords_native.cpu(), coords_best.cpu(), coords_init.cpu()] + sample
sample_energy = [energy_native, energy_best, energy_init] + sample_energy
# write_pdb_sample(seq, sample, pdb_id, 'sample', exp_id)
# pd.DataFrame({'sample_energy': sample_energy}).to_csv(f'data/fold/{exp_id}/{pdb_id}_energy.csv', index=False)
write_pdb_sample(seq, sample, pdb_id, 'sample', save_dir)
# compute RMSD,
sample_xyz = torch.stack(sample, 0).cpu().detach().numpy()
print(sample_xyz.shape)
t = md.Trajectory(xyz=sample_xyz, topology=None)
t = t.superpose(t, frame=0)
write_pdb_sample(seq, t.xyz, pdb_id, 'sample2', save_dir)
sample_rmsd = md.rmsd(t, t, frame=0) # computation will change sample_xyz;
print(f'best RMSD: {sample_rmsd[1]}')
df = pd.DataFrame({'sample_energy': sample_energy,
'sample_rmsd': sample_rmsd})
df.to_csv(f'data/fold/{save_dir}/{pdb_id}_energy.csv', index=False)
| 43.602484 | 115 | 0.674359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,969 | 0.280484 |
20d9e7bea557d0ce53ddbee800b0871a1793d79b | 1,002 | py | Python | Bookshelf/MotorTest2.py | anju777/Baby-BB8 | 5914ea5e5b9daf7fc95b32fcfdb4cf216cf35d02 | [
"MIT"
] | 1 | 2021-03-31T20:40:05.000Z | 2021-03-31T20:40:05.000Z | Bookshelf/MotorTest2.py | anju777/Baby-BB8 | 5914ea5e5b9daf7fc95b32fcfdb4cf216cf35d02 | [
"MIT"
] | null | null | null | Bookshelf/MotorTest2.py | anju777/Baby-BB8 | 5914ea5e5b9daf7fc95b32fcfdb4cf216cf35d02 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
# for GPIO numbering, choose BCM
#GPIO.setmode(GPIO.BCM)
# or, for pin numbering, choose BOARD
GPIO.setmode(GPIO.BOARD)
# battery1 = 2
# battery2 = 4
AEnable = 13 #27
AIN1 = 22 #25
AIN2 = 18 #24
# BIN1 = 23 # 16
# BIN2 = 18 # 12
GPIO.setup(AEnable, GPIO.OUT)
GPIO.setup(AIN1, GPIO.OUT)
GPIO.setup(AIN2, GPIO.OUT)
# GPIO.setup(BIN1, GPIO.OUT)
# GPIO.setup(BIN2, GPIO.OUT)
# GPIO.setup(battery1, GPIO.OUT)
# GPIO.setup(battery2, GPIO.OUT)
# GPIO.output(battery1, GPIO.HIGH)
# GPIO.output(battery2, GPIO.HIGH)
for i in range(5):
GPIO.output(AIN1, GPIO.HIGH)
GPIO.output(AIN2, GPIO.LOW)
GPIO.output(AEnable, GPIO.HIGH)
# GPIO.output(BIN1, GPIO.HIGH)
# GPIO.output(BIN2, GPIO.LOW)
time.sleep(2)
GPIO.output(AEnable, GPIO.LOW)
# GPIO.output(AIN1,GPIO.LOW)
# GPIO.output(AIN2, GPIO.HIGH)
# GPIO.output(BIN1,GPIO.LOW)
# GPIO.output(BIN2, GPIO.HIGH)
time.sleep(2)
print("attempt12")
GPIO.cleanup()
| 19.647059 | 39 | 0.660679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 543 | 0.541916 |
20db30683d6b543731d432357e62120eaca639e6 | 6,272 | py | Python | routes/customer.py | trqanees94/braintree_fast | 8897aa51821ac4b868076a27fe65a74692d33647 | [
"MIT"
] | null | null | null | routes/customer.py | trqanees94/braintree_fast | 8897aa51821ac4b868076a27fe65a74692d33647 | [
"MIT"
] | null | null | null | routes/customer.py | trqanees94/braintree_fast | 8897aa51821ac4b868076a27fe65a74692d33647 | [
"MIT"
] | null | null | null | #bson
from bson import ObjectId
# clients
from clients.mongodb import MongoDB
# braintree/ stripes gateway
from gateway import generate_client_token, transact, find_transaction, find_customer, customer, update_customer ,find_all_customers, \
stripe_customer, update_stripe_customer
# flask
from flask import jsonify, Response
def create(customer_request):
'''
Input:customer_request (request.form)
'''
# customer() uses braintree gateway to create a customer
braintree_data = customer({
"first_name": customer_request.form['first_name'],
"last_name": customer_request.form['last_name'],
"custom_fields": {
"spending_limit": customer_request.form['spending_limit'] if customer_request.form['spending_limit'] else 0
}
})
# stripe_customer() uses stripe gateway to create a customer
stripe_data = stripe_customer(
"{} {}".format(customer_request.form['first_name'], customer_request.form['last_name']),
customer_request.form['spending_limit'] if customer_request.form['spending_limit'] else 0
)
if not braintree_data.is_success:
errors_list = [[x.code, x.message] for x in braintree_data.errors.deep_errors]
error_dict = {
"error_message": errors_list[0][1],
"error_code": errors_list[0][0]
}
else:
error_dict = {}
# customer_pair makes up the Fast Customer record
customer_pair = {
"braintree":{
"customer_id": braintree_data.customer.id,
"customer_first_name": braintree_data.customer.first_name,
"customer_last_name": braintree_data.customer.last_name,
"customer_spending_limit": braintree_data.customer.custom_fields["spending_limit"]
},
"stripe":{
"customer_id": stripe_data.id,
"customer_first_name": stripe_data.name,
"customer_last_name": stripe_data.name,
"customer_spending_limit": stripe_data.metadata.spending_limit
}
}
# open database connection
with MongoDB() as mongo_client:
# add the customer to the customers collection
customer_object = mongo_client.customers.insert_one(customer_pair)
customer_response = {
"fast_customer_id": None if error_dict else str(customer_object["_id"]),
"braintree_id": {} if error_dict else braintree_data.customer.id,
"stripe_id": {} if error_dict else stripe_data.id,
"error": error_dict,
"success": bool(not error_dict)
}
return customer_response
def update(customer_request):
'''
Input: customer_request -(request.args)
'''
# fast_customer_id is sent from the update html page
fast_customer_id = customer_request.args["customer_id"]
updated_first_name=customer_request.args["first_name"]
updated_last_name=customer_request.args["last_name"]
updated_spending_limit=customer_request.args["spending_limit"]
with MongoDB() as mongo_client:
# customer_object contains the braintree and stripe customer pair
customer_object = mongo_client.customers.find_by_id(fast_customer_id)
braintree_id = customer_object['braintree']['customer_id']
stripe_id = customer_object['stripe']['customer_id']
#update_params creates the payload that has updated customer data
update_params = {
"first_name": updated_first_name,
"last_name": updated_last_name,
"custom_fields": {
"spending_limit": updated_spending_limit
}
}
# update_customer() uses the braintree gateway to update customer
braintree_data = update_customer(braintree_id, update_params)
# update_stripe_customer() uses the stripe gateway to update customer
stripe_data = update_stripe_customer(
stripe_id,
"{} {}".format(customer_request.args['first_name'], customer_request.args['last_name']),
customer_request.args['spending_limit']
)
# New customer data must be updated in the MongoDB customers collection
with MongoDB() as mongo_client:
mongo_client.customers.collection.update_one(
{"_id": ObjectId(fast_customer_id)},
{"$set": {
"braintree":{
"customer_id":braintree_id,
"customer_first_name": updated_first_name,
"customer_last_name": updated_last_name,
"customer_spending_limit": updated_spending_limit
},
"stripe":{
"customer_id":stripe_id,
"customer_first_name": "{} {}".format(updated_first_name,updated_last_name),
"customer_last_name": "{} {}".format(updated_first_name,updated_last_name),
"customer_spending_limit": updated_spending_limit
}
}}
)
if not braintree_data.is_success:
errors_list = [[x.code, x.message] for x in braintree_data.errors.deep_errors]
error_dict = {
"error_message": errors_list[0][1],
"error_code": errors_list[0][0]
}
else:
error_dict = {}
customer_response = {
"fast_customer_id": None if error_dict else str(customer_object["_id"]),
"braintree_id": {} if error_dict else braintree_data.customer.id,
"stripe_id": {} if error_dict else stripe_data.id,
"error": error_dict,
"success": bool(not error_dict)
}
return customer_response
def retrieve(mongoid=None):
if mongoid:
with MongoDB() as mongo_client:
# pull single customer from mongodb customers collection
customer_object_list = [mongo_client.customers.find_by_id(mongoid)]
else:
with MongoDB() as mongo_client:
# pull all customers from mongodb customers collection
customer_object_list = mongo_client.customers.find()
return customer_object_list
| 38.243902 | 134 | 0.629464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,838 | 0.293048 |
20dd4926d6dbb2603389704c797b5810776b1bfd | 596 | py | Python | src/python/web/handler/status.py | AlekLT/seedsync | d370a94253384e1e6e5caa5fcd44692f1d1f1ce3 | [
"Apache-2.0"
] | 255 | 2017-12-25T00:53:40.000Z | 2022-03-27T10:29:21.000Z | src/python/web/handler/status.py | AlekLT/seedsync | d370a94253384e1e6e5caa5fcd44692f1d1f1ce3 | [
"Apache-2.0"
] | 111 | 2018-01-04T10:35:49.000Z | 2022-03-29T15:12:52.000Z | src/python/web/handler/status.py | AlekLT/seedsync | d370a94253384e1e6e5caa5fcd44692f1d1f1ce3 | [
"Apache-2.0"
] | 53 | 2017-12-25T09:34:19.000Z | 2022-03-15T17:53:27.000Z | # Copyright 2017, Inderpreet Singh, All rights reserved.
from bottle import HTTPResponse
from common import Status, overrides
from ..web_app import IHandler, WebApp
from ..serialize import SerializeStatusJson
class StatusHandler(IHandler):
def __init__(self, status: Status):
self.__status = status
@overrides(IHandler)
def add_routes(self, web_app: WebApp):
web_app.add_handler("/server/status", self.__handle_get_status)
def __handle_get_status(self):
out_json = SerializeStatusJson.status(self.__status)
return HTTPResponse(body=out_json)
| 28.380952 | 71 | 0.746644 | 382 | 0.64094 | 0 | 0 | 135 | 0.22651 | 0 | 0 | 72 | 0.120805 |
20dde221df3a2c58bbcc72c289e0409920f45220 | 3,504 | py | Python | scraper/scraper.py | JimmyAustin/Dota2TeamBuilder | 2d8edddafcf4e5e82a7f3ab14bd9a364164c2829 | [
"WTFPL"
] | null | null | null | scraper/scraper.py | JimmyAustin/Dota2TeamBuilder | 2d8edddafcf4e5e82a7f3ab14bd9a364164c2829 | [
"WTFPL"
] | null | null | null | scraper/scraper.py | JimmyAustin/Dota2TeamBuilder | 2d8edddafcf4e5e82a7f3ab14bd9a364164c2829 | [
"WTFPL"
] | null | null | null | import dota2api
from ratelimit import limits, sleep_and_retry
import os
import json
from time import sleep
import random
directory = 'scrape_results'
all_files = [os.path.join(directory, x) for x in os.listdir(directory)]
seen_players = set()
unseen_player_list = []
seen_match_ids = set()
duplicate_matches_count = 0
for filepath in all_files:
print(filepath)
with open(filepath, 'rb') as file_ref:
for line in file_ref:
try:
result = json.loads(line.decode('utf8'))
if 'type' in result and result['type'] == 'STARTED_ON_NEW_PLAYER':
seen_players.add(result['player_id'])
else:
if result['match_id'] in seen_match_ids:
duplicate_matches_count = duplicate_matches_count + 1
seen_match_ids.add(result['match_id'])
for player in result['players']:
if 'account_id' in player:
unseen_player_list.append(player['account_id'])
except Exception:
pass
unseen_player_list = [x for x in unseen_player_list if x not in seen_players]
if len(unseen_player_list) == 0:
unseen_player_list = [31632658] # That's Zin
print('Inited, {0} duplicate matches'.format(duplicate_matches_count))
import dota2api
from ratelimit import limits, sleep_and_retry
api = dota2api.Initialise()
match_count = len(seen_match_ids)
def get_next_filename():
count = 1
while True:
path = './scrape_results/all_matches_{0}.json'.format(count)
if os.path.exists(path) is False:
return path
count = count + 1
matches_file = open(get_next_filename(), 'wb')
def print_status_update():
players_seen = len(seen_players) - len(unseen_player_list)
print("Matches saved: {0}, Players Seen: {1}, Players To Go: {2}".format(match_count, players_seen, len(unseen_player_list)))
@sleep_and_retry
@limits(calls=1, period=1.10)
def api_call(endpoint, *args, **kwargs):
try:
return getattr(api, endpoint)(*args, **kwargs)
except dota2api.src.exceptions.APITimeoutError:
sleep(10)
except Exception as e:
print(e)
print("Sleeping it off.")
sleep(10)
def get_player(player_id):
print('Getting player: {0}'.format(player_id))
try:
history = api_call('get_match_history', account_id=player_id)
except Exception as e:
print(e)
print("Sleeping it off.")
sleep(10)
matches_file.write(json.dumps({'type': 'STARTED_ON_NEW_PLAYER', 'player_id': player_id}).encode('utf8'))
matches_file.write('\n'.encode('utf8'))
for match in random.sample(history['matches'], 5):
get_match(match['match_id'])
def get_match(match_id):
global match_count
if match_id in seen_match_ids:
return
print('get_match_details: {0}'.format(match_id))
print_status_update()
details = api_call('get_match_details', match_id)
matches_file.write(json.dumps(details).encode('utf8'))
matches_file.write('\n'.encode('utf8'))
match_count = match_count + 1
for player in details['players']:
if player.get('account_id') and player['account_id'] not in seen_players:
unseen_player_list.append(player['account_id'])
seen_players.add(player['account_id'])
while len(unseen_player_list) > 0:
try:
get_player(unseen_player_list.pop())
except Exception as e:
pass
| 32.146789 | 129 | 0.652112 | 0 | 0 | 0 | 0 | 317 | 0.090468 | 0 | 0 | 537 | 0.153253 |
20de348221b3567b6adee4a2431833e7187c5364 | 9,728 | py | Python | datasets/ett/ett.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 3,395 | 2020-05-13T21:16:50.000Z | 2020-09-10T14:36:50.000Z | datasets/ett/ett.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 370 | 2020-05-13T21:28:57.000Z | 2020-09-10T11:03:38.000Z | datasets/ett/ett.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 258 | 2020-05-15T01:17:09.000Z | 2020-09-10T12:41:43.000Z | # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Electricity Transformer Temperature (ETT) dataset."""
from dataclasses import dataclass
import pandas as pd
import datasets
_CITATION = """\
@inproceedings{haoyietal-informer-2021,
author = {Haoyi Zhou and
Shanghang Zhang and
Jieqi Peng and
Shuai Zhang and
Jianxin Li and
Hui Xiong and
Wancai Zhang},
title = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
booktitle = {The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021, Virtual Conference},
volume = {35},
number = {12},
pages = {11106--11115},
publisher = {{AAAI} Press},
year = {2021},
}
"""
_DESCRIPTION = """\
The data of Electricity Transformers from two separated counties
in China collected for two years at hourly and 15-min frequencies.
Each data point consists of the target value "oil temperature" and
6 power load features. The train/val/test is 12/4/4 months.
"""
_HOMEPAGE = "https://github.com/zhouhaoyi/ETDataset"
_LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"h1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh1.csv",
"h2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh2.csv",
"m1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm1.csv",
"m2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm2.csv",
}
@dataclass
class ETTBuilderConfig(datasets.BuilderConfig):
"""ETT builder config."""
prediction_length: int = 24
multivariate: bool = False
class ETT(datasets.GeneratorBasedBuilder):
"""Electricity Transformer Temperature (ETT) dataset"""
VERSION = datasets.Version("1.0.0")
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('ett', 'h1')
# data = datasets.load_dataset('ett', 'm2')
BUILDER_CONFIGS = [
ETTBuilderConfig(
name="h1",
version=VERSION,
description="Time series from first county at hourly frequency.",
),
ETTBuilderConfig(
name="h2",
version=VERSION,
description="Time series from second county at hourly frequency.",
),
ETTBuilderConfig(
name="m1",
version=VERSION,
description="Time series from first county at 15-min frequency.",
),
ETTBuilderConfig(
name="m2",
version=VERSION,
description="Time series from second county at 15-min frequency.",
),
]
DEFAULT_CONFIG_NAME = "h1" # It's not mandatory to have a default configuration. Just use one if it make sense.
def _info(self):
if self.config.multivariate:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"item_id": datasets.Value("string"),
}
)
else:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Value("float32")),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"feat_dynamic_real": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"item_id": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
filepath = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "dev",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
data = pd.read_csv(filepath, parse_dates=True, index_col=0)
start_date = data.index.min()
if self.config.name in ["m1", "m2"]:
factor = 4 # 15-min frequency
else:
factor = 1 # hourly frequency
train_end_date_index = 12 * 30 * 24 * factor # 1 year
if split == "dev":
end_date_index = 12 * 30 * 24 + 4 * 30 * 24 * factor # 1 year + 4 months
else:
end_date_index = 12 * 30 * 24 + 8 * 30 * 24 * factor # 1 year + 8 months
if self.config.multivariate:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
yield i, {
"start": start_date,
"target": data[: index + self.config.prediction_length].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
yield 0, {
"start": start_date,
"target": data[:train_end_date_index].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
target = data["OT"][: index + self.config.prediction_length].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
: index + self.config.prediction_length
].values.T.astype("float32")
yield i, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
else:
target = data["OT"][:train_end_date_index].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
:train_end_date_index
].values.T.astype("float32")
yield 0, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
| 40.032922 | 117 | 0.557257 | 7,298 | 0.750206 | 3,075 | 0.316098 | 152 | 0.015625 | 0 | 0 | 4,404 | 0.452714 |