hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2af0d2b145267e888538a0d91226a3622e846c24 | 3,591 | py | Python | src/oci/autoscaling/models/cron_execution_schedule.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/autoscaling/models/cron_execution_schedule.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/autoscaling/models/cron_execution_schedule.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .execution_schedule import ExecutionSchedule
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CronExecutionSchedule(ExecutionSchedule):
    """
    An autoscaling execution schedule that uses a cron expression.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new CronExecutionSchedule object with values from keyword arguments. The default value of the :py:attr:`~oci.autoscaling.models.CronExecutionSchedule.type` attribute
        of this class is ``cron`` and it should not be changed.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param type:
            The value to assign to the type property of this CronExecutionSchedule.
        :type type: str
        :param timezone:
            The value to assign to the timezone property of this CronExecutionSchedule.
            Allowed values for this property are: "UTC"
        :type timezone: str
        :param expression:
            The value to assign to the expression property of this CronExecutionSchedule.
        :type expression: str
        """
        # Maps each model attribute to its Swagger type, used for (de)serialization.
        self.swagger_types = {
            'type': 'str',
            'timezone': 'str',
            'expression': 'str'
        }
        # Maps each model attribute to its key in the JSON wire representation.
        self.attribute_map = {
            'type': 'type',
            'timezone': 'timezone',
            'expression': 'expression'
        }
        self._type = None
        self._timezone = None
        self._expression = None
        # Discriminator value: this subclass always represents the 'cron' schedule type.
        self._type = 'cron'
    @property
    def expression(self):
        """
        **[Required]** Gets the expression of this CronExecutionSchedule.
        A cron expression that represents the time at which to execute the autoscaling policy.
        Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`
        You can use special characters that are supported with the Quartz cron implementation.
        You must specify `0` as the value for seconds.
        Example: `0 15 10 ? * *`
        :return: The expression of this CronExecutionSchedule.
        :rtype: str
        """
        return self._expression
    @expression.setter
    def expression(self, expression):
        """
        Sets the expression of this CronExecutionSchedule.
        A cron expression that represents the time at which to execute the autoscaling policy.
        Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`
        You can use special characters that are supported with the Quartz cron implementation.
        You must specify `0` as the value for seconds.
        Example: `0 15 10 ? * *`
        :param expression: The expression of this CronExecutionSchedule.
        :type: str
        """
        self._expression = expression
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality: two models are equal when every attribute matches.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| 34.528846 | 245 | 0.654971 | 3,002 | 0.835979 | 0 | 0 | 3,032 | 0.844333 | 0 | 0 | 2,520 | 0.701754 |
2af11eb0b42cbfb5a1efbef4325e698fa799a2e8 | 5,631 | py | Python | roadmap_planner_tools/scripts/pi_manager_example.py | JKBehrens/STAAMS-Solver | b6837da69cda574d081ab3dc0b307e3ce40ad6f2 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 16 | 2018-12-11T13:02:58.000Z | 2022-02-28T09:05:20.000Z | roadmap_planner_tools/scripts/pi_manager_example.py | stepakar/STAAMS-Solver | b6837da69cda574d081ab3dc0b307e3ce40ad6f2 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-11-04T13:16:49.000Z | 2022-02-28T09:17:30.000Z | roadmap_planner_tools/scripts/pi_manager_example.py | stepakar/STAAMS-Solver | b6837da69cda574d081ab3dc0b307e3ce40ad6f2 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2019-02-12T12:41:25.000Z | 2022-02-09T12:55:56.000Z | #!/usr/bin/env python
"""
Copyright (c) 2018 Robert Bosch GmbH
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
@author: Jan Behrens
"""
from geometry_msgs.msg import TransformStamped
from roadmap_planner_tools.planner_input_manager import PlannerInputManager
from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType
from roadmap_planning_common_msgs.srv import AddObjectRequest
from rospy_message_converter import json_message_converter
def get_pose_dict():
    # type: () -> dict[str, PoseStamped]
    """Parse the embedded AddObjectRequest JSON records.

    Returns a dict mapping each object name to its PoseStamped.
    """
    PosesDict = {}
    s = '{"object_type": "gluepoint", "object_name": "loc_8", "pose": {"header": {"stamp": {"secs": 1527494081, "nsecs": 245750904}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.0673123623248813, "x": -0.11819957737525943, "z": -0.000529293203694906}, "orientation": {"y": -0.09199954920780186, "x": -0.02204239273911617, "z": -0.9826223619036331, "w": -0.15969818331722338}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_6", "pose": {"header": {"stamp": {"secs": 1527494075, "nsecs": 379102230}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11369306929267338, "x": -0.1261289291850433, "z": 0.0007951176021754491}, "orientation": {"y": 0.07187094200286825, "x": -0.061023926496261725, "z": 0.9873831660085665, "w": 0.12722079850990872}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_7", "pose": {"header": {"stamp": {"secs": 1527494078, "nsecs": 595736504}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.1149274695384531, "x": 0.06208364635543662, "z": -0.005476238253788906}, "orientation": {"y": 0.1316436407714954, "x": 0.019739166149056388, "z": 0.9750055991865761, "w": 0.17788872566576838}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_4", "pose": {"header": {"stamp": {"secs": 1527494065, "nsecs": 979056120}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06856865760540547, "x": 0.4478018813158141, "z": -0.000679487573898074}, "orientation": {"y": -0.050516132689598016, "x": 0.014163494691613031, "z": -0.878984408924756, "w": -0.47395561461323}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_5", "pose": {"header": {"stamp": {"secs": 1527494071, "nsecs": 795750141}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06760627153697468, "x": 0.06349269911330815, "z": -0.0007470379806025116}, "orientation": {"y": -0.010168248374561623, "x": 0.04411559477008324, "z": -0.9325496611657705, "w": -0.3581920580954878}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_2", "pose": {"header": {"stamp": {"secs": 1527494059, "nsecs": 112413883}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06838950251450462, "x": 0.6409328063798745, "z": 0.00015782094835932174}, "orientation": {"y": 0.05237392498219545, "x": 0.02846261965189043, "z": 0.8403338301717435, "w": 0.53878187157086}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_3", "pose": {"header": {"stamp": {"secs": 1527494063, "nsecs": 79089880}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11665509017783073, "x": 0.6388356663857032, "z": 0.001613388200793883}, "orientation": {"y": -0.10153267056716704, "x": -0.0370089029955912, "z": -0.9240133314356973, "w": -0.3667708020489993}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_1", "pose": {"header": {"stamp": {"secs": 1527494055, "nsecs": 529085398}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11632455207600452, "x": 0.4453907194544092, "z": 0.0016855318552673815}, "orientation": {"y": -0.11786933993294174, "x": -0.08813291134398896, "z": -0.9653377546033448, "w": -0.21555145135020032}}}}' + '\n'
    # BUG FIX: str has no .readlines() (the old code raised AttributeError);
    # split the blob into individual JSON lines instead.
    json_strs = s.splitlines()
    for json_record in json_strs:
        # Skip any blank lines produced by the trailing newlines.
        if not json_record.strip():
            continue
        req = json_message_converter.convert_json_to_ros_message('roadmap_planning_common_msgs/AddObjectRequest', json_record)  # type: AddObjectRequest
        PosesDict[req.object_name] = req.pose
    return PosesDict
if __name__ == "__main__":
    pi_manager = PlannerInputManager()

    # Register the 'board' frame relative to 'world'.
    trans = TransformStamped()
    trans.child_frame_id = 'board'
    trans.header.frame_id = 'world'
    board_quat = [-0.6646584989424609, 0.7469166744613165, 0.009387090228191897, -0.016013860629187193]
    board_trans = [0.6, 0.3, 0.02]
    trans.transform.translation.x = board_trans[0]
    trans.transform.translation.y = board_trans[1]
    trans.transform.translation.z = board_trans[2]
    trans.transform.rotation.x = board_quat[0]
    trans.transform.rotation.y = board_quat[1]
    trans.transform.rotation.z = board_quat[2]
    trans.transform.rotation.w = board_quat[3]
    pi_manager.add_frame(transform=trans)

    # Register every glue-point location parsed from the embedded JSON.
    PosesDict = get_pose_dict()
    pi_manager.add_loc(PosesDict.values(), PosesDict.keys(), len(PosesDict) * ["gluepoint"])

    myOVC = OrderedVisitingConstraint()
    myOVC.name = 'ovc_1'
    loc_names_1 = StringList()
    loc_names_1.values.append('loc_1')
    myOVC.location_names.append(loc_names_1)
    pi_manager.add_ovc([myOVC])

    myOVC_2 = OrderedVisitingConstraint()
    myOVC_2.name = 'ovc_2'
    loc_names_1 = StringList()
    loc_names_1.values.append('loc_2')
    myOVC_2.location_names.append(loc_names_1)
    # BUG FIX: the second call previously re-added myOVC, so 'ovc_2' was never
    # registered even though the ordering constraint below references it.
    pi_manager.add_ovc([myOVC_2])

    # ct = Constraints[0]
    # Require ovc_1 to start only after ovc_2 has ended.
    pi_manager.add_ovc_ct(constraint_type=ConstraintType.StartsAfterEnd, first_ovcs=['ovc_1'], second_ovcs=['ovc_2'])
pi_manager.write_planner_input_file('test') | 69.518519 | 402 | 0.679098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,549 | 0.630261 |
2af153b0166a388f1cbc766d5de69bb0d457571e | 415 | py | Python | Trie/PrintUniqueRows.py | kopok2/DataStructures | d3877b542872c0be0f97306061c6ce0c3d98f1f3 | [
"MIT"
] | null | null | null | Trie/PrintUniqueRows.py | kopok2/DataStructures | d3877b542872c0be0f97306061c6ce0c3d98f1f3 | [
"MIT"
] | null | null | null | Trie/PrintUniqueRows.py | kopok2/DataStructures | d3877b542872c0be0f97306061c6ce0c3d98f1f3 | [
"MIT"
] | null | null | null | # coding=utf-8
"""Print unique rows in table.
Trie data structure Python solution.
"""
from Trie import Trie
def print_unique(table):
    """Print each row of *table* the first time it occurs, in input order."""
    seen = Trie()
    for entry in table:
        if not seen.in_tree(entry):
            print(entry)
        # Record the entry so later duplicates are suppressed.
        seen.add_key(entry)
if __name__ == "__main__":
    # Demo: duplicate entries ("semir") are printed only once.
    sample_rows = ["semir", "dahak", "semir", "semiriana", "sem"]
    print_unique(sample_rows)
| 15.37037 | 36 | 0.53253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.318072 |
2af450314f09ee3a74127b6e73b3040d23c048eb | 2,179 | py | Python | telemetry_client_bgp_sessions.py | akshshar/bigmuddy-network-telemetry-proto | 26ea64cf9910e41c62270fea3b0aa318dd1a51db | [
"Apache-2.0"
] | null | null | null | telemetry_client_bgp_sessions.py | akshshar/bigmuddy-network-telemetry-proto | 26ea64cf9910e41c62270fea3b0aa318dd1a51db | [
"Apache-2.0"
] | null | null | null | telemetry_client_bgp_sessions.py | akshshar/bigmuddy-network-telemetry-proto | 26ea64cf9910e41c62270fea3b0aa318dd1a51db | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Standard python libs
import os,sys
sys.path.append("./src/genpy")
import ast, pprint
import pdb
import yaml, json
import telemetry_pb2
from mdt_grpc_dialin import mdt_grpc_dialin_pb2
from mdt_grpc_dialin import mdt_grpc_dialin_pb2_grpc
import json_format
import grpc
#
# Get the GRPC Server IP address and port number
#
def get_server_ip_port():
    """Read the gRPC server address from the SERVER_IP/SERVER_PORT env vars.

    Prints usage hints and exits the process if either variable is missing.
    Returns a ``(ip, port)`` tuple with the port converted to int.
    """
    # Parenthesized single-argument print works under both Python 2 and 3
    # (the rest of this script still uses py2 print statements).
    if 'SERVER_IP' not in os.environ:
        print("Need to set the SERVER_IP env variable e.g.")
        print("export SERVER_IP='10.30.110.214'")
        # NOTE(review): exits with status 0 even though this is an error;
        # consider sys.exit(1) if callers inspect the exit code.
        os._exit(0)
    if 'SERVER_PORT' not in os.environ:
        print("Need to set the SERVER_PORT env variable e.g.")
        print("export SERVER_PORT='57777'")
        os._exit(0)
    return (os.environ['SERVER_IP'], int(os.environ['SERVER_PORT']))
#
# Setup the GRPC channel with the server, and issue RPCs
#
if __name__ == '__main__':
    server_ip, server_port = get_server_ip_port()
    print "Using GRPC Server IP(%s) Port(%s)" %(server_ip, server_port)
    # Create the (plaintext, no-TLS) channel for gRPC.
    channel = grpc.insecure_channel(str(server_ip)+":"+str(server_port))
    # When True, decode the telemetry payload instead of dumping raw segments.
    unmarshal = True
    # Create the gRPC stub.
    stub = mdt_grpc_dialin_pb2_grpc.gRPCConfigOperStub(channel)
    # Credentials sent as gRPC metadata on every call.
    metadata = [('username', 'vagrant'), ('password', 'vagrant')]
    Timeout = 3600*24*365 # Seconds
    # Subscribe to the router's pre-configured 'BGP-SESSION' subscription.
    sub_args = mdt_grpc_dialin_pb2.CreateSubsArgs(ReqId=99, encode=3, subidstr='BGP-SESSION')
    stream = stub.CreateSubs(sub_args, timeout=Timeout, metadata=metadata)
    for segment in stream:
        if not unmarshal:
            print segment
        else:
            # Go straight for telemetry data
            # NOTE(review): telemetry_pb is created but never used below.
            telemetry_pb = telemetry_pb2.Telemetry()
            encoding_path = 'Cisco-IOS-XR-ipv4-bgp-oper:bgp/instances/'+\
                'instance/instance-active/default-vrf/sessions/session'
            # Return in JSON format instead of protobuf.
            # Print only BGP-session records, then exit after the first match.
            if json.loads(segment.data)["encoding_path"] == encoding_path:
                print json.dumps(json.loads(segment.data), indent=3)
                os._exit(0)
| 30.263889 | 93 | 0.671409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 804 | 0.368977 |
2af5ae6a3bb26a288651a381ee3ed2c4e61944eb | 436 | py | Python | Python3-functions/define_custom_exception_class.py | ipetel/code-snippets | e05bb8ef1f5d213aadba501d80b310507a1af117 | [
"MIT"
] | 1 | 2020-08-07T14:57:28.000Z | 2020-08-07T14:57:28.000Z | Python3-functions/define_custom_exception_class.py | ipetel/code-snippets | e05bb8ef1f5d213aadba501d80b310507a1af117 | [
"MIT"
] | null | null | null | Python3-functions/define_custom_exception_class.py | ipetel/code-snippets | e05bb8ef1f5d213aadba501d80b310507a1af117 | [
"MIT"
] | 1 | 2020-12-12T08:29:56.000Z | 2020-12-12T08:29:56.000Z | '''
This code is a simple example how to define custom exception class in Python
'''
# custom exception class
class CustomError(Exception):
def __init__(self,message):
self.message = message
super().__init__(self.message)
# use it whenever you need in your code as follows:
try:
...
<some code>
...
except Exception as e:
print(f'### [ERROR] - {e}')
raise CustomError('some error message')
| 21.8 | 76 | 0.651376 | 131 | 0.300459 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.456422 |
2af5cd50987e2a9a3ac9fee4d70fb52d6229fe2e | 545 | py | Python | 2020/3a.py | combatopera/advent2020 | 6fa54e91ef1a5443dff36c15e65892701293201f | [
"Unlicense"
] | 2 | 2021-12-04T00:02:12.000Z | 2021-12-11T05:38:45.000Z | 2020/3a.py | combatopera/advent2020 | 6fa54e91ef1a5443dff36c15e65892701293201f | [
"Unlicense"
] | null | null | null | 2020/3a.py | combatopera/advent2020 | 6fa54e91ef1a5443dff36c15e65892701293201f | [
"Unlicense"
] | 1 | 2020-12-20T18:50:54.000Z | 2020-12-20T18:50:54.000Z | #!/usr/bin/env python3
from pathlib import Path
slope = 3, 1
class Map:
    """Terrain map: rows of '.'/'#' that tile infinitely to the right."""

    def __init__(self, rows):
        self.w = len(rows[0])
        self.rows = rows

    def tree(self, x, y):
        # Wrap x because the pattern repeats horizontally with period w.
        return self.rows[y][x % self.w] == '#'
def main():
    """Count trees hit while descending the puzzle map at the global slope."""
    terrain = Map(Path('input', '3').read_text().splitlines())
    position = [0, 0]
    hits = 0
    try:
        while True:
            for axis in range(2):
                position[axis] += slope[axis]
            hits += terrain.tree(*position)
    except IndexError:
        # Fell off the bottom of the map: report the tally.
        print(hits)


if '__main__' == __name__:
    main()
| 18.166667 | 56 | 0.500917 | 170 | 0.311927 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.082569 |
2af5d8780b22b10bebb1c9796affe8bcbb118caa | 2,092 | py | Python | src/dataProcessing.py | KJithendra/SCALE-Sim | 5df467916306d536bb4f244cf5d4b4f1aaeb89da | [
"MIT"
] | null | null | null | src/dataProcessing.py | KJithendra/SCALE-Sim | 5df467916306d536bb4f244cf5d4b4f1aaeb89da | [
"MIT"
] | null | null | null | src/dataProcessing.py | KJithendra/SCALE-Sim | 5df467916306d536bb4f244cf5d4b4f1aaeb89da | [
"MIT"
] | null | null | null | import numpy as np
import csv
import matplotlib.pyplot as pyplot
from gBarGraph import *
# Conditional Debugging
debug = False
# Inputs
scaleFac = 10**6 # scaling factor
adList = [[128,128], [64,64], [32,32], [16,16], [8,8]] #list of systolic array dimensions
dfList = ["os", "ws", "is"]
# Workloads; layerCount[i] is the number of layers of nnList[i].
nnList = ["AlphaGoZero", "DeepSpeech2", "FasterRCNN", "NCF_recommendation_short", "Resnet50", "Sentimental_seqCNN", "Transformer_short"]
layerCount = [8, 6, 46, 6, 54, 4, 9]
typeOfData = "cycles" # e.g. cycles, bandwidth etc.
xLabel = ['W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'W7']
# Scale Up
rootFolder = 'outputs/scaleUp_output_summary/'
figName = 'outputs/figures/scaleUp_cycles.png'
# Generate sum of all layers of each run
rtCycles, mdfList = sum_gen(adList=adList, dfList=dfList, \
                            nnList=nnList, layerCount=layerCount, \
                            rootFolder=rootFolder, typeOfData=typeOfData, \
                            scaleFac=scaleFac, debug=debug)
# Generate grouped bar Plot
gBarGraph( rtCycles=rtCycles, xLabel=xLabel, \
           figName=figName, dfList=dfList, \
           adList=adList, debug=debug)
# Scale Out
rootFolder = 'outputs/scaleOut_output_summary/'
figName = 'outputs/figures/scaleOut_cycles.png'
# Generate sum of all layers of each run
rtCyclesSO, mdfListSO = sum_gen(adList=adList, dfList=dfList, \
                                nnList=nnList, layerCount=layerCount, \
                                rootFolder=rootFolder, typeOfData=typeOfData, \
                                scaleFac=scaleFac, debug=debug)
# Generate grouped bar Plot
gBarGraph( rtCycles=rtCyclesSO, xLabel=xLabel, \
           figName=figName, dfList=dfList, \
           adList=adList, debug=debug)
# Persist the names of data files missing in either pass.
mdfList = mdfList + mdfListSO
with open('outputs/missingDataFileList.txt', mode='w') as msFile :
    msFile.writelines('%s\n' % element for element in mdfList)
# Ratio of scaleUp and ScaleOut runtimes
ratioSuSO = np.divide(rtCycles, rtCyclesSO)
figName='outputs/figures/ratioSUSO.png'
legendList = ['W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'W7']
xLabel = ['64 PE', '256 PE', '1024 PE', '4096 PE', '16384 PE']
scatterPlot(ratioSUSO=ratioSuSO,xLabel=xLabel,
legendList=legendList,figName=figName, \
dfList=dfList, debug=debug) | 36.068966 | 137 | 0.714149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 750 | 0.358509 |
2af67edc0d8f4da4a1404fc6cd17e17df40ffa60 | 374 | py | Python | src/cn_query/juhe/exceptions.py | winkidney/juhe-sdk | dc92d7421f54b5858c90b32f75297a76e3ecbac5 | [
"MIT"
] | null | null | null | src/cn_query/juhe/exceptions.py | winkidney/juhe-sdk | dc92d7421f54b5858c90b32f75297a76e3ecbac5 | [
"MIT"
] | null | null | null | src/cn_query/juhe/exceptions.py | winkidney/juhe-sdk | dc92d7421f54b5858c90b32f75297a76e3ecbac5 | [
"MIT"
] | null | null | null | from functools import wraps
DEFAULT_CODE = -1
class APIError(ValueError):
    """Raised when a remote API call fails at the network layer."""
def normalize_network_error(func):
    """Decorator: convert requests networking errors into APIError."""
    from requests import exceptions as requests_exceptions

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except requests_exceptions.RequestException as error:
            raise APIError(str(error))
    return wrapper
2af84b6eb941a6c622177f8b9fa1555a747d75f3 | 2,446 | py | Python | keyboard.py | misterpah/ldtp_adapter | df9cd0186cf51174ea131943f8547a9df079035b | [
"MIT"
] | null | null | null | keyboard.py | misterpah/ldtp_adapter | df9cd0186cf51174ea131943f8547a9df079035b | [
"MIT"
] | null | null | null | keyboard.py | misterpah/ldtp_adapter | df9cd0186cf51174ea131943f8547a9df079035b | [
"MIT"
] | null | null | null | import re
def find_key(keyString):
    """Best-effort lookup of a special-key attribute on a PyKeyboard object.

    Scans every ``*_key`` attribute of the keyboard instance and returns the
    value of the one whose name scores highest against ``keyString + "_key"``,
    or None when nothing scores above zero.

    NOTE(review): ``PyKeyboard`` and ``similar()`` are not defined or imported
    in this module as shown; presumably they come from the ``pykeyboard``
    package and a string-similarity helper -- TODO confirm.
    """
    k = PyKeyboard()
    key_to_press = None
    highest = 0
    for each in dir(k):
        if each.endswith("_key"):
            # similar() is evaluated twice per winning candidate; the second
            # call merely re-computes the score that won the comparison.
            if similar(keyString + "_key" ,each) > highest:
                highest = similar(keyString + "_key" ,each)
                key_to_press = getattr(k,each)
    return key_to_press
def keypress(key):
    """Press (without releasing) every key described by *key*.

    Multi-character tokens such as ``<shift>`` are resolved to special key
    codes via find_key(); single characters are pressed literally.
    """
    keyboard = PyKeyboard()
    targets = [find_key(token) if len(token) > 1 else token
               for token in regex_keystring(key)]
    # Press every resolved key, in order.
    for target in targets:
        keyboard.press_key(target)
def keyrelease(key):
    """Release every key described by *key* (mirror of keypress)."""
    keyboard = PyKeyboard()
    targets = [find_key(token) if len(token) > 1 else token
               for token in regex_keystring(key)]
    # Release every resolved key, in order.
    for target in targets:
        keyboard.release_key(target)
def _press_key(key_int):
    """Tap (press and release) the key with code *key_int*, then pause briefly."""
    k = PyKeyboard()
    k.tap_key(key_int)
    # Short delay so consecutive taps register as distinct events.
    time.sleep(0.3)
def regex_keystring(string):
    """Tokenize *string* into leading ``<name>`` tokens plus a literal tail.

    Each ``<letters>`` prefix is emitted as its bare name; as soon as the
    remainder does not start with such a token, the whole remainder is
    appended as one final literal token.
    """
    pattern = r"<([A-Za-z]*)>"
    tokens = []
    remainder = string
    while remainder:
        token_match = re.match(pattern, remainder)
        if token_match is None:
            # No <...> prefix: keep the rest verbatim and stop.
            tokens.append(remainder)
            break
        tokens.append(token_match.group(1))
        remainder = remainder[len(token_match.group(0)):]
    return tokens
def regex_generatekeyevent(string):
    """Rewrite every ``<name>`` token as ``;name;`` and split on ';'."""
    for token_match in re.finditer(r"<(.*?)>", string):
        string = string.replace(token_match.group(0),
                                ";" + token_match.group(1) + ";")
    return string.split(";")
def generatekeyevent(key):
    """Tap (press and release) the key sequence described by *key*.

    *key* may mix literal characters with ``<name>`` tokens, e.g. ``"<ctrl>a"``.
    Each segment is resolved through find_key(); segments that do not resolve
    are tapped character by character.
    """
    k = PyKeyboard()
    key = regex_generatekeyevent(key)
    if len(key) == 1:
        # No <...> tokens at all: tap each literal character in turn.
        for cur in key[0]:
            k.tap_key(cur)
            time.sleep(0.3)
    else:
        for each in key:
            if each == "":
                continue
            cur_key = find_key(each)
            # NOTE(review): "cur_key is None" would be the idiomatic test.
            if cur_key == None:
                # Not a recognised special key: tap it character by character.
                for cur in each:
                    k.tap_key(cur)
                    time.sleep(0.3)
            else:
                k.tap_key(cur_key)
                time.sleep(0.3)
| 27.177778 | 68 | 0.539248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.040065 |
2af8911dc224e8f8cbf833e0f7f9e9880ddb3eab | 3,095 | py | Python | dags/crawl.py | a07458666/StockCrawlerSendSlack | 97c4ea3817b81d9acb9216d4a0745fe914f7c8f8 | [
"MIT"
] | null | null | null | dags/crawl.py | a07458666/StockCrawlerSendSlack | 97c4ea3817b81d9acb9216d4a0745fe914f7c8f8 | [
"MIT"
] | null | null | null | dags/crawl.py | a07458666/StockCrawlerSendSlack | 97c4ea3817b81d9acb9216d4a0745fe914f7c8f8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
import csv
import time
from datetime import date
import requests
class CrawlerController(object):
    '''Split targets into several Crawler, avoid request url too long'''

    def __init__(self, targets, max_stock_per_crawler=50):
        # One Crawler per batch of at most max_stock_per_crawler symbols.
        self.crawlers = [
            Crawler(targets[start:start + max_stock_per_crawler])
            for start in range(0, len(targets), max_stock_per_crawler)
        ]

    def run(self):
        """Collect and concatenate the data of every batch crawler."""
        collected = []
        for worker in self.crawlers:
            collected.extend(worker.get_data())
        return collected
class Crawler(object):
    '''Request to Market Information System'''

    def __init__(self, targets):
        endpoint = 'http://mis.twse.com.tw/stock/api/getStockInfo.jsp'
        # Pad the millisecond timestamp ~1000 s forward to tolerate clock skew.
        timestamp = int(time.time() * 1000 + 1000000)
        channels = '|'.join('tse_{}.tw'.format(symbol) for symbol in targets)
        self.query_url = '{}?_={}&ex_ch={}'.format(endpoint, timestamp, channels)

    def get_data(self):
        """Fetch quote records; returns [] on any request/parse failure."""
        try:
            session = requests.session()
            # Visit the index page first so the quote request carries a
            # valid session cookie.
            session.get('http://mis.twse.com.tw/stock/index.jsp',
                        headers={'Accept-Language': 'zh-TW'})
            payload = json.loads(session.get(self.query_url).text)
        except Exception as err:
            print(err)
            return []
        return payload['msgArray']
class Recorder(object):
    '''Record data to csv'''
    def __init__(self, path='data'):
        # One output folder per calendar day, e.g. data/20240131.
        self.folder_path = '{}/{}'.format(path, date.today().strftime('%Y%m%d'))
        if not os.path.isdir(self.folder_path):
            os.mkdir(self.folder_path)
    def record_to_csv(self, data):
        """Append one CSV row per record to <folder>/<row['c']>.csv."""
        #print('data ', data)
        for row in data:
            try:
                file_path = '{}/{}.csv'.format(self.folder_path, row['c'])
                with open(file_path, 'a') as output_file:
                    writer = csv.writer(output_file, delimiter=',')
                    writer.writerow([
                        row['t'],# quote time
                        row['n'],# stock name
                        row['z'],# latest trade price
                        row['y'] # previous close price
                        # row['tv'],# volume of the current tick
                        # row['v'],# accumulated volume for the day
                        # row['a'],# best five ask prices
                        # row['f'],# best five ask volumes
                        # row['b'],# best five bid prices
                        # row['g']# best five bid volumes
                        ])
            # A malformed record only skips that row; the error is printed.
            except Exception as err:
                print(err)
def RunCrawl():
    """Crawl quotes for every symbol listed in stocknumber.csv and record them.

    Reads one stock number per line from 'stocknumber.csv' in the current
    working directory, fetches the quotes and appends them to per-stock CSVs.
    """
    print('os.path ', os.getcwd())
    # BUG FIX: the file handle was previously never closed; a context manager
    # releases it even if parsing fails.
    with open('stocknumber.csv', 'r') as source:
        targets = [line.strip() for line in source]
    controller = CrawlerController(targets)
    data = controller.run()
    recorder = Recorder()
    recorder.record_to_csv(data)
if __name__ == '__main__':
RunCrawl() | 32.578947 | 82 | 0.515347 | 2,755 | 0.857988 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 0.249144 |
2afc65a9a228fad0a34b4258aaae1e52ee00b1c9 | 694 | py | Python | supplier_testing/case_05.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 1 | 2021-02-23T05:48:01.000Z | 2021-02-23T05:48:01.000Z | supplier_testing/case_05.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 9 | 2018-03-20T15:53:44.000Z | 2018-07-09T10:56:18.000Z | supplier_testing/case_05.py | openhealthcare/python-fp17 | 61a665d90b04dc5e94d433dc7fbb16b901c70e7b | [
"BSD-3-Clause"
] | 2 | 2018-03-22T14:24:41.000Z | 2021-02-23T05:48:02.000Z | import datetime
from fp17 import treatments
def annotate(bcds1):
    """Populate the claim object *bcds1* with the data for this test case.

    Sets patient demographics, acceptance/completion dates, the patient
    charge and the treatment codes, then returns the same object.
    """
    # Patient demographics.
    bcds1.patient.surname = "BEDWORTH"
    bcds1.patient.forename = "TOBY"
    bcds1.patient.address = ["5 HIGH STREET"]
    bcds1.patient.sex = 'M'
    bcds1.patient.date_of_birth = datetime.date(1938, 4, 11)
    # Course-of-treatment dates.
    bcds1.date_of_acceptance = datetime.date(2017, 4, 1)
    bcds1.date_of_completion = datetime.date(2017, 4, 1)
    # Patient charge, expressed in pence.
    bcds1.patient_charge_pence = 7320
    # Treatments: "Upper Acrylic Denture 12, Ethnic Origin 5"
    bcds1.treatments = [
        treatments.REGULATION_11_APPLIANCE,
        treatments.UPPER_DENTURE_ACRYLIC(12),
        treatments.ETHNIC_ORIGIN_5_WHITE_AND_BLACK_AFRICAN,
    ]
    return bcds1
| 26.692308 | 61 | 0.707493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.131124 |
2aff67cda8054946e671fab749f72b00c6c27c7f | 1,643 | py | Python | mipkit/faces/helpers.py | congvmit/mipkit | d65a5083852dcfc5db766175aa402a5e3a506f21 | [
"MIT"
] | 8 | 2021-06-17T08:13:51.000Z | 2022-02-21T13:31:18.000Z | mipkit/faces/helpers.py | congvmit/mipkit | d65a5083852dcfc5db766175aa402a5e3a506f21 | [
"MIT"
] | null | null | null | mipkit/faces/helpers.py | congvmit/mipkit | d65a5083852dcfc5db766175aa402a5e3a506f21 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2021 Cong Vo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Provided license texts might have their own copyrights and restrictions
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from collections import OrderedDict
# Ordered mapping from facial-region name to its (start, end) index range in
# the 68-point landmark layout; the end index is exclusive (Python slicing).
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("inner_mouth", (60, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])
# For dlib's 5-point facial landmark detector:
FACIAL_LANDMARKS_5_IDXS = OrderedDict([
    ("right_eye", (2, 3)),
    ("left_eye", (0, 1)),
    # NOTE(review): (4) is a plain int, not a tuple like the entries above;
    # code expecting a (start, end) pair will fail on "nose" -- confirm intent.
    ("nose", (4))
])
| 36.511111 | 78 | 0.720024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,331 | 0.809119 |
63015142c904ffc67f23900b60665809596a39ad | 465 | py | Python | src/fizz_buzz.py | rckt-cmdr/fizz-buzz | e1ecc9be7a9f1c3067622eba5cc7e35340bef983 | [
"MIT"
] | null | null | null | src/fizz_buzz.py | rckt-cmdr/fizz-buzz | e1ecc9be7a9f1c3067622eba5cc7e35340bef983 | [
"MIT"
] | null | null | null | src/fizz_buzz.py | rckt-cmdr/fizz-buzz | e1ecc9be7a9f1c3067622eba5cc7e35340bef983 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# File: fizz_buzz.py
# Author: Jonathan Belden
# Description: Fizz-Buzz Coding Challenge
# Reference: https://edabit.com/challenge/WXqH9qvvGkmx4dMvp
def evaluate(inputValue):
    """Return 'Fizz'/'Buzz'/'FizzBuzz' by divisibility by 3/5, else str(inputValue)."""
    label = ""
    if inputValue % 3 == 0:
        label += "Fizz"
    if inputValue % 5 == 0:
        label += "Buzz"
    return label if label else str(inputValue)
6301f8566c81a5574264ed3398aba56307ac1ece | 213 | py | Python | Ex.27-Numpy.py | aguinaldolorandi/100-exercicios-Numpy | 276c721bd8b161153223b353168a1c15936edbd1 | [
"MIT"
] | null | null | null | Ex.27-Numpy.py | aguinaldolorandi/100-exercicios-Numpy | 276c721bd8b161153223b353168a1c15936edbd1 | [
"MIT"
] | null | null | null | Ex.27-Numpy.py | aguinaldolorandi/100-exercicios-Numpy | 276c721bd8b161153223b353168a1c15936edbd1 | [
"MIT"
] | null | null | null | # Exercícios Numpy-27
# *******************
import numpy as np
# Z holds the integers 0..9.
Z=np.arange((10),dtype=int)
# Elementwise Z**Z; note 0**0 evaluates to 1.
print(Z**Z)
print(Z)
# Chained shifts parse as (2 << Z) >> 2.
print(2<<Z>>2)
print()
# Parsed as Z < (-Z), NOT an assignment arrow: elementwise comparison.
print(Z <- Z)
print()
# Multiplying by the imaginary unit yields a complex array.
print(1j*Z)
print()
# Two successive true divisions by 1: a float array equal to Z.
print(Z/1/1)
print()
#print(Z<Z>Z) | 12.529412 | 27 | 0.577465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.261682 |
630383ff02afa466d04f39beea1550433314bef9 | 234 | py | Python | project_opensource/nanodet-2020_11_27/nanodet-main/nanodet/data/dataset/__init__.py | yunshangyue71/mycodes | 54b876004c32d38d9c0363fd292d745fee8dff3c | [
"Apache-2.0"
] | null | null | null | project_opensource/nanodet-2020_11_27/nanodet-main/nanodet/data/dataset/__init__.py | yunshangyue71/mycodes | 54b876004c32d38d9c0363fd292d745fee8dff3c | [
"Apache-2.0"
] | null | null | null | project_opensource/nanodet-2020_11_27/nanodet-main/nanodet/data/dataset/__init__.py | yunshangyue71/mycodes | 54b876004c32d38d9c0363fd292d745fee8dff3c | [
"Apache-2.0"
] | null | null | null | import copy
from .coco import CocoDataset
def build_dataset(cfg, mode):
    """Instantiate a dataset from a config mapping.

    :param cfg: dataset config with a 'name' key (e.g. {'name': 'coco', ...});
                the remaining keys are forwarded to the dataset constructor.
    :param mode: split mode forwarded to the dataset (e.g. 'train'/'val').
    :raises NotImplementedError: if cfg['name'] names an unsupported dataset.
    """
    # Deep-copy so popping 'name' never mutates the caller's config.
    dataset_cfg = copy.deepcopy(cfg)
    name = dataset_cfg.pop('name')
    if name == 'coco':
        return CocoDataset(mode=mode, **dataset_cfg)
    # BUG FIX: previously fell through and silently returned None here.
    raise NotImplementedError('Unknown dataset type: {}'.format(name))
6304f6f1ec92ec4d1702c41e4ebb9a78aa978644 | 5,387 | py | Python | straintables/Executable/GenomePipeline.py | Gab0/linkageMapper | 549b292e5b6ab22e03373483cd27236aa2f635eb | [
"MIT"
] | null | null | null | straintables/Executable/GenomePipeline.py | Gab0/linkageMapper | 549b292e5b6ab22e03373483cd27236aa2f635eb | [
"MIT"
] | 1 | 2020-05-03T15:13:07.000Z | 2020-05-04T03:01:59.000Z | straintables/Executable/GenomePipeline.py | Gab0/straintables | 549b292e5b6ab22e03373483cd27236aa2f635eb | [
"MIT"
] | null | null | null | #!/bin/python
"""
straintables' main pipeline script;
"""
import os
import argparse
import shutil
import straintables
import subprocess
from Bio.Align.Applications import ClustalOmegaCommandline
from straintables.logo import logo
from straintables.Executable import primerFinder, detectMutations,\
compareHeatmap, matrixAnalysis
from straintables.Database import directoryManager
Description = """
Main straintables pipeline.
Will initialize an analysis directory based on the provided genomes,
annotation and wanted regions."
If primers were not declared at the wanted regions file,
it will try to create primers based on the region sequence,
by using the region boundaries defined at the annotation file.
The resulting directory can be inspected manually
or used to build dissimilarity matrices through the command 'stview'.
"""
class Options():
    """Lightweight attribute bag: exposes the keys of a mapping as
    instance attributes (mimics an argparse.Namespace)."""

    def __init__(self, options):
        for key, value in dict(options).items():
            setattr(self, key, value)
def find_primers(options):
    """Run the primer-finding stage; returns primerFinder's result
    (truthy on success, falsy on failure — see Execute())."""
    return primerFinder.Execute(options)
def run_alignment(filePrefix,
                  clustalPath=straintables.Definitions.ClustalCommand):
    """Align <filePrefix>.fasta with Clustal Omega, producing
    <filePrefix>.aln in clustal format (overwrites any existing file)."""
    source_fasta = filePrefix + ".fasta"
    aligned_output = filePrefix + ".aln"

    command = ClustalOmegaCommandline(
        clustalPath,
        infile=source_fasta,
        outfile=aligned_output,
        force=True,
        outfmt="clustal",
    )
    stdout, stderr = command()
    print(stdout)
def draw_tree(filePrefix):
    """Render a phylogenetic tree from <filePrefix>.ph into <filePrefix>.pdf."""
    infile = filePrefix + ".ph"
    # BUG FIX: the original built "<filePrefix>pdf" (missing the '.'
    # extension separator), producing a bogus output filename.
    outfile = filePrefix + ".pdf"

    treeOptions = Options({
        "InputFile": infile,
        "OutputFile": outfile
    })

    straintables.DrawGraphics.drawTree.Execute(treeOptions)
def run_meshclust(filePrefix):
    """Cluster the sequences in <filePrefix>.fasta with MeShClust at
    99.9% identity, writing <filePrefix>.clst."""
    command = [
        "meshclust",
        filePrefix + ".fasta",
        "--output",
        filePrefix + ".clst",
        "--id", "0.999",
        "--align",
    ]
    subprocess.run(command)
def detect_mutations(filePrefix):
    """Run the mutation-detection step on the alignment <filePrefix>.aln."""
    mutation_args = Options({
        "InputFile": filePrefix + ".aln",
        "PlotSubtitle": ""
    })
    detectMutations.Execute(mutation_args)
def matrix_analysis(WorkingDirectory):
    """Build the dissimilarity matrices for an analysis directory.

    Returns matrixAnalysis.Execute's result (truthy on success).
    """
    analysisOptions = Options({
        "WorkingDirectory": WorkingDirectory,
        "updateOnly": False
    })

    return matrixAnalysis.Execute(analysisOptions)
def parse_arguments():
    """Assemble the command-line interface (pipeline flags plus the
    primer finder's own options) and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description=Description)

    arg_parser.add_argument(
        "--noamplicon", dest="DoAmplicon", action="store_false", default=True)
    arg_parser.add_argument(
        "--noalign", dest="DoAlignment", action="store_false", default=True)
    arg_parser.add_argument(
        "--alnmode", dest="AlignmentMode", default="clustal")
    arg_parser.add_argument(
        "--clustalpath", dest="ClustalPath",
        default=straintables.Definitions.ClustalCommand)

    # Let the primer finder register its options on the same parser.
    arg_parser = primerFinder.parse_arguments(arg_parser)
    return arg_parser.parse_args()
def TestMeshclust():
    """Report whether the optional MeShClust binary is available on PATH."""
    available = shutil.which("meshclust") is not None
    if available:
        print("MeshClust enabled!")
    else:
        print("MeshClust not found! Disabled...")
    return available
def process_individual_region(options, locusName, MeshClustEnabled):
    """Align one amplicon region, analyze its mutations, and (when
    MeShClust is available) cluster its sequences."""
    region_prefix = os.path.join(
        options.WorkingDirectory,
        straintables.Definitions.FastaRegionPrefix + locusName)

    print("Running alignment for %s..." % locusName)
    run_alignment(region_prefix, clustalPath=options.ClustalPath)

    # draw_tree(region_prefix)  # tree rendering currently disabled
    detect_mutations(region_prefix)

    if MeshClustEnabled:
        run_meshclust(region_prefix)
def Execute(options):
    """Run the full pipeline inside options.WorkingDirectory:
    primer search, per-region alignment/analysis, then matrix analysis.

    Exits the process with status 1 on any fatal precondition failure.
    """
    if not options.PrimerFile:
        print("Fatal: No primer file specified!")
        exit(1)

    # -- SELECT WORKING DIRECTORY;
    # Default: analysisResults/<primer file name without extension>.
    if not options.WorkingDirectory:
        AnalysisCode = os.path.splitext(options.PrimerFile)[0]
        AnalysisCode = os.path.basename(AnalysisCode)

        WorkingDirectoryBase = "analysisResults"
        options.WorkingDirectory = os.path.join(WorkingDirectoryBase,
                                                AnalysisCode)

    # -- TEST CLUSTAL SETUP;
    if not shutil.which(options.ClustalPath):
        print("%s not found! Aborting..." % options.ClustalPath)
        exit(1)

    MeshClustEnabled = TestMeshclust()

    directoryManager.createDirectoryPath(options.WorkingDirectory)

    # SHOW BEAUTIFUL ASCII ART;
    print(logo)

    # -- RUN PIPELINE;
    if options.DoAmplicon:
        result = find_primers(options)
        if not result:
            print("Failure to find primers.")
            exit(1)

    AllowedAlignModes = ["clustal"]
    if options.AlignmentMode not in AllowedAlignModes:
        print("Unknown alignment mode %s." % (options.AlignmentMode))
        exit(1)

    # Regions successfully matched by the primer finder (read from disk).
    MatchedRegions = straintables.OutputFile.MatchedRegions(
        options.WorkingDirectory)
    MatchedRegions.read()
    SuccessfulLoci = MatchedRegions.content["LocusName"]

    if options.DoAlignment:
        for locusName in SuccessfulLoci:
            process_individual_region(options, locusName, MeshClustEnabled)

    if matrix_analysis(options.WorkingDirectory):
        print("Analysis successful.")
def main():
    """CLI entry point: parse the command line and run the pipeline."""
    options = parse_arguments()
    Execute(options)


if __name__ == "__main__":
    main()
| 25.29108 | 75 | 0.656952 | 87 | 0.01615 | 0 | 0 | 0 | 0 | 0 | 0 | 1,240 | 0.230184 |
6306e2b42d2b769e50bc98e817a532f98f68ffd4 | 1,881 | py | Python | venv/lib/python3.8/site-packages/vsts/test/v4_1/models/test_failures_analysis.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/test/v4_1/models/test_failures_analysis.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/test/v4_1/models/test_failures_analysis.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TestFailuresAnalysis(Model):
    """TestFailuresAnalysis.

    :param existing_failures:
    :type existing_failures: :class:`TestFailureDetails <test.v4_1.models.TestFailureDetails>`
    :param fixed_tests:
    :type fixed_tests: :class:`TestFailureDetails <test.v4_1.models.TestFailureDetails>`
    :param new_failures:
    :type new_failures: :class:`TestFailureDetails <test.v4_1.models.TestFailureDetails>`
    :param previous_context:
    :type previous_context: :class:`TestResultsContext <test.v4_1.models.TestResultsContext>`
    """

    # msrest (de)serialization map: Python attribute -> wire key/type.
    # NOTE: this file is generated (see header); edits here are lost on
    # regeneration.
    _attribute_map = {
        'existing_failures': {'key': 'existingFailures', 'type': 'TestFailureDetails'},
        'fixed_tests': {'key': 'fixedTests', 'type': 'TestFailureDetails'},
        'new_failures': {'key': 'newFailures', 'type': 'TestFailureDetails'},
        'previous_context': {'key': 'previousContext', 'type': 'TestResultsContext'}
    }

    def __init__(self, existing_failures=None, fixed_tests=None, new_failures=None, previous_context=None):
        """Initialize the analysis container; all fields default to None."""
        super(TestFailuresAnalysis, self).__init__()
        self.existing_failures = existing_failures
        self.fixed_tests = fixed_tests
        self.new_failures = new_failures
        self.previous_context = previous_context
| 49.5 | 108 | 0.604997 | 1,272 | 0.676236 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.702286 |
6308a827909b901b26c90ab14b8e55036177fe0f | 1,634 | py | Python | health.py | adil-zhang/healthy_auto | ca42cafacd1d5120274048f42dc99b83b8c08970 | [
"Apache-2.0"
] | 4 | 2021-02-24T06:48:18.000Z | 2021-03-22T09:46:28.000Z | health.py | adil-zhang/healthy_auto | ca42cafacd1d5120274048f42dc99b83b8c08970 | [
"Apache-2.0"
] | null | null | null | health.py | adil-zhang/healthy_auto | ca42cafacd1d5120274048f42dc99b83b8c08970 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# coding=utf-8
import json
import random
import requests
import argparse
import time
# Build the request payload and serialize it to JSON
def getval():
    """Return the fixed daily health-report payload as a JSON string."""
    payload = {
        "province": "北京",
        "city": "北京市",
        "addressType": "整租房",
        "temperature": "36.7",
        "dayNum": "",
        "contactHbPerplr": "无接触",
        "toWh": "未去过/路过武汉",
        "familySymptom": "无症状",
        "remarks": "",
        "otherDesc": "",
        "backDate": "2020-02-12",
        "jtgj": "自驾/步行",
        "bc": "",
        "addressOther": "",
        "hbOther": "",
        "familyOther": None,
        "lj": "是",
        "ljOther": "",
        "workStatus": "到岗上班",
        "workOther": "",
        "returnCountry": "未出国",
        "returnCountryRemarks": "",
        "provinceId": "110000",
        "symptom": "无症状",
    }
    return json.dumps(payload)
# Request headers
def gethead():
    """Return the HTTP headers used for the JSON POST."""
    return {'content-type': 'application/json'}
# Read the user names passed in from the GitHub Actions secrets (via -s)
def getarg(argv=None):
    """Parse the ``-s`` people list from *argv*.

    :param argv: argument list to parse; defaults to ``sys.argv[1:]``
        (added for testability — the original always read sys.argv,
        which is backward compatible with calling ``getarg()``).
    :return: list of names given after ``-s``, or None when absent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-s",
                        dest="PEOPLE",
                        nargs='*',
                        help="people name of the NAME list")
    args = parser.parse_args(argv)
    return args.PEOPLE
if __name__ == '__main__':
    people = getarg()
    value = getval()
    head = gethead()
    # Add a random sleep so the check-in time is not identical every day
    # (GitHub Actions scheduling is imprecise anyway).
    # NOTE(review): sl_s is 1-2 SECONDS, so sl_m below is always 0 minutes;
    # the printed message is therefore misleading.
    sl_s = random.randint(1, 2)
    time.sleep(sl_s)
    sl_m = int(sl_s/60)
    print('本次休眠时间为'+str(sl_m)+'分钟')
    print('**************耐心等待中*************')
    if people:
        # NOTE(review): the loop variable shadows the `people` list, and the
        # success message is printed without checking req.status_code.
        for people in people:
            url = 'https://health.foton.com.cn/health-attendance/health/save/'+people+'@foton'
            req = requests.post(url, data=value, headers=head)
            print('今日打卡成功!')
| 28.666667 | 255 | 0.573439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.484749 |
63092478a708b5cc4e0a97fbe0778f3cdbd516d2 | 7,848 | py | Python | thirdparty/his_evaluators/his_evaluators/utils/video.py | tj-eey/impersonator | 3ed69b3db7e5369a39a9de5d97e965ed9172eff6 | [
"MIT"
] | 1,717 | 2019-09-27T00:44:11.000Z | 2022-03-29T11:38:21.000Z | thirdparty/his_evaluators/his_evaluators/utils/video.py | tj-eey/impersonator | 3ed69b3db7e5369a39a9de5d97e965ed9172eff6 | [
"MIT"
] | 81 | 2019-09-27T11:52:05.000Z | 2022-03-12T00:00:31.000Z | thirdparty/his_evaluators/his_evaluators/utils/video.py | tj-eey/impersonator | 3ed69b3db7e5369a39a9de5d97e965ed9172eff6 | [
"MIT"
] | 337 | 2019-09-27T02:46:06.000Z | 2022-03-14T22:00:02.000Z | # -*- coding: utf-8 -*-
# @Time : 2019-08-02 18:31
# @Author : Wen Liu
# @Email : liuwen@shanghaitech.edu.cn
import os
import cv2
import glob
import shutil
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import numpy as np
import subprocess
def auto_unzip_fun(x, f):
    """Apply callable *f* to the unpacked argument tuple *x*
    (picklable helper for multiprocessing.Pool.imap + functools.partial)."""
    return f(*x)
def make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=24):
    """
    output_path is the final mp4 name
    img_dir is where the images to make into video are saved.
    """
    # Frame size is taken from the first image; all frames are assumed to
    # share it — TODO confirm (VideoWriter silently drops mismatched frames).
    first_img = cv2.imread(img_path_list[0])
    h, w = first_img.shape[:2]

    pool_size = 40
    # Write an intermediate XVID .avi, then transcode to h264 mp4 below.
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (w, h))
    args_list = [(img_path,) for img_path in img_path_list]
    # Decode frames in parallel; imap preserves input order.
    with Pool(pool_size) as p:
        for img in tqdm(p.imap(partial(auto_unzip_fun, f=cv2.imread), args_list), total=len(args_list)):
            videoWriter.write(img)
    videoWriter.release()
    if save_frames_dir:
        # Also archive the frames as zero-padded jpgs.
        for i, img_path in enumerate(img_path_list):
            shutil.copy(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))

    # NOTE(review): paths are interpolated into a shell command; spaces or
    # shell metacharacters in output_mp4_path will break or be interpreted.
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % tmp_avi_video_path)
def fuse_image(img_path_list, row_num, col_num):
    """Read row_num*col_num images and tile them row-major into one mosaic."""
    assert len(img_path_list) == row_num * col_num

    images = [cv2.imread(p) for p in img_path_list]
    rows = [
        np.concatenate(images[r * col_num:(r + 1) * col_num], axis=1)
        for r in range(row_num)
    ]
    return np.concatenate(rows, axis=0)
def fuse_video(video_frames_path_list, output_mp4_path, row_num, col_num, fps=24):
    """Tile several frame-aligned videos into one row_num x col_num grid
    video; each element of video_frames_path_list is one video's frame
    path list (all must have equal length)."""
    assert len(video_frames_path_list) == row_num * col_num
    frame_num = len(video_frames_path_list[0])
    # Per-tile size comes from the first frame of the first video.
    first_img = cv2.imread(video_frames_path_list[0][0])
    h, w = first_img.shape[:2]
    fused_h, fused_w = h * row_num, w * col_num

    # One fuse_image() job per output frame.
    args_list = []
    for frame_idx in range(frame_num):
        fused_frame_path_list = [video_frames[frame_idx] for video_frames in video_frames_path_list]
        args_list.append((fused_frame_path_list, row_num, col_num))

    pool_size = 40
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # for args in args_list:
    #     fuse_image(*args)
    # exit()
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))
    with Pool(pool_size) as p:
        for img in tqdm(p.imap(partial(auto_unzip_fun, f=fuse_image), args_list), total=len(args_list)):
            videoWriter.write(img)
    videoWriter.release()
    # Transcode the XVID avi to h264 mp4, then drop the intermediate file.
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % (tmp_avi_video_path))
def merge(src_img, ref_img_path, out_img_path, pad):
    """Read the reference and output frames, square both to src_img's
    height, and return [src | pad | ref | pad | out] stacked horizontally."""
    h, w = src_img.shape[:2]
    image_size = h
    ref_img = cv2.imread(ref_img_path)
    out_img = cv2.imread(out_img_path)
    # BUG FIX: both conditions used `and`, so an image matching image_size in
    # exactly one dimension was never resized, and np.concatenate below then
    # failed on the height mismatch.
    if ref_img.shape[0] != image_size or ref_img.shape[1] != image_size:
        ref_img = cv2.resize(ref_img, (image_size, image_size))

    if out_img.shape[0] != image_size or out_img.shape[1] != image_size:
        out_img = cv2.resize(out_img, (image_size, image_size))

    # print(src_img.shape, ref_img.shape, out_img.shape)
    merge_img = np.concatenate([src_img, pad, ref_img, pad, out_img], axis=1)
    return merge_img
def load_image(image_path, image_size=512):
    """Read *image_path* with OpenCV and resize it to a square.

    Returns an (image_size, image_size, 3) BGR uint8 array.
    """
    return cv2.resize(cv2.imread(image_path), (image_size, image_size))
def fuse_one_image(img_paths, image_size):
    """Degenerate one-source case: load the single image at full size."""
    return load_image(img_paths[0], image_size)
def fuse_two_images(img_paths, image_size):
    """Stack the first two images vertically, each resized to a square of
    side image_size // 2.

    Returns an (image_size, image_size // 2, 3) BGR uint8 array; only the
    first two entries of *img_paths* are used.
    """
    half = image_size // 2
    top = load_image(img_paths[0], half)
    bottom = load_image(img_paths[1], half)
    return np.concatenate([top, bottom], axis=0)
def fuse_four_images(img_paths, image_size):
    """Arrange four images in a 2x2 grid of total side image_size.

    Returns an (image_size, image_size, 3) BGR uint8 array.
    """
    left_column = fuse_two_images(img_paths[0:2], image_size)
    right_column = fuse_two_images(img_paths[2:4], image_size)
    return np.concatenate([left_column, right_column], axis=1)
def fuse_eight_images(img_paths, image_size):
    """Tile eight images as two 2x2 grids placed side by side.

    Returns an (image_size // 2, image_size, 3) BGR uint8 array.
    """
    # BUG FIX: the original called fuse_two_images with four paths (which
    # silently dropped half the sources) and stacked on axis=0, producing an
    # (image_size, image_size // 4) image instead of the documented shape.
    left_grid = fuse_four_images(img_paths[0:4], image_size // 2)
    right_grid = fuse_four_images(img_paths[4:8], image_size // 2)
    fuse_img = np.concatenate([left_grid, right_grid], axis=1)
    return fuse_img
def fuse_source(all_src_img_paths, image_size=512):
    """
    Args:
        all_src_img_paths (list of str): the list of source image paths, currently it only supports, 1, 2, 4, 8 number
        of source images.
        image_size (int): the final image resolution, (image_size, image_size, 3)

    Returns:
        fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
    """

    ns = len(all_src_img_paths)

    # TODO, currently it only supports, 1, 2, 4, 8 number of source images.
    # BUG FIX: both messages below contained a "{}" placeholder that was never
    # formatted; they now interpolate the actual count.
    assert ns in [1, 2, 4, 8], \
        "{} must be in [1, 2, 4, 8], currently it only supports, " \
        "1, 2, 4, 8 number of source images.".format(ns)

    if ns == 1:
        fuse_img = load_image(all_src_img_paths[0], image_size)
    elif ns == 2:
        fuse_img = fuse_two_images(all_src_img_paths, image_size)
    elif ns == 4:
        fuse_img = fuse_four_images(all_src_img_paths, image_size)
    elif ns == 8:
        fuse_img = fuse_eight_images(all_src_img_paths, image_size)
    else:
        # Unreachable unless asserts are stripped (python -O).
        raise ValueError("{} must be in [1, 2, 4, 8], currently it only "
                         "supports, 1, 2, 4, 8 number of source images.".format(ns))

    return fuse_img
def fuse_source_reference_output(output_mp4_path, src_img_paths, ref_img_paths, out_img_paths,
                                 image_size=512, pad=10, fps=25):
    """Render a comparison video: fused sources | reference | output,
    one merged frame per (reference, output) pair."""
    total = len(ref_img_paths)
    assert total == len(out_img_paths), "{} != {}".format(total, len(out_img_paths))

    # The fused source panel is identical for every frame.
    fused_src_img = fuse_source(src_img_paths, image_size)
    # Black vertical spacer between panels.
    pad_region = np.zeros((image_size, pad, 3), dtype=np.uint8)

    pool_size = min(15, os.cpu_count())
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Frame width: source panel + two padded image_size panels.
    W = fused_src_img.shape[1] + (image_size + pad) * 2
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (W, image_size))
    # executor.map preserves input order, so frames are written in sequence.
    with ProcessPoolExecutor(pool_size) as pool:
        for img in tqdm(pool.map(merge, [fused_src_img] * total,
                                 ref_img_paths, out_img_paths, [pad_region] * total)):
            videoWriter.write(img)

    videoWriter.release()
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % tmp_avi_video_path)
| 30.776471 | 119 | 0.658894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,049 | 0.261086 |
630b1968948a312327fd4f74f6cc73dcebabe1eb | 10,346 | py | Python | podcats/__init__.py | moritzj29/podcats | fc2bf1656eba5e6cc5062fe2b55717ef9b24dd5b | [
"BSD-3-Clause"
] | null | null | null | podcats/__init__.py | moritzj29/podcats | fc2bf1656eba5e6cc5062fe2b55717ef9b24dd5b | [
"BSD-3-Clause"
] | null | null | null | podcats/__init__.py | moritzj29/podcats | fc2bf1656eba5e6cc5062fe2b55717ef9b24dd5b | [
"BSD-3-Clause"
] | null | null | null | """
Podcats is a podcast feed generator and a server.
It generates RSS feeds for podcast episodes from local audio files and,
optionally, exposes the feed and as well as the episode file via
a built-in web server so that they can be imported into iTunes
or another podcast client.
"""
import os
import re
import time
import argparse
import mimetypes
from email.utils import formatdate
from os import path
from xml.sax.saxutils import escape, quoteattr
try:
from urllib.request import pathname2url
except ImportError:
# For python 2
# noinspection PyUnresolvedReferences
from urllib import pathname2url
import mutagen
import humanize
from mutagen.id3 import ID3
from flask import Flask, Response
# noinspection PyPackageRequirements
from jinja2 import Environment, FileSystemLoader
from collections import defaultdict
__version__ = '0.6.3'
__licence__ = 'BSD'
__author__ = 'Jakub Roztocil'
__url__ = 'https://github.com/jakubroztocil/podcats'
WEB_PATH = '/web'  # URL path of the HTML episode listing
STATIC_PATH = '/static'  # URL mount point for the media files
TEMPLATES_ROOT = os.path.join(os.path.dirname(__file__), 'templates')
BOOK_COVER_EXTENSIONS = ('.jpg', '.jpeg', '.png')  # recognized cover images

# Shared Jinja2 environment for the feed/episode templates.
jinja2_env = Environment(loader=FileSystemLoader(TEMPLATES_ROOT))
class Episode(object):
    """Podcast episode backed by a single local audio file.

    Metadata (title, date, mimetype, cover image, description) is read
    from the file's tags via mutagen; URLs are built under the channel's
    root URL and static mount point.
    """

    def __init__(self, filename, relative_dir, root_url, title_format='(unknown){title}'):
        self.filename = filename
        self.relative_dir = relative_dir
        self.root_url = root_url
        self.length = os.path.getsize(filename)
        self.tags = mutagen.File(self.filename, easy=True)
        self.title_format = title_format
        # NOTE: a stray debug `print(self.tags)` was removed here.
        try:
            self.id3 = ID3(self.filename)
        except Exception:
            # Not every audio format carries ID3 frames (e.g. ogg, m4b).
            self.id3 = None

    # Episodes sort by publication date (see the `date` property).
    def __lt__(self, other):
        return self.date < other.date

    def __gt__(self, other):
        return self.date > other.date

    def __cmp__(self, other):
        a, b = self.date, other.date
        return (a > b) - (a < b)  # Python3 cmp() equivalent

    def as_xml(self):
        """Return episode item XML"""
        template = jinja2_env.get_template('episode.xml')
        return template.render(
            title=escape(self.title),
            url=quoteattr(self.url),
            guid=escape(self.url),
            mimetype=self.mimetype,
            length=self.length,
            date=formatdate(self.date),
            image_url=self.image,
            description=self.description,
        )

    def as_html(self):
        """Return episode item html"""
        filename = os.path.basename(self.filename)
        directory = os.path.split(os.path.dirname(self.filename))[-1]
        template = jinja2_env.get_template('episode.html')
        return template.render(
            title=escape(self.title),
            url=self.url,
            filename=filename,
            directory=directory,
            mimetype=self.mimetype,
            length=humanize.naturalsize(self.length),
            date=formatdate(self.date),
            image_url=self.image,
            description=self.description,
        )

    def get_tag(self, name):
        """Return the first value of tag `name`, or None when absent."""
        try:
            return self.tags[name][0]
        except (KeyError, IndexError):
            pass

    def _to_url(self, filepath):
        """Map a file's basename to its URL below the static mount point."""
        fn = os.path.basename(filepath)
        path = STATIC_PATH + '/' + self.relative_dir + '/' + fn
        path = re.sub(r'//', '/', path)  # collapse duplicate slashes
        url = self.root_url + pathname2url(path)
        return url

    @property
    def title(self):
        """Return the title rendered from `title_format` and the file tags;
        falls back to the bare filename on any formatting failure."""
        filename = os.path.splitext(os.path.basename(self.filename))[0]
        try:
            args = defaultdict(lambda: '<unset>', {'filename': filename})
            for key, value in self.tags.items():
                args[key.lower()] = value[0]  # use first entry only
            text = self.title_format.format_map(args)
        except Exception:
            print('Failed setting title for file {}. Using filename as title.'.format(filename))
            text = filename
        return text

    @property
    def url(self):
        """Return episode url"""
        return self._to_url(self.filename)

    @property
    def date(self):
        """Return episode date as unix timestamp"""
        dt = self.get_tag('date')
        if dt:
            # Try tag formats from most to least specific.
            formats = [
                '%Y-%m-%d:%H:%M:%S',
                '%Y-%m-%d:%H:%M',
                '%Y-%m-%d:%H',
                '%Y-%m-%d',
                '%Y-%m',
                '%Y',
            ]
            for fmt in formats:
                try:
                    dt = time.mktime(time.strptime(dt, fmt))
                    break
                except ValueError:
                    pass
            else:
                dt = None
        if not dt:
            # Fall back to the file's modification time.
            dt = os.path.getmtime(self.filename)
        return dt

    @property
    def mimetype(self):
        """Return file mimetype name"""
        if self.filename.endswith('m4b'):
            # Not in the mimetypes registry; podcast clients expect this.
            return 'audio/x-m4b'
        else:
            return mimetypes.guess_type(self.filename)[0]

    @property
    def image(self):
        """Return the URL of a cover image next to the file, or None."""
        directory = os.path.split(self.filename)[0]
        image_files = []
        for fn in os.listdir(directory):
            ext = os.path.splitext(fn)[1]
            if ext.lower() in BOOK_COVER_EXTENSIONS:
                image_files.append(fn)
        if len(image_files) > 0:
            # fn is a bare filename; _to_url() only uses the basename anyway.
            abs_path_image = image_files[0]
            return self._to_url(abs_path_image)
        else:
            return None

    @property
    def description(self):
        """Return the 'description' tag value, or '' when unavailable."""
        try:
            return self.tags['description'][0]
        except Exception:
            return ''
class Channel(object):
    """Podcast channel: walks `root_dir` and yields an Episode per
    audio (and optionally video) file found."""

    def __init__(self, root_dir, root_url, host, port, title, link, debug=False, video=False, title_format='(unknown){title}'):
        self.root_dir = root_dir or os.getcwd()
        self.root_url = root_url
        self.host = host
        self.port = int(port)
        # Feed link defaults to the served root URL.
        self.link = link or self.root_url
        # Feed title defaults to the directory name.
        self.title = title or os.path.basename(
            os.path.abspath(self.root_dir.rstrip('/')))
        self.description = 'Feed generated by <a href="%s">Podcats</a>.' % __url__
        self.debug = debug
        self.video = video
        self.title_format = title_format

    def __iter__(self):
        # Recursively yield episodes for every media file under root_dir.
        for root, _, files in os.walk(self.root_dir):
            relative_dir = root[len(self.root_dir):]
            for fn in files:
                filepath = os.path.join(root, fn)
                mimetype = mimetypes.guess_type(filepath)[0]
                # m4b is special-cased because mimetypes does not know it.
                if (mimetype and 'audio' in mimetype
                        or filepath.endswith('m4b')
                        or (mimetype and 'video' in mimetype and self.video is True)
                        ):
                    yield Episode(filepath, relative_dir, self.root_url, title_format=self.title_format)

    def as_xml(self):
        """Return channel XML with all episode items"""
        template = jinja2_env.get_template('feed.xml')
        return template.render(
            title=escape(self.title),
            description=escape(self.description),
            link=escape(self.link),
            items=u''.join(episode.as_xml() for episode in sorted(self))
        ).strip()

    def as_html(self):
        """Return channel HTML with all episode items"""
        template = jinja2_env.get_template('feed.html')
        return template.render(
            title=escape(self.title),
            description=self.description,
            link=escape(self.link),
            items=u''.join(episode.as_html() for episode in sorted(self)),
        ).strip()
def serve(channel):
    """Serve podcast channel and episodes over HTTP.

    Routes: '/' -> RSS feed XML, WEB_PATH -> HTML listing, and the media
    directory is mounted under STATIC_PATH via Flask's static handling.
    """
    server = Flask(
        __name__,
        static_folder=channel.root_dir,
        static_url_path=STATIC_PATH,
    )
    # Feed is regenerated on every request (picks up new files).
    server.route('/')(
        lambda: Response(
            channel.as_xml(),
            content_type='application/xml; charset=utf-8')
    )
    server.add_url_rule(
        WEB_PATH,
        view_func=channel.as_html,
        methods=['GET'],
    )
    server.run(host=channel.host, port=channel.port, debug=channel.debug, threaded=True)
def main():
    """CLI entry point: build the Channel from the parsed arguments and
    either print the feed/HTML or start the web server."""
    args = parser.parse_args()
    url = 'http://' + args.host + ':' + args.port

    channel = Channel(
        root_dir=path.abspath(args.directory),
        root_url=url,
        host=args.host,
        port=args.port,
        title=args.title,
        link=args.link,
        debug=args.debug,
        video=args.video,
        title_format=args.title_format
    )
    if args.action == 'generate':
        print(channel.as_xml())
    elif args.action == 'generate_html':
        print(channel.as_html())
    else:
        # 'serve': print the endpoints, then block in the web server.
        print('Welcome to the Podcats web server!')
        print('\nYour podcast feed is available at:\n')
        print('\t' + channel.root_url + '\n')
        print('The web interface is available at\n')
        print('\t{url}{web_path}\n'.format(url=url, web_path=WEB_PATH))
        serve(channel)
# Module-level CLI parser; main() reads it. Defined after the functions it
# documents so the module docstring/metadata stay at the top.
parser = argparse.ArgumentParser(
    description='Podcats: podcast feed generator and server <%s>.' % __url__
)
parser.add_argument(
    '--host',
    default='localhost',
    help='listen hostname or IP address'
)
parser.add_argument(
    '--port',
    default='5000',
    help='listen tcp port number'
)
parser.add_argument(
    'action',
    metavar='COMMAND',
    choices=['generate', 'generate_html', 'serve'],
    help='`generate` the RSS feed to the terminal, or'
         '`serve` the generated RSS as well as audio files'
         ' via the built-in web server'
)
parser.add_argument(
    'directory',
    metavar='DIRECTORY',
    help='path to a directory with episode audio files',
)
parser.add_argument(
    '--debug',
    action="store_true",
    help='Serve with debug mode on'
)
parser.add_argument('--title', help='optional feed title')
parser.add_argument('--link', help='optional feed link')
parser.add_argument(
    '--video',
    action="store_true",
    help='include video files as well'
)
parser.add_argument(
    '--title-format',
    dest='title_format',
    default='(unknown){title}',
    help='title format string and arguments'
)


if __name__ == '__main__':
    main()
| 29.988406 | 128 | 0.593853 | 6,546 | 0.632708 | 573 | 0.055384 | 2,237 | 0.216219 | 0 | 0 | 2,227 | 0.215252 |
630b33dd7a6cf0ba0ee698de8f04e2a629b7f3ca | 477 | py | Python | 1301-1400/1387-Binary Trees With Factors/1387-Binary Trees With Factors.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1301-1400/1387-Binary Trees With Factors/1387-Binary Trees With Factors.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1301-1400/1387-Binary Trees With Factors/1387-Binary Trees With Factors.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | class Solution:
"""
@param A:
@return: nothing
"""
def numFactoredBinaryTrees(self, A):
A.sort()
MOD = 10 ** 9 + 7
dp = {}
for j in range(len(A)):
dp[A[j]] = 1
for i in range(j):
if A[j] % A[i] == 0:
num = A[j] // A[i]
if num in dp:
dp[A[j]] = (dp[A[j]] + dp[A[i]] * dp[num]) % MOD
return sum(dp.values()) % MOD
| 26.5 | 72 | 0.358491 | 476 | 0.997904 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.098532 |
630b761bbb89cc51497f1ffa2e537867746f74b6 | 3,254 | py | Python | Machine learning/ML/ARIMA/DS1_ar_model_2.py | warpalatino/public | f04ce183799bcdd2fb8dc376d41d286314c19460 | [
"MIT"
] | 1 | 2021-01-04T10:37:16.000Z | 2021-01-04T10:37:16.000Z | Machine learning/ML/ARIMA/DS1_ar_model_2.py | warpalatino/public | f04ce183799bcdd2fb8dc376d41d286314c19460 | [
"MIT"
] | null | null | null | Machine learning/ML/ARIMA/DS1_ar_model_2.py | warpalatino/public | f04ce183799bcdd2fb8dc376d41d286314c19460 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.graphics.tsaplots as sgt
from statsmodels.tsa.arima_model import ARMA
from scipy.stats.distributions import chi2
import statsmodels.tsa.stattools as sts
# ------------------------
# load data
# ----------
raw_csv_data = pd.read_csv("../data/Index2018.csv")
df_comp=raw_csv_data.copy()
# -- make the index a datetime object
df_comp.date = pd.to_datetime(df_comp.date, dayfirst = True)
df_comp.set_index("date", inplace=True)
df_comp=df_comp.asfreq('b')
# -- fill na values
df_comp=df_comp.fillna(method='ffill')
# -- redefine column names and add a new column on returns
df_comp['market_value']=df_comp.ftse
df['returns'] = df.market_value.pct_change(1).mul(100)
df = df.iloc[1:]
# -- delete redundant data
del df_comp['spx']
del df_comp['dax']
del df_comp['ftse']
del df_comp['nikkei']
# split dataset (on straight data = prices)
# ----------
size = int(len(df_comp) * 0.8)  # 80/20 chronological train/test split
df = df_comp.iloc[:size]
df_test = df_comp.iloc[size:]

# review ACF and PACF (in reality is more functional to run auto_arima vs checking ACF/PACF manually, but this is for sake of example)
# ----------
sgt.plot_acf(df.returns, lags=40, zero = False)
plt.title("ACF FTSE Returns", size=24)
sgt.plot_pacf(df.returns, lags = 40, zero = False, method = ('ols'))
plt.title("PACF FTSE Returns", size=24)
plt.show()
# => we know data is non-stationary from a previous exercise

# select AR model (by looking to PACF here) and iterating through more models...until LLR will stop going down
# ----------
model_ret_ar_1 = ARMA(df.returns, order = (1,0)).fit()
# NOTE(review): `model_ar` is undefined — this print presumably intended
# `model_ret_ar_1.summary()`.
print(model_ar.summary())
print('----------')
model_ar_4 = ARMA(df.returns, order=(4,0)).fit()
print(model_ar_4.summary())
print('----------')
model_ar_6 = ARMA(df.returns, order=(6,0)).fit()
print(model_ar_6.summary())
print('----------')
# => by comparing the LLR stat and AIC/BIC from models' summary we can see what is the best order ... (we would find out AR(6,0))
# => remember that auto_arima is much easier...

# compare LLR results across models
# ----------
def LLR_test(mod_1, mod_2, DF=1):
    """Log-likelihood ratio test between two nested models.

    Accepts either unfitted statsmodels model objects (fitted here, as in
    the original) or already-fitted results objects exposing `.llf` — the
    call sites in this script pass fitted results, which the original
    implementation re-`fit()` and crashed on.

    DF: difference in the number of estimated parameters.
    Returns the chi-square survival p-value rounded to 3 decimals.
    """
    L1 = mod_1.llf if hasattr(mod_1, "llf") else mod_1.fit().llf
    L2 = mod_2.llf if hasattr(mod_2, "llf") else mod_2.fit().llf
    LR = (2*(L2-L1))
    p = chi2.sf(LR, DF).round(3)
    return p
# NOTE(review): `model_ar_1` and `model_ar_7` are undefined (probably meant
# `model_ret_ar_1` and a not-yet-fitted order-7 model), so both prints fail.
print('LLR test 1: ', LLR_test(model_ar_1, model_ar_4, DF=3))
print('LLR test 2: ', LLR_test(model_ar_4, model_ar_7, DF=3))

# now let's normalise values and run the AR model again to see what happens ...
# ----------
benchmark = df.market_value.iloc[0]
df['norm'] = df.market_value.div(benchmark).mul(100)
bench_ret = df.returns.iloc[0]
df['norm_ret'] = df.returns.div(bench_ret).mul(100)
# we jump straight to order 6 here without repeating previous steps (as we see on class notes)
model_norm_ret_ar_6 = ARMA(df.norm_ret, order=(6,0)).fit()
print(model_norm_ret_ar_6.summary())

# analyzing residuals
# ----------
# NOTE(review): this assigns the fitted-results OBJECT to the column; it
# should almost certainly be `model_norm_ret_ar_6.resid`.
df['res_ret'] = model_norm_ret_ar_6
df.res_ret.mean()
df.res_ret.var()
# -- let's see if there is any significant error that the model has missed (via ACF or PACF)
sgt.plot_acf(df.res_ret, zero = False, lags = 40)
plt.title("ACF Of Residuals for Returns",size=24)
# -- plotting all residuals
df.res_ret.plot(figsize=(20,5))
plt.title("Residuals of Returns", size=24)
# plt.show()
| 29.853211 | 134 | 0.690227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,413 | 0.434235 |
630d031d30f4b3afe57f014af81f537273595260 | 2,861 | py | Python | cytoskeleton_analyser/database/sqlite_alchemy_orm/containers/cell_elements.py | vsukhor/cytoskeleton-analyser | 681a1f6ba1381a5fb293f2310fce5e97d400cfcb | [
"BSD-3-Clause"
] | null | null | null | cytoskeleton_analyser/database/sqlite_alchemy_orm/containers/cell_elements.py | vsukhor/cytoskeleton-analyser | 681a1f6ba1381a5fb293f2310fce5e97d400cfcb | [
"BSD-3-Clause"
] | null | null | null | cytoskeleton_analyser/database/sqlite_alchemy_orm/containers/cell_elements.py | vsukhor/cytoskeleton-analyser | 681a1f6ba1381a5fb293f2310fce5e97d400cfcb | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021 Valerii Sukhorukov. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Subclasses of container base class for tailored to cell components.
"""
from collections import namedtuple
from . import container
from .. import models
class MembraneNucleus(container.Optional):
    """Container class for configuration of cell nuclear membrane.

    Backed by the ``models.ConfigNucleus`` ORM model; derives from
    ``container.Optional`` (unlike the MTOC containers below).
    """

    # ORM model class this container maps onto.
    model = models.ConfigNucleus
class MembranePlasma(container.Optional):
    """Container class for configuration of cell plasma membrane.

    Backed by the ``models.ConfigPlasma`` ORM model.
    """

    # ORM model class this container maps onto.
    model = models.ConfigPlasma
class InSpace(container.Base):
    """Container class for configuration of unanchored microtubule MTOC.

    Backed by the ``models.ConfigMtocInSpace`` ORM model.
    """

    # ORM model class this container maps onto.
    model = models.ConfigMtocInSpace
class Golgi(container.Base):
    """Container class for configuration of Golgi-type MTOC.

    Backed by the ``models.ConfigMtocGolgi`` ORM model.
    """

    # ORM model class this container maps onto.
    model = models.ConfigMtocGolgi
class Centrosome(container.Base):
    """Container class for configuration of centrosome-type MTOC.

    Backed by the ``models.ConfigMtocCentrosome`` ORM model.
    """

    # ORM model class this container maps onto.
    model = models.ConfigMtocCentrosome
class Nucleus(container.Base):
    """Container class for configuration of Nucleus-type MTOC.

    Backed by the ``models.ConfigMtocNucleus`` ORM model.
    """

    # ORM model class this container maps onto.
    model = models.ConfigMtocNucleus
#: Types of Microtubule Organizing Centers (MTOCs).
Mtoc = namedtuple('Mtoc', 'InSpace Golgi Centrosome Nucleus')

# Microtubule Organizing Centers (MTOCs): one container class per MTOC type,
# grouped for attribute-style access (mtoc.Golgi, mtoc.Centrosome, ...).
mtoc = Mtoc(InSpace, Golgi, Centrosome, Nucleus)
| 35.320988 | 79 | 0.739602 | 867 | 0.303041 | 0 | 0 | 0 | 0 | 0 | 0 | 2,205 | 0.77071 |
630e334606bd98d4e81def6c3b2ffa679f3a7e67 | 948 | py | Python | snow/views.py | cdmaok/web-sentiment | 15fbe33327f9272035393d8c9991c06933163cc2 | [
"Apache-2.0"
] | null | null | null | snow/views.py | cdmaok/web-sentiment | 15fbe33327f9272035393d8c9991c06933163cc2 | [
"Apache-2.0"
] | null | null | null | snow/views.py | cdmaok/web-sentiment | 15fbe33327f9272035393d8c9991c06933163cc2 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
from snownlp import SnowNLP
# Create your views here.
@csrf_exempt
def index(request):
    # Sentiment endpoint (Python 2 / Django).  The text to analyse arrives
    # as the 'text' query parameter on GET, or inside a JSON-encoded body
    # on POST; the result dict is serialized back to the client as JSON.
    message = {}
    print request.method
    if request.method == 'GET':
        message = construct_message(request.GET,'text')
    elif request.method == 'POST':
        # Debug trace of the raw request object.
        print '%r' %request
        message = construct_message(json.loads(request.body),'text')
    else:
        # Other HTTP methods fall through and return an empty dict.
        print 'invalid request'
    return HttpResponse(json.dumps(message))
def construct_message(parameter, key):
    """Build the JSON-serializable response dict for a sentiment request.

    :param parameter: mapping holding the request arguments (a Django
        QueryDict for GET, or a dict parsed from the JSON POST body).
    :param key: name of the argument carrying the text to analyse.
    :return: dict with 'Code' (an HTTP-style status) and 'Message' (the
        SnowNLP sentiment score, or an error description).
    """
    message = {}
    # `key not in parameter` replaces the deprecated dict.has_key(), which
    # was removed in Python 3; behavior is identical on Python 2.
    if key not in parameter:
        message['Code'] = 400
        message['Message'] = 'invalid request'
    else:
        text = parameter[key]
        if text.strip() == '':
            message['Code'] = 406
            message['Message'] = 'empty text'
        else:
            s = SnowNLP(text)
            score = s.sentiments
            # Parenthesized print with an explicit format keeps the output
            # identical on Python 2 while being valid Python 3 syntax.
            print('%s %s' % (text, score))
            message['Code'] = 200
            message['Message'] = score
    return message
| 23.7 | 62 | 0.709916 | 0 | 0 | 0 | 0 | 339 | 0.357595 | 0 | 0 | 145 | 0.152954 |
630eeba371e059df6ca1c18dab67c68b23c5a057 | 10,677 | py | Python | Products/CMFCore/FSPageTemplate.py | fulv/Products.CMFCore | 1d6ce101b10aaefba8aa917b6aa404e6c49e254d | [
"ZPL-2.1"
] | null | null | null | Products/CMFCore/FSPageTemplate.py | fulv/Products.CMFCore | 1d6ce101b10aaefba8aa917b6aa404e6c49e254d | [
"ZPL-2.1"
] | null | null | null | Products/CMFCore/FSPageTemplate.py | fulv/Products.CMFCore | 1d6ce101b10aaefba8aa917b6aa404e6c49e254d | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Customizable page templates that come from the filesystem.
"""
import re
import six
from six import get_unbound_function
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from App.special_dtml import DTMLFile
from OFS import bbb
from Products.PageTemplates.PageTemplate import PageTemplate
from Products.PageTemplates.utils import charsetFromMetaEquiv
from Products.PageTemplates.utils import encodingFromXMLPreamble
from Products.PageTemplates.ZopePageTemplate import Src
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Products.PageTemplates.ZopePageTemplate import preferred_encodings
from Shared.DC.Scripts.Script import Script
from .DirectoryView import registerFileExtension
from .DirectoryView import registerMetaType
from .FSObject import FSObject
from .permissions import FTPAccess
from .permissions import View
from .permissions import ViewManagementScreens
from .utils import _checkConditionalGET
from .utils import _dtmldir
from .utils import _setCacheHeaders
# Matches an XML declaration at the start of a (bytes) document and captures
# its declared encoding, if present.
xml_detect_re = re.compile(
    br'^\s*<\?xml\s+(?:[^>]*?encoding=["\']([^"\'>]+))?')
# Extracts the charset parameter from a content-type-like string.
charset_re = re.compile(r'charset.*?=.*?(?P<charset>[\w\-]*)',
                        re.I | re.M | re.S)

# Unique sentinel object (distinct from None).
_marker = object()
class FSPageTemplate(FSObject, Script, PageTemplate):
    """Wrapper for Page Template.

    A filesystem-backed, customizable page template: content is read from
    ``self._filepath`` and re-read on change, while most of the template
    machinery is borrowed (unbound) from ZopePageTemplate.
    """

    meta_type = 'Filesystem Page Template'
    _owner = None  # Unowned

    manage_options = (
        {'label': 'Customize', 'action': 'manage_main'},)

    security = ClassSecurityInfo()
    security.declareObjectProtected(View)

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'manage_main')
    manage_main = DTMLFile('custpt', _dtmldir)

    # Declare security for unprotected PageTemplate methods.
    security.declarePrivate('pt_edit', 'write')  # NOQA: flake8: D001

    def __init__(self, id, filepath, fullname=None, properties=None):
        # FSObject handles id/path bookkeeping; bindings come from the
        # ZopePageTemplate defaults copied onto this class below.
        FSObject.__init__(self, id, filepath, fullname, properties)
        self.ZBindings_edit(self._default_bindings)

    def _createZODBClone(self):
        """Create a ZODB (editable) equivalent of this object."""
        obj = ZopePageTemplate(self.getId(), self._text, self.content_type)
        obj.expand = 0
        obj.write(self.read())
        return obj

    # def ZCacheable_isCachingEnabled(self):
    #     return 0

    def _readFile(self, reparse):
        """Read the data from the filesystem.

        When *reparse* is true, reloads the template text, detects its
        content type / character encoding, decodes it to text and feeds it
        to ``write`` (which recompiles the template).
        """
        if reparse:
            if six.PY2:
                # not 'rb', as this is a text file!
                file = open(self._filepath, 'rU')
            else:
                file = open(self._filepath, 'br')
            try:
                data = file.read()
                if not six.PY2:
                    # Normalize line endings by hand (Py3 reads bytes here).
                    data = data.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
            finally:
                file.close()

            # If we already have a content_type set it must come from a
            # .metadata file and we should always honor that. The content
            # type is initialized as text/html by default, so we only
            # attempt further detection if the default is encountered.

            # One previous misbehavior remains: It is not possible to
            # force a text/html type if parsing detects it as XML.
            encoding = None
            preferred = preferred_encodings[:]
            if getattr(self, 'content_type', 'text/html') == 'text/html':
                xml_info = xml_detect_re.match(data)
                if xml_info:
                    # Smells like xml
                    # set "content_type" from the XML declaration
                    encoding = xml_info.group(1) or 'utf-8'
                    self.content_type = 'text/xml; charset=%s' % encoding

            if not isinstance(data, six.text_type):
                # Still bytes: work out a charset, preferring (in order) an
                # explicit XML encoding, a 'charset' property / content-type
                # parameter, or an HTML meta-equiv declaration.
                if encoding is None:
                    charset = getattr(self, 'charset', None)
                    if charset is None:
                        if self.content_type.startswith('text/html'):
                            mo = charset_re.search(self.content_type)
                            if mo:
                                charset = mo.group(1).lower()
                            if charset is None:
                                charset = charsetFromMetaEquiv(data)
                        elif self.content_type.startswith('text/xml'):
                            charset = encodingFromXMLPreamble(data)
                        else:
                            raise ValueError('Unsupported content_type: %s' %
                                             self.content_type)
                    if charset is not None:
                        preferred.insert(0, charset)
                else:
                    preferred.insert(0, encoding)

                # Try each candidate encoding; fall back to the default
                # text coercion if none decodes cleanly.
                for enc in preferred:
                    try:
                        data = six.text_type(data, enc)
                        if isinstance(data, six.text_type):
                            break
                    except UnicodeDecodeError:
                        continue
                else:
                    data = six.text_type(data)

            self.write(data)

    @security.private
    def read(self):
        # Tie in on an opportunity to auto-update
        self._updateFromFS()
        return FSPageTemplate.inheritedAttribute('read')(self)

    # The following is mainly taken from ZopePageTemplate.py

    expand = 0

    output_encoding = 'utf-8'

    __defaults__ = None
    __code__ = ZopePageTemplate.__code__
    _default_bindings = ZopePageTemplate._default_bindings

    security.declareProtected(View, '__call__')  # NOQA: flake8: D001

    def pt_macros(self):
        # Tie in on an opportunity to auto-reload
        self._updateFromFS()
        return FSPageTemplate.inheritedAttribute('pt_macros')(self)

    def pt_render(self, source=0, extra_context={}):
        """Render the template, honoring conditional GETs and cache headers."""
        self._updateFromFS()  # Make sure the template has been loaded.
        if not source:
            # If we have a conditional get, set status 304 and return
            # no content
            if _checkConditionalGET(self, extra_context):
                return ''
        result = FSPageTemplate.inheritedAttribute('pt_render')(
            self, source, extra_context)
        if not source:
            _setCacheHeaders(self, extra_context)
        return result

    @security.protected(ViewManagementScreens)
    def pt_source_file(self):
        """ Return a file name to be compiled into the TAL code.
        """
        return 'file:%s' % self._filepath

    security.declarePrivate('_ZPT_exec')  # NOQA: flake8: D001
    _ZPT_exec = get_unbound_function(ZopePageTemplate._exec)

    @security.private
    def _exec(self, bound_names, args, kw):
        """Call a FSPageTemplate"""
        try:
            response = self.REQUEST.RESPONSE
        except AttributeError:
            response = None
        # Read file first to get a correct content_type default value.
        self._updateFromFS()

        if 'args' not in kw:
            kw['args'] = args
        bound_names['options'] = kw

        try:
            response = self.REQUEST.RESPONSE
            if 'content-type' not in response.headers:
                response.setHeader('content-type', self.content_type)
        except AttributeError:
            pass

        security = getSecurityManager()
        bound_names['user'] = security.getUser()

        # Retrieve the value from the cache.
        keyset = None
        if self.ZCacheable_isCachingEnabled():
            # Prepare a cache key.
            keyset = {
                # Why oh why?
                # All this code is cut and paste
                # here to make sure that we
                # dont call _getContext and hence can't cache
                # Annoying huh?
                'here': self.aq_parent.getPhysicalPath(),
                'bound_names': bound_names}
            result = self.ZCacheable_get(keywords=keyset)
            if result is not None:
                # Got a cached value.
                return result

        # Execute the template in a new security context.
        security.addContext(self)
        try:
            result = self.pt_render(extra_context=bound_names)
            if keyset is not None:
                # Store the result in the cache.
                self.ZCacheable_set(result, keywords=keyset)
            return result
        finally:
            security.removeContext(self)

        # NOTE(review): unreachable — the try/finally above always returns.
        return result

    # Copy over more methods
    if bbb.HAS_ZSERVER:
        security.declareProtected(FTPAccess,  # NOQA: flake8: D001
                                  'manage_FTPget')
        manage_FTPget = get_unbound_function(ZopePageTemplate.manage_FTPget)

    security.declareProtected(View, 'get_size')  # NOQA: flake8: D001
    get_size = get_unbound_function(ZopePageTemplate.get_size)
    getSize = get_size

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'PrincipiaSearchSource')
    PrincipiaSearchSource = get_unbound_function(
        ZopePageTemplate.PrincipiaSearchSource)

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'document_src')
    document_src = get_unbound_function(ZopePageTemplate.document_src)

    pt_getContext = get_unbound_function(ZopePageTemplate.pt_getContext)

    source_dot_xml = Src()
# Expose the source view under dotted ids, which cannot be written as
# ordinary class attributes.
setattr(FSPageTemplate, 'source.xml', FSPageTemplate.source_dot_xml)
setattr(FSPageTemplate, 'source.html', FSPageTemplate.source_dot_xml)

InitializeClass(FSPageTemplate)

# Let the directory-view machinery instantiate FSPageTemplate for
# template-like filename extensions.
registerFileExtension('pt', FSPageTemplate)
registerFileExtension('zpt', FSPageTemplate)
registerFileExtension('html', FSPageTemplate)
registerFileExtension('htm', FSPageTemplate)
registerMetaType('Page Template', FSPageTemplate)
| 36.565068 | 78 | 0.60429 | 8,313 | 0.778589 | 0 | 0 | 2,219 | 0.20783 | 0 | 0 | 2,868 | 0.268615 |
630fea4816a8abd9eaf09ba98c364ef95304a04e | 7,559 | py | Python | branches/g3d-8.0-64ffmpeg-win/bin/ice/doxygen.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | branches/g3d-8.0-64ffmpeg-win/bin/ice/doxygen.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | branches/g3d-8.0-64ffmpeg-win/bin/ice/doxygen.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | [
"BSD-3-Clause"
] | null | null | null | # doxygen.py
#
# Doxygen Management
from utils import *
import glob
##############################################################################
# Doxygen Management #
##############################################################################
"""
Called from buildDocumentation.
"""
def createDoxyfile(state):
# Create the template, surpressing Doxygen's usual output
shell("doxygen -g Doxyfile > /dev/null")
# Edit it
f = open('Doxyfile', 'r+')
text = f.read()
# TODO: excludes
propertyMapping = {
'PROJECT_NAME' : '"' + state.projectName.capitalize() + '"',
'OUTPUT_DIRECTORY' : '"' + pathConcat(state.buildDir, 'doc') + '"',
'EXTRACT_ALL' : "YES",
'STRIP_FROM_PATH' : '"' + state.rootDir + '"',
'TAB_SIZE' : "4",
'QUIET' : 'YES',
'WARN_IF_UNDOCUMENTED' : 'NO',
'WARN_NO_PARAMDOC' : 'NO',
'HTML_OUTPUT' : '"./"',
'GENERATE_LATEX' : 'NO',
'RECURSIVE' : 'YES',
'SORT_BRIEF_DOCS' : 'YES',
'MACRO_EXPANSION' : 'YES',
'JAVADOC_AUTOBRIEF' : 'YES',
'EXCLUDE' : 'build graveyard temp doc-files data-files',
"ALIASES" : ('"cite=\par Referenced Code:\\n " ' +
'"created=\par Created:\\n" ' +
'"edited=\par Last modified:\\n" ' +
'"maintainer=\par Maintainer:\\n" ' +
'"units=\par Units:\\n"')
}
# Rewrite the text by replacing any of the above properties
newText = ""
for line in string.split(text,"\n"):
newText += (doxyLineRewriter(line, propertyMapping) + "\n")
# Write the file back out
f.seek(0)
f.write(newText)
f.close()
#########################################################################


def doxyLineRewriter(lineStr, hash):
    """Return *lineStr*, substituting its value when it assigns a mapped property.

    Called from createDoxyfile.

    :param lineStr: one line of the Doxyfile template.
    :param hash: mapping of Doxygen property names to replacement values.
    :return: the rewritten assignment, or the original line for blank
        lines, comment lines, and properties not present in *hash*.
    """
    line = lineStr.strip()  # remove leading and trailing whitespace
    if line == '':  # it's a blank line
        return lineStr
    elif line[0] == '#':  # it's a comment line
        return lineStr
    else:  # here we know it's a property assignment
        # str methods replace the deprecated string-module functions
        # (string.strip / string.find), which were removed in Python 3.
        prop = line[0:line.find("=")].strip()
        if prop in hash:
            print(prop + ' = ' + hash[prop])
            return prop + ' = ' + hash[prop]
        else:
            return lineStr
class DoxygenRefLinkRemapper:
    # Given the output of a Doxygen directory, rewrites the output .html
    # files so that G3D::ReferenceCountedPointer<X> instances link to X
    # instead of ReferenceCountedPointer.
    #
    # The current implementation only works for the G3D build itself.
    # It is intended to be expanded in the future to support projects
    # built against G3D.

    def remap(self, sourcePath, remapPath):
        """Public entry point: index *sourcePath*, rewrite links in *remapPath*."""
        self.__buildValidRefs(sourcePath)
        self.__remapRefLinks(remapPath)

    def __buildValidRefs(self, sourcePath):
        """Populate self.validRefs: {qualified Ref name: documentation filename}."""
        # initialize the ref name mapping
        self.validRefs = {}
        # build list of valid source files
        sources = os.listdir(sourcePath)
        # discard non-class/struct documentation files
        sources = filter(lambda filename: re.search('^class|^struct', filename), sources)
        sources = filter(lambda filename: not re.search('-members.html$', filename), sources)
        # discard filenames with encoded spaces (implies templates) for now
        sources = filter(lambda filename: not re.search('_01', filename), sources)
        # build the dictionary mapping valid ref names to their documentation
        for filename in sources:
            memberRefName, nonmemberRefName = self.__buildRefNames(filename)
            self.validRefs.update({memberRefName:filename, nonmemberRefName:filename})

    def __buildRefNames(self, filename):
        """Return the two Ref spellings a class may be linked under."""
        # build list of qualified scopes from filename
        capitalizedScopes = self.__buildScopes(filename)
        # build the qualified name used as prefix
        qualifiedPrefix = ''
        for scope in capitalizedScopes:
            qualifiedPrefix += scope + '::'
        # build the member typedef ref name (e.g., G3D::Class::Ref)
        memberRefName = qualifiedPrefix + 'Ref'
        # build the non-member ref name (e.g., G3D::ClassRef)
        nonmemberRefName = qualifiedPrefix[:-2] + 'Ref'
        return memberRefName, nonmemberRefName

    def __buildScopes(self, filename):
        """Decode a Doxygen filename into its list of C++ scope names."""
        # remove the file type ('class', 'struct', '.html') and separate the scopes ('::')
        sansType = re.split('class|struct', filename)[1]
        sansType = re.split('.html', sansType)[0]
        # '_1_1' is Doxygen's encoding of '::' in filenames.
        rawScopes = re.split('_1_1', sansType)
        # re-capitalize letters: Doxygen encodes an uppercase letter as
        # '_' + lowercase, so undo that transformation.
        capitalizedScopes = []
        for scope in rawScopes:
            scope = re.sub('_(?<=_)\w', lambda match: match.group(0)[1].upper(), scope)
            capitalizedScopes.append(scope)
        return capitalizedScopes

    def __remapRefLinks(self, remapPath):
        """Rewrite ReferenceCountedPointer href targets in every html file."""
        # initialize the current remapping filename
        self.currentRemapFilename = ''
        # loop through all valid html/documentation files in the remap path
        for filename in glob.glob(os.path.join(os.path.normcase(remapPath), '*.html')):
            self.currentRemapFilename = filename
            # will hold updated file contents
            remappedBuffer = ''
            # read each line in file and replace any matched ref links
            f = open(filename)
            try:
                for line in f:
                    remappedBuffer += re.sub('(href="class_g3_d_1_1_reference_counted_pointer.html">)([a-zA-Z0-9:]+)(</a>)', self.__linkMatchCallack, line)
            finally:
                f.close()
            #assume lines were read and remapped correctly, write new documentation
            writeFile(filename, remappedBuffer)

    def __linkMatchCallack(self, match):
        """re.sub callback: return the remapped anchor, or the match unchanged."""
        # if ref search fails, build the fully qualified ref name that we can search the dictionary for
        # e.g., SuperShader::Pass::Ref would be qualified as G3D::SuperShader::Pass::Ref
        # ref links found in non-struct/class files will be matched as-is
        # note: this would have to be redone if multiple source directories is implemented
        if match.group(2) in self.validRefs:
            return 'href="' + self.validRefs[match.group(2)] + '">' + match.group(2) + match.group(3)
        elif re.search('class|struct', self.currentRemapFilename):
            # get list of scopes from current filename
            qualifiedScopes = self.__buildScopes(self.currentRemapFilename)
            # build a prefix including all of the scopes except for current one (should be class/struct)
            # NOTE(review): when numScopes == 0, qualifiedScopes[:-0] is the
            # empty list, so the first iteration re-checks the bare name —
            # harmless but presumably unintended; confirm before changing.
            for numScopes in range(0, len(qualifiedScopes)):
                qualifiedPrefix = ''
                for scope in qualifiedScopes[:-numScopes]:
                    qualifiedPrefix += scope + '::'
                qualifiedRef = qualifiedPrefix + match.group(2)
                if qualifiedRef in self.validRefs:
                    return 'href="' + self.validRefs[qualifiedRef] + '">' + match.group(2) + match.group(3)
        return match.group(0)
| 40.207447 | 155 | 0.557878 | 4,997 | 0.661066 | 0 | 0 | 0 | 0 | 0 | 0 | 3,110 | 0.41143 |
631067d54a45a87c1899e57d958a0bc4823a517a | 3,074 | py | Python | models/LASSO.py | MattAHarrington/crypto-comovement | 3812f7cd5ba61e704041acc3e272840f2a5a9cc2 | [
"MIT"
] | 1 | 2021-04-15T21:40:39.000Z | 2021-04-15T21:40:39.000Z | models/LASSO.py | MattAHarrington/crypto-comovement | 3812f7cd5ba61e704041acc3e272840f2a5a9cc2 | [
"MIT"
] | null | null | null | models/LASSO.py | MattAHarrington/crypto-comovement | 3812f7cd5ba61e704041acc3e272840f2a5a9cc2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Standard imports
import pandas as pd
import numpy as np
# Pytorch
import torch
from torch import nn
# Using sklearn's LASSO implementation
from sklearn.linear_model import Lasso
# Local Files
from models.model_interface import CryptoModel
class LASSO(CryptoModel):
    """Wrapper around the sklearn LASSO class"""

    def __init__(self, alpha=0.1, warm_start=True, verbose_training=False):
        """Create the LASSO model.

        :alpha: L1 regularization strength forwarded to sklearn's Lasso.
        :warm_start: reuse the previous fit's coefficients when refitting.
        :verbose_training: print diagnostic output during fit/predict.
        """
        # Arguments
        self.alpha = alpha
        self.verbose_training = verbose_training
        self.model = Lasso(alpha=alpha, fit_intercept=True, warm_start=warm_start)
        # set the default plotting color
        self.set_plotting_color()

    def predict(self, sample):
        """Predict the next out of sample timestep

        :sample: Vector or DataFrame of timesteps to use as input for the predictor(s).
        :returns: [batch_size, 1, n_coins] Tensor of predictions
        """
        # Collapse the middle (length-1 time) axis: sklearn expects 2-D input.
        n_samp, _, n_features = sample.shape
        yhat = self.model.predict(sample.reshape((n_samp, n_features)))
        if self.verbose_training:
            print(f'prediction: {yhat}')
        return yhat

    def train(self, training_set):
        """Train, or re-train, the LSTM and AE

        :training_set: DataFrame of training samples
        """
        # NOTE(review): the docstring mentions LSTM/AE but this class fits a
        # linear Lasso — likely copied from a sibling model; confirm.
        X, Y = [], []
        # training_set yields (data, target) pairs of torch tensors.
        for data, target in training_set:
            X.append(data.numpy())
            Y.append(target.numpy())
        X = np.vstack(X)
        Y = np.vstack(Y)
        # Collapse the singleton middle axis for sklearn's 2-D fit API.
        n_samples, _, n_features = X.shape
        X = X.reshape((n_samples, n_features))
        Y = Y.reshape((n_samples, n_features))
        self.model.fit(X, Y)
        #TODO: print out that coefficients (or at least num of) that the L1 normalization leaves
        coef = self.model.coef_
        all_zeros = np.isin([0,-0], coef)
        if self.verbose_training:
            print(f'All zeros? {all_zeros}')
            print(f'Coefs? {coef.shape}')
            if np.isin(False, all_zeros):
                print(self.model.coef_)
                print(type(self.model.coef_))
                print(self.model.coef_.shape)

    def get_fullname(self):
        """Get the full-grammar name for this model

        :returns: English phrase as string
        """
        return f"LASSO_alpha-{self.alpha}"

    def get_filename(self):
        """Get the abbreviated (file)name for this model

        :returns: Abbreviated string with underscores
        """
        return f"LASSO_alpha-{self.alpha}"

    def needs_retraining(self):
        """Does this model need regular retraining while forecasting?

        :returns: bool
        """
        return True

    def set_plotting_color(self, color="#FCB97D"):
        """Set color used for plotting

        :color: Hex value string
        """
        self.color = color

    def get_plotting_color(self):
        """return color for graphing distinction

        :returns: str of color
        """
        return self.color
| 29.27619 | 96 | 0.612882 | 2,805 | 0.912492 | 0 | 0 | 0 | 0 | 0 | 0 | 1,293 | 0.420625 |
6310d4042eb7ef64a2b5785735563119f99531f8 | 719 | py | Python | Test_Beat_Detector.py | filipmazurek/Heart-Rate-Monitor | eef5bed63357442dde3746e0443671a947f0ec3d | [
"MIT"
] | null | null | null | Test_Beat_Detector.py | filipmazurek/Heart-Rate-Monitor | eef5bed63357442dde3746e0443671a947f0ec3d | [
"MIT"
] | null | null | null | Test_Beat_Detector.py | filipmazurek/Heart-Rate-Monitor | eef5bed63357442dde3746e0443671a947f0ec3d | [
"MIT"
] | null | null | null | from Beat_Detector import BeatDetector
from SignalChoice import *
# Update window (seconds) used where only construction matters.
update_time_seconds = 20
update_time_easy = 2 # means enough data for 2 seconds.
# Synthetic signal with two clear peaks (the 10s) — the tests below assert
# it is detected as two beats.
array_easy = [0, 2, 5, 10, 5, 2, 0, 2, 5, 10, 5]
def test_get_num_beats():
    # Two peaks in array_easy should be counted as two beats.
    beat_detector = BeatDetector(update_time_seconds, SignalChoice.both)
    assert 2 == beat_detector.get_num_beats(array_easy)
def test_single_array_hr():
    beat_detector = BeatDetector(update_time_easy, SignalChoice.both)
    assert 60 == beat_detector.single_array_hr(array_easy)  # 2 beats in 2 seconds means 60 bpm
def test_find_inst_hr():
    # Feeding the same two-beat signal on both channels should still
    # yield 60 bpm.
    beat_detector = BeatDetector(update_time_easy, SignalChoice.both)
    assert 60 == beat_detector.find_instant_hr(array_easy, array_easy)
| 32.681818 | 95 | 0.769124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.095967 |
6310fb525d8e21534dcc57582305f77d4f195a47 | 1,806 | py | Python | test_ocr.py | lanstonpeng/NightOwlServer | 1810c631c44d53f885e79164c48a7cca61441cce | [
"MIT"
] | null | null | null | test_ocr.py | lanstonpeng/NightOwlServer | 1810c631c44d53f885e79164c48a7cca61441cce | [
"MIT"
] | null | null | null | test_ocr.py | lanstonpeng/NightOwlServer | 1810c631c44d53f885e79164c48a7cca61441cce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import urllib, urllib2
import tempfile
import base64
from PIL import Image
import os
# Global variables: Baidu OCR API endpoint and credentials.
API_URL = 'http://apis.baidu.com/apistore/idlocr/ocr'
# NOTE(review): API key hard-coded in source — consider moving it to an
# environment variable or config file.
API_KEY = "0c69d1b8ec1c96561cb9ca3c037d7225"
def get_image_text(img_url=None):
    """Download the image at *img_url* and OCR it via the Baidu API.

    Python 2 only (urllib/urllib2).  Returns the raw response body from
    the API on success, or None when the response is empty.
    """
    headers = {}
    # download image
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    #img_request = urllib2.Request(img_url, headers=headers)
    #img_data = urllib2.urlopen(img_request).read()
    response = opener.open(img_url)
    img_data = response.read()
    # save image to some place (delete=False so PIL can reopen by name)
    origin_img = tempfile.NamedTemporaryFile(delete=False)
    save_img = tempfile.NamedTemporaryFile(delete=False)
    origin_img.write(img_data)
    origin_img.flush()
    # convert image to RGB JPEG, the format sent to the API
    im = Image.open(origin_img.name)
    im.convert('RGB').save(save_img.name, "JPEG")
    with open(save_img.name, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read())
    # Build the form-encoded request expected by the OCR endpoint.
    data = {}
    data['fromdevice'] = "pc"
    data['clientip'] = "10.10.10.0"
    data['detecttype'] = "LocateRecognize"
    data['languagetype'] = "CHN_ENG"
    data['imagetype'] = "1"
    data['image'] = encoded_image
    decoded_data = urllib.urlencode(data)
    req = urllib2.Request(API_URL, data = decoded_data)
    req.add_header("Content-Type", "application/x-www-form-urlencoded")
    req.add_header("apikey", API_KEY)
    resp = urllib2.urlopen(req)
    content = resp.read()
    # remove useless files
    os.unlink(origin_img.name)
    os.unlink(save_img.name)
    if(content):
        return content
    return None
if __name__ == "__main__":
print get_image_text("http://www.liantu.com/tiaoma/eantitle.php?title=enl2dnhtUHNPMzQ0TUFpRk5sOTZseEZpYk1PeFYwWlBFQlc2a1dtZjcwaz0=")
| 26.173913 | 136 | 0.692137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.35226 |
6311a426d4eeb126a9bc59367b5a6f277acb2a51 | 2,381 | py | Python | token_importance_utils.py | keyurfaldu/token_importance | 38099bcd0f145d4559075e79c8cbc2fb240a72be | [
"MIT"
] | null | null | null | token_importance_utils.py | keyurfaldu/token_importance | 38099bcd0f145d4559075e79c8cbc2fb240a72be | [
"MIT"
] | null | null | null | token_importance_utils.py | keyurfaldu/token_importance | 38099bcd0f145d4559075e79c8cbc2fb240a72be | [
"MIT"
] | 1 | 2021-01-14T07:28:33.000Z | 2021-01-14T07:28:33.000Z | import torch
class AttentionBasedImportance:
def __init__(self, inputs, tokenizer, attentions):
if type(inputs["input_ids"]) == torch.Tensor:
if inputs["input_ids"].device != torch.device(type='cpu'):
self.input_ids = inputs["input_ids"].cpu()
else:
self.input_ids = inputs["input_ids"]
self.input_ids = self.input_ids.numpy()
self.tokenizer = tokenizer
self.attentions = attentions
self.layers_count = len(attentions)
self.heads_count = attentions[0].shape[1]
def get_attn_head_processor(self, layer, head, record_index, valid_tokens, tokens):
attns = self.attentions[layer][record_index][head][:valid_tokens+1,:valid_tokens+1]
"""
if type(attns) == torch.Tensor:
if attns.device != torch.device(type='cpu'):
attns = attns.cpu()
attns = attns.numpy()
"""
def attn_head_processor(sep_attn_upperbound=0.7):
total_weight = attns.sum()
sep_weight = attns[:,-1].sum() + attns[-1,:].sum() + attns[:,0].sum() + attns[0,:].sum()
if (sep_weight > sep_attn_upperbound*total_weight):
return None
record_data = []
for j in range(valid_tokens+1):
token_attns = list(attns[j,:])
record_data.append({"token": "%s "%tokens[j],
"heat": list(map(lambda x: float(x), token_attns)),
})
return record_data
return attn_head_processor
def get_data_for_textual_heatmap(self, record_index, sep_attn_upperbound=0.7, spcific_layers_heads=[]):
record = self.input_ids[record_index]
tokens = self.tokenizer.convert_ids_to_tokens(record)
valid_tokens = record.nonzero()[0].max()
data = []
tiles = []
for i in range(self.layers_count):
for h in range(self.heads_count):
processor = self.get_attn_head_processor(i, h, record_index, valid_tokens, tokens)
output = processor(sep_attn_upperbound)
if output is not None:
data.append(output)
tiles.append("H-%s-%s"%(i, h))
return data, tiles
| 40.355932 | 107 | 0.553129 | 2,357 | 0.98992 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.108358 |
6312dcb5c362d8fae0652ab1d6808d9dcaa3d504 | 3,704 | py | Python | python/2020/day11.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2020/day11.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2020/day11.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | # vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import itertools
import collections
# When True, get_solutions also runs the (slower) part-2 computation.
RUN_LONG_TESTS = False
def string_to_seat_layout(string):
    """Parse a seat-layout drawing into a {(row, col): char} mapping."""
    layout = {}
    for row_index, row in enumerate(string.splitlines()):
        for col_index, cell in enumerate(row):
            layout[(row_index, col_index)] = cell
    return layout
def seat_layout_to_string(seats):
    """Inverse of string_to_seat_layout: render the mapping as a drawing."""
    nb_rows = 1 + max(i for i, _ in seats.keys())
    nb_cols = 1 + max(j for _, j in seats.keys())
    rows = []
    for i in range(nb_rows):
        rows.append("".join(seats[(i, j)] for j in range(nb_cols)))
    return "\n".join(rows)
def get_seat_layout_from_file(file_path="../../resources/year2020_day11_input.txt"):
    # Load the puzzle input and parse it into the {(row, col): char} mapping.
    with open(file_path) as f:
        return string_to_seat_layout(f.read())
# The 8 neighbouring offsets (dx, dy): the cartesian product of {-1, 0, +1}
# with itself, minus the null offset.
directions = [pos for pos in itertools.product((-1, 0, +1), repeat=2) if pos != (0, 0)]
def get_new_seat_value_1(seat, nb_neigh):
    """Part-1 rule for one cell, given its occupied-neighbour count."""
    if seat == "L" and nb_neigh == 0:
        return "#"
    if seat == "#" and nb_neigh >= 4:
        return "L"
    return seat
def get_new_seats1(seats):
    """Compute one simultaneous update of every seat under part-1 rules."""
    occupied_neighbours = collections.Counter()
    for (x, y), seat in seats.items():
        if seat == "#":
            for dx, dy in directions:
                occupied_neighbours[(x + dx, y + dy)] += 1
    updated = {}
    for pos, seat in seats.items():
        updated[pos] = get_new_seat_value_1(seat, occupied_neighbours[pos])
    return updated
def get_new_seat_value_rule2(seat, nb_visible):
    """Part-2 rule for one cell, given its visible-occupied count."""
    if seat == "L":
        return seat if nb_visible else "#"
    if seat == "#":
        return seat if nb_visible < 5 else "L"
    return seat
def get_new_seats2(seats):
    """Compute one simultaneous update of every seat under part-2 rules.

    For every occupied seat, walk outward in each of the 8 directions to
    the first non-floor cell (a seat, or a position off the grid).  That
    cell is the one that "sees" the occupied seat, so its visible count is
    incremented.  Off-grid positions accumulate counts too, but they are
    never read back in the final comprehension.

    :seats: dict mapping (row, col) to '.', 'L' or '#'
    :returns: new dict with the same keys and updated seat values
    """
    visible_count = collections.Counter()
    for (x, y), seat in seats.items():
        if seat == "#":
            for dx, dy in directions:
                for d in itertools.count(start=1):
                    pos = x + (d * dx), y + (d * dy)
                    # `cell` (was `seat`) no longer shadows the loop
                    # variable above; None (off-grid) also ends the walk.
                    cell = seats.get(pos, None)
                    if cell != ".":
                        visible_count[pos] += 1
                        break
    return {
        pos: get_new_seat_value_rule2(seat, visible_count[pos])
        for pos, seat in seats.items()
    }
def get_nb_seat_on_fixedpoint(seats, func):
    """Iterate *func* until the layout stops changing; count occupied seats."""
    current = seats
    while True:
        updated = func(current)
        if updated == current:
            return sum(1 for value in updated.values() if value == "#")
        current = updated
def run_tests():
    # Worked examples from the AoC 2020 day-11 statement: example2 is one
    # part-1 round after example1; example3a the next part-1 round;
    # example3b the second *part-2* round (the first round is identical
    # under both rule sets).
    example1 = string_to_seat_layout(
        """L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL"""
    )
    example2 = string_to_seat_layout(
        """#.##.##.##
#######.##
#.#.#..#..
####.##.##
#.##.##.##
#.#####.##
..#.#.....
##########
#.######.#
#.#####.##"""
    )
    example3a = string_to_seat_layout(
        """#.LL.L#.##
#LLLLLL.L#
L.L.L..L..
#LLL.LL.L#
#.LL.LL.LL
#.LLLL#.##
..L.L.....
#LLLLLLLL#
#.LLLLLL.L
#.#LLLL.##"""
    )
    example3b = string_to_seat_layout(
        """#.LL.LL.L#
#LLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLL#
#.LLLLLL.L
#.LLLLL.L#"""
    )
    # Part 1: successive rounds and the stabilized occupied-seat count.
    assert example2 == get_new_seats1(example1)
    assert example3a == get_new_seats1(example2)
    assert get_nb_seat_on_fixedpoint(example1, get_new_seats1) == 37
    # Part 2: same first round, different second round and fixed point.
    assert example2 == get_new_seats2(example1)
    assert example3b == get_new_seats2(example2)
    assert get_nb_seat_on_fixedpoint(example1, get_new_seats2) == 26
def get_solutions():
    # Solve both parts against the real input; the known answers are
    # hard-coded so a regression prints False instead of True.
    seat_layout = get_seat_layout_from_file()
    print(get_nb_seat_on_fixedpoint(seat_layout, get_new_seats1) == 2204)
    if RUN_LONG_TESTS:
        print(get_nb_seat_on_fixedpoint(seat_layout, get_new_seats2) == 1986)
if __name__ == "__main__":
RUN_LONG_TESTS = True
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| 23.295597 | 88 | 0.584503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.161177 |
6315ca182a138a7397f2bdcc32da39042e1ae1a2 | 3,356 | py | Python | gestor_usuarios/apps/app_gestor_usuarios/forms.py | Enrialonso/LP-Manage-Contacts | fdee309442b44efee5f7aa67612738b1655197c9 | [
"Unlicense"
] | null | null | null | gestor_usuarios/apps/app_gestor_usuarios/forms.py | Enrialonso/LP-Manage-Contacts | fdee309442b44efee5f7aa67612738b1655197c9 | [
"Unlicense"
] | null | null | null | gestor_usuarios/apps/app_gestor_usuarios/forms.py | Enrialonso/LP-Manage-Contacts | fdee309442b44efee5f7aa67612738b1655197c9 | [
"Unlicense"
] | null | null | null | from django import forms
from apps.app_gestor_usuarios.models import db_usuarios, db_manage_contacts
class signupForm(forms.ModelForm):
    # Registration form backed by the db_usuarios model.
    # NOTE(review): the password field renders via PasswordInput, but it is
    # not visible here whether the value is hashed before saving — confirm.
    class Meta:
        model = db_usuarios
        fields = [
            'name',
            'last',
            'email',
            'password',
        ]
        labels = {
            'name':'Name',
            'last':'Last',
            'email':'Email',
            'password':'Password',
        }
        # Bootstrap styling on every input.
        widgets = {
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'last': forms.TextInput(attrs={'class':'form-control'}),
            'email': forms.EmailInput(attrs={'class':'form-control'}),
            'password': forms.PasswordInput(attrs={'class':'form-control'}),
        }
class loginForm(forms.ModelForm):
    # Login form: only email + password from the db_usuarios model.
    class Meta:
        model = db_usuarios
        fields = [
            'email',
            'password',
        ]
        labels = {
            'email':'Email',
            'password':'Password',
        }
        # Bootstrap styling on every input.
        widgets = {
            'email': forms.EmailInput(attrs={'class':'form-control'}),
            'password': forms.PasswordInput(attrs={'class':'form-control'}),
        }
class addForm(forms.ModelForm):
    # Create/edit form for a contact (db_manage_contacts).  The associated
    # user email is read-only; several address widgets carry ids matching
    # Google Places address components (route, locality, ...) — presumably
    # filled by an autocomplete script on the page; confirm in the template.
    class Meta:
        model = db_manage_contacts
        fields = [
            'id',
            'associated_user',
            'name',
            'last',
            'email',
            'phone_local',
            'phone_mov',
            'street',
            'street_number',
            'population',
            'state',
            'postalcode',
            'country',
            'url_web',
        ]
        labels = {
            'id':'id',
            'associated_user':'Associated User',
            'name':'Name',
            'last':'Last',
            'email':'Email',
            'phone_local':'Local Phone',
            'phone_mov':'Cel. Phone',
            'street':'Street',
            'street_number':'Street Number',
            'population':'Population',
            # NOTE(review): user-facing label 'Comunity' looks like a typo
            # for 'Community' — left unchanged (runtime string).
            'state':'Comunity',
            'postalcode':'Postal code',
            'country':'Country',
            'url_web':'Web or Blog',
        }
        widgets = {
            'id':forms.TextInput(attrs={'class':'form-control'}),
            'associated_user':forms.EmailInput(attrs={'class':'form-control','readonly':'True'}),
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'last': forms.TextInput(attrs={'class':'form-control'}),
            'email': forms.EmailInput(attrs={'class':'form-control'}),
            'phone_local': forms.TextInput(attrs={'class':'form-control'}),
            'phone_mov': forms.TextInput(attrs={'class': 'form-control'}),
            'street': forms.TextInput(attrs={'class': 'form-control', 'id':'route'}),
            'street_number': forms.TextInput(attrs={'class': 'form-control', 'id': 'street_number'}),
            'population': forms.TextInput(attrs={'class': 'form-control', 'id':'locality'}),
            'state': forms.TextInput(attrs={'class': 'form-control', 'id': 'administrative_area_level_1'}),
            'postalcode': forms.TextInput(attrs={'class': 'form-control', 'id':'postal_code'}),
            'country': forms.TextInput(attrs={'class': 'form-control', 'id':'country'}),
            'url_web': forms.URLInput(attrs={'class': 'form-control'}),
        }
631620859e8efb33b69f79589785ecbf70b6297e | 1,613 | py | Python | service/routes/greet.py | illuscio-dev/isleservice-py | e7be171b08cf4ef43b1e569d163390d851ca8601 | [
"MIT"
] | null | null | null | service/routes/greet.py | illuscio-dev/isleservice-py | e7be171b08cf4ef43b1e569d163390d851ca8601 | [
"MIT"
] | null | null | null | service/routes/greet.py | illuscio-dev/isleservice-py | e7be171b08cf4ef43b1e569d163390d851ca8601 | [
"MIT"
] | null | null | null | from spanserver import (
SpanRoute,
Request,
Response,
MimeType,
RecordType,
DocInfo,
DocRespInfo,
)
from isleservice_objects import models, errors, schemas
from service.api import api
class SchemaCache:
    """Pre-built marshmallow schema instances shared by the /greet handlers.

    ENEMY_POST excludes the ``rank`` field; ENEMY_FULL serialises everything.
    """
    ENEMY_FULL = schemas.EnemySchema()
    ENEMY_POST = schemas.EnemySchema(exclude=["rank"])
@api.route("/greet")
class Greet(SpanRoute):
    """
    Bellow a greeting at an enemy.

    GET echoes the enemy's addressable name back as upper-cased text;
    POST sets a 201 status and then always raises errors.CoughError.
    """

    @api.use_schema(req=SchemaCache.ENEMY_FULL, resp=MimeType.TEXT)
    async def on_get(
        self, req: Request[RecordType, models.Enemy], resp: Response
    ) -> None:
        """
        Get a greeting.

        Loads the Enemy payload from the request and responds with its
        shouted (upper-cased) addressable name followed by '!'.
        """
        data = await req.media_loaded()
        resp.media = f"{data.addressable.upper()}!"

    @api.use_schema(req=SchemaCache.ENEMY_POST, resp=MimeType.TEXT)
    async def on_post(
        self, req: Request[RecordType, models.Enemy], resp: Response
    ) -> None:
        """
        Send a greeting to an Enemy.

        NOTE(review): the 201 status is set but CoughError is always raised
        afterwards — presumably the framework's error handling shapes the
        actual response; confirm intended behaviour.
        """
        resp.status_code = 201
        raise errors.CoughError("HACK! COUGH! HCCCCKKK!!!")
class Document:
    """Example request/response payloads (spanserver DocInfo/DocRespInfo)
    attached to the GET and POST handlers of the Greet route."""

    example_enemy = models.Enemy("General", models.Name("Obi-wan", "Kenobi"))

    get = DocInfo(
        req_example=example_enemy,
        responses={200: DocRespInfo(example="GENERAL KENOBI!")},
    )

    # We can use the full model here, the excluded field will get ignored when
    # marshmallow dumps the schema (no validation happens on a dump)
    post = DocInfo(
        req_example=example_enemy,
        responses={201: DocRespInfo(example="GENERAL KENOBI!")},
    ) | 27.338983 | 82 | 0.620583 | 1,372 | 0.850589 | 0 | 0 | 1,281 | 0.794172 | 495 | 0.306882 | 405 | 0.251085 |
6317df6d3d996ef9dd1c80a21ebe80c0b3ce61e6 | 948 | py | Python | openregister/record.py | psd/openregister | 7aa831464f2917c5be7a5ae34ff4c0de96451811 | [
"MIT"
] | null | null | null | openregister/record.py | psd/openregister | 7aa831464f2917c5be7a5ae34ff4c0de96451811 | [
"MIT"
] | null | null | null | openregister/record.py | psd/openregister | 7aa831464f2917c5be7a5ae34ff4c0de96451811 | [
"MIT"
] | null | null | null | from .item import Item
from .entry import Entry
from copy import copy
class Record(object):
    """A Record: the pairing of an Entry with its Item.

    Useful for representing the latest entry for a field value; serialised
    as the entry merged over the item.
    """

    def __init__(self, entry=None, item=None):
        self.entry = entry
        self.item = item

    @property
    def primitive(self):
        """Record as Python primitive (item fields overlaid by entry fields)."""
        merged = copy(self.item.primitive)
        merged.update(self.entry.primitive)
        return merged

    @primitive.setter
    def primitive(self, primitive):
        """Record from Python primitive: split entry fields from item fields."""
        self.entry = Entry()
        self.entry.primitive = primitive
        remainder = copy(primitive)
        # Whatever the entry claims as its own fields is removed; the rest
        # belongs to the item.
        for field in self.entry.fields:
            del remainder[field]
        self.item = Item()
        self.item.primitive = remainder
| 24.947368 | 62 | 0.632911 | 875 | 0.922996 | 0 | 0 | 539 | 0.568565 | 0 | 0 | 266 | 0.280591 |
63181b955335972638426181671ca5d3dffa487d | 385 | py | Python | pyexcel_matplotlib/__init__.py | pyexcel/pyexcel-matplotlib | 8771fcf3cc82164b50dc7ec0314838bf3de63e3b | [
"BSD-3-Clause"
] | null | null | null | pyexcel_matplotlib/__init__.py | pyexcel/pyexcel-matplotlib | 8771fcf3cc82164b50dc7ec0314838bf3de63e3b | [
"BSD-3-Clause"
] | null | null | null | pyexcel_matplotlib/__init__.py | pyexcel/pyexcel-matplotlib | 8771fcf3cc82164b50dc7ec0314838bf3de63e3b | [
"BSD-3-Clause"
] | null | null | null | """
pyexcel_matplotlib
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.plugins import PyexcelPluginChain
# Register this package's MatPlotter class as a pyexcel renderer for the
# 'svg' and 'png' file types.
PyexcelPluginChain(__name__).add_a_renderer(
    relative_plugin_class_path='plot.MatPlotter',
    file_types=['svg', 'png']
)
| 22.647059 | 62 | 0.690909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.61039 |
63188b300be0d3bc46d1755aa736cffb77943a19 | 199 | py | Python | content/Coverage Criteria/code-snippets-2-fytd/test_impl_with_properties.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | 11 | 2018-02-08T05:23:28.000Z | 2021-05-24T13:23:56.000Z | content/Coverage Criteria/code-snippets-2-fytd/test_impl_with_properties.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | null | null | null | content/Coverage Criteria/code-snippets-2-fytd/test_impl_with_properties.py | rvprasad/software-testing-course | 3803851dcf9f7bbd0f0b89fca6c9c5e3a48f22e0 | [
"CC-BY-4.0"
] | 2 | 2020-09-15T08:51:22.000Z | 2021-01-26T12:07:18.000Z | #py.test --cov-report=term --cov=. --cov-config=coverage.rc --cov-fail-under=100
from impl import PhysicalInfo
import pytest
import hypothesis.strategies as st
from hypothesis import given, assume
| 24.875 | 80 | 0.778894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.40201 |
6318c5a3cbb9f437c2f48b78416a895e4ca8d74b | 7,025 | py | Python | homepage/views.py | gwillig/octocat | 4a9f79782a0da656d61450552ce27a49047abe53 | [
"MIT"
] | null | null | null | homepage/views.py | gwillig/octocat | 4a9f79782a0da656d61450552ce27a49047abe53 | [
"MIT"
] | 5 | 2020-06-09T13:02:15.000Z | 2021-06-10T18:47:45.000Z | homepage/views.py | gwillig/octocat | 4a9f79782a0da656d61450552ce27a49047abe53 | [
"MIT"
] | null | null | null | from django.http import Http404, JsonResponse
from django.shortcuts import render
from chatbot.chatbot import create_chatbot
import threading
import pickle
import collections
from io import BytesIO
import numpy as np
import urllib.request
import json
import wave
import librosa
import pandas as pd
from homepage.models import Memory, Person, Raw_Conversation
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from sklearn.naive_bayes import GaussianNB
from chatterbot.trainers import ListTrainer
# Sample OpenWeatherMap URL (unused at module level — get_weather() builds
# its own request URL; the appid here is OpenWeatherMap's public sample key).
url ="https://samples.openweathermap.org/data/2.5/weather?q=Eberstadt,%20DE&appid=b6907d289e10d714a6e88b30761fae22"
import sys
# Print full numpy arrays instead of truncated summaries (debug aid).
np.set_printoptions(threshold=sys.maxsize)
#====Create cheatbot
# Built once at import time and shared by the chatbot views below.
chatbot = create_chatbot()
#====
def get_mfcc_feature(data, sr=44100, target_length=45000):
    """
    Compute MFCC features for an audio signal.

    @args:
        data (np.ndarray): 1-D float audio samples (not a file path).
        sr (int): sample rate passed to librosa (default 44100 Hz).
        target_length (int): number of samples the signal is padded or
            truncated to, so every feature matrix has the same shape.
    @return:
        mfcc_features (np.array)
    """
    # Fix the length so MFCC matrices are comparable across samples.
    fixed = librosa.util.fix_length(data, target_length)
    mfcc_features = librosa.feature.mfcc(y=fixed, sr=sr)
    return mfcc_features
def get_person(request, name_person):
    """Return whether *name_person* is already registered, creating it if not.

    Responds with {"name_person": "unkown"} after creating the Person, or
    {"name_person": "kown"} when the name already exists.  (The misspelled
    values are kept for API compatibility with existing clients.)
    """
    # .exists() issues a cheaper query than .count() == 0.
    if not Person.objects.filter(first_name=name_person).exists():
        p1 = Person(first_name=name_person)
        p1.save()
        return JsonResponse({"name_person": "unkown"})
    return JsonResponse({"name_person": "kown"})
@method_decorator(csrf_exempt, name='post')
def classify_audio(request):
    """Classify a posted raw audio payload against the stored Memory samples.

    Trains a fresh Gaussian naive-Bayes classifier on every request from the
    MFCC features persisted in the Memory table, then returns the relative
    class scores (normalised log-probabilities, in percent, sorted ascending)
    as JSON under the "prediction" key.
    """
    # Raw PCM bytes posted by the client.
    data = request.body
    # buf_to_float needs an even byte count (16-bit samples); drop a
    # trailing byte if necessary.
    if len(data) % 2 == 0:
        data_float_raw = librosa.util.buf_to_float(data)
    else:
        data_float_raw = librosa.util.buf_to_float(data[:-1])
    # Trim leading/trailing silence.
    data_float, _ = librosa.effects.trim(data_float_raw)
    # MFCC features of the sample to classify, flattened for sklearn.
    prediction_mfcc_fl = get_mfcc_feature_data(data_float).flatten()
    # Load every stored sample and rebuild the training matrix.
    df = pd.DataFrame(list(Memory.objects.all().values()))
    train_data = np.array(
        [bytes_numpy(blob).flatten() for blob in df["blob_data_mfcc"]])
    train_label = df["ground_truth"].values
    # NOTE(review): the classifier is re-trained on every request; fine for
    # a small Memory table, consider caching if it grows.
    clf = GaussianNB()
    clf.fit(train_data, train_label)
    prediction = clf.predict([prediction_mfcc_fl])
    print(prediction)
    # Normalised log-probabilities as percentages.  FIX: predict_log_proba
    # was previously evaluated twice for the same input; compute it once.
    log_proba = clf.predict_log_proba([prediction_mfcc_fl])
    relative_predict = log_proba / log_proba.sum()
    relative_predict_round_flat = np.around(relative_predict * 100, 4).flatten()
    # Map each class to its score, sorted ascending by score.
    result_dict = dict(zip(clf.classes_, relative_predict_round_flat))
    d_sorted = dict(sorted(result_dict.items(), key=lambda kv: kv[1]))
    print(d_sorted)
    return JsonResponse({"prediction": d_sorted})
def home(request ):
    """Render the landing page."""
    return render(request, 'home.html')
def _np_to_bytes(array):
    """Serialize *array* to the raw byte representation produced by np.save."""
    buffer = BytesIO()
    np.save(buffer, array, allow_pickle=True)
    return buffer.getvalue()


@method_decorator(csrf_exempt, name='post')
def recorded_audio(request):
    """Persist a posted audio sample to the Memory table.

    Stores three representations — the raw posted bytes, the float samples
    and the MFCC feature matrix — labelled by the ``Ground-Truth`` request
    header.
    """
    data = request.body
    ground_truth = request.headers["Ground-Truth"]
    # buf_to_float needs an even byte count (16-bit samples); drop a
    # trailing byte if necessary.
    if len(data) % 2 == 0:
        data_float = librosa.util.buf_to_float(data)
    else:
        data_float = librosa.util.buf_to_float(data[:-1])
    # Serialize both the float samples and their MFCC features via the
    # shared helper (the BytesIO/np.save dance was previously duplicated).
    np_bytes_raw = _np_to_bytes(data_float)
    np_bytes_mfcc = _np_to_bytes(get_mfcc_feature_data(data_float))
    m1 = Memory(ground_truth=ground_truth, blob_data_raw=data,
                blob_data_float=np_bytes_raw, blob_data_mfcc=np_bytes_mfcc)
    m1.save()
    return JsonResponse({"successfully": "Successfully saved to db"})
def reco(request):
    """Render the recognition page."""
    return render(request, 'reco.html')
def audio(request):
    """Render the recording page with per-label sample counts from Memory."""
    all_memories = Memory.objects.all()
    all_memories_list = []
    # One row per distinct ground-truth label, with its sample count.
    for el in all_memories.values('ground_truth').distinct():
        key_word = el["ground_truth"]
        # FIX: a bare `Memory.objects.filter(...)` statement stood here —
        # Django querysets are lazy, so it did nothing and was removed.
        count = Memory.objects.filter(ground_truth=key_word).count()
        all_memories_list.append({"ground_truth": key_word,
                                  "count": count})
    return render(request, 'record_audio.html',
                  {"Person": "Gustav", "all_memories_list": all_memories_list})
def get_weather(request):
    """Fetch the current Eberstadt weather from OpenWeatherMap as JSON.

    SECURITY(review): the API key is hard-coded in the URL; it should be
    moved to settings or an environment variable before publication.
    """
    url = 'http://api.openweathermap.org/data/2.5/weather?q=Eberstadt,ger&units=metric&lang=de&APPID=b3b25ed86b9fb6cfaac03f9b37164eef'
    # Close the HTTP connection deterministically instead of leaking it.
    with urllib.request.urlopen(url) as response:
        req_json = json.loads(response.read().decode('utf-8'))
    return JsonResponse(req_json)
def chatbot_answer(request, name_person_global, person_statement):
    """Return the chatbot's reply to *person_statement* as JSON.

    The exchange is persisted in a background daemon thread so the HTTP
    response is not delayed by the database write.
    """
    # FIX: removed a leftover debug print ("ehllo").
    chatbot_response = chatbot.get_response(person_statement)
    # Save the conversation for later training; daemon=True lets the main
    # program exit even if the write is still pending.
    task = threading.Thread(
        target=save_to_db,
        args=(name_person_global, person_statement, chatbot_response))
    task.daemon = True
    task.start()
    return JsonResponse({"chatbot_response": str(chatbot_response)})
def train_chatbot(request,optional_answer_chatbot,person_statement):
    """Train the chatbot on one statement/answer pair supplied via the URL
    and confirm the trained pair in the JSON response."""
    trainer = ListTrainer(chatbot)
    trainer.train([person_statement,optional_answer_chatbot])
    return JsonResponse({"successful_trained": f"person_statement: {person_statement},"+
                         f"optional_answer_chatbot:{optional_answer_chatbot}"})
# Get example audio file
def get_mfcc_feature_data(data, sr=44100, target_length=45000):
    """
    Compute MFCC features for an audio signal.

    @args:
        data (np.ndarray): 1-D float audio samples (the old docstring
            incorrectly described this as a file path).
        sr (int): sample rate passed to librosa (default 44100 Hz).
        target_length (int): number of samples the signal is padded or
            truncated to, so every feature matrix has the same shape.
    @return:
        mfcc_features (np.array)
    """
    # Fix the length so MFCC matrices are comparable across samples.
    fixed = librosa.util.fix_length(data, target_length)
    mfcc_features = librosa.feature.mfcc(y=fixed, sr=sr)
    return mfcc_features
def save_to_db(name_person_global, person_statement, chatbot_response):
    """Persist one chatbot exchange to the Raw_Conversation table.

    @args:
        - name_person_global (str): speaker name, e.g. "gustav"
        - person_statement (str): what the person said, e.g. "Wie heißt du?"
        - chatbot_response (str): the chatbot's reply, e.g. "ich heiße Felix"
    """
    conversation = Raw_Conversation(
        person_name=name_person_global,
        person_statement=person_statement,
        chatbot_response=chatbot_response,
    )
    conversation.save()
def bytes_numpy(bytes_raw):
    """Deserialize bytes previously produced by np.save back into a NumPy
    array (inverse of the serialization done in recorded_audio).

    @args:
        bytes_raw (bytes): buffer produced by np.save
    @return:
        np.ndarray
    """
    load_bytes = BytesIO(bytes_raw)
    loaded_np = np.load(load_bytes, allow_pickle=True)
    return loaded_np | 33.774038 | 134 | 0.701495 | 0 | 0 | 0 | 0 | 2,736 | 0.389355 | 0 | 0 | 1,951 | 0.277643 |
63194ee4c304415890bdeb1468d2982220dc84d9 | 5,749 | py | Python | settings/base.py | ankit-ak/django-ecommerce-1 | 248127526c03c7c0f25a2df84365a0d0199b9693 | [
"MIT"
] | 4 | 2021-04-06T16:50:57.000Z | 2022-03-02T00:50:44.000Z | settings/base.py | ankit-ak/django-ecommerce-1 | 248127526c03c7c0f25a2df84365a0d0199b9693 | [
"MIT"
] | null | null | null | settings/base.py | ankit-ak/django-ecommerce-1 | 248127526c03c7c0f25a2df84365a0d0199b9693 | [
"MIT"
] | 7 | 2021-02-22T08:07:20.000Z | 2022-03-06T10:17:28.000Z | """
Django settings for petstore project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.humanize',
# third party
'crispy_forms',
'allauth',
'allauth.account',
'storages',
# local
'accounts.apps.AccountsConfig',
'pages.apps.PagesConfig',
'products.apps.ProductsConfig',
'basket.apps.BasketConfig',
'checkout.apps.CheckoutConfig',
'orders.apps.OrdersConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
# 'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# user middleware
'basket.middleware.BasketMiddleware',
]
ROOT_URLCONF = 'petstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'petstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# User defined settings
AUTH_USER_MODEL = 'accounts.CustomUser'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# AWS Setup for media store on Heroku
USE_S3 = os.getenv('USE_S3') == 'TRUE'
if USE_S3:
# aws settings
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}
# s3 static settings
STATIC_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
STATICFILES_STORAGE = 'petstore.storage_backends.StaticStorage'
# s3 public media settings
PUBLIC_MEDIA_LOCATION = 'media'
MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'
DEFAULT_FILE_STORAGE = 'petstore.storage_backends.PublicMediaStorage'
else:
STATIC_URL = '/staticfiles/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Bootstrap class mappings for django messages
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
# django-allauth config
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_PRESERVE_USERNAME_CASING = False
# custom forms to override allauth defaults
ACCOUNT_FORMS = {
'signup': 'accounts.forms.CustomSignupForm',
'login': 'accounts.forms.CustomLoginForm',
}
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
# stripe keys
STRIPE_TEST_PUBLISHABLE_KEY = os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY')
STRIPE_TEST_SECRET_KEY = os.environ.get('STRIPE_TEST_SECRET_KEY')
# whitenoise setting
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 28.043902 | 91 | 0.728996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,596 | 0.6255 |
631ba99c9d1ddbfd1ea38d627f51de23ced2b917 | 440 | py | Python | day-01/part-2/jon.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 12 | 2020-11-30T19:22:18.000Z | 2021-06-21T05:55:58.000Z | day-01/part-2/jon.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 13 | 2020-11-30T17:27:22.000Z | 2020-12-22T17:43:13.000Z | day-01/part-2/jon.py | evqna/adventofcode-2020 | 526bb9c87057d02bda4de9647932a0e25bdb3a5b | [
"MIT"
] | 3 | 2020-12-01T08:49:40.000Z | 2022-03-26T21:47:38.000Z | from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
def run(self, s):
l = [int(x) for x in s.strip().split()]
n = len(l)
for i in range(n):
for j in range(i):
if l[i] + l[j] > 2020:
continue
for k in range(j):
if l[i] + l[j] + l[k] == 2020:
return str(l[i] * l[j] * l[k])
| 25.882353 | 54 | 0.427273 | 392 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
631bccc0853eef2e7f3442d8f5f162deb98c59c4 | 1,919 | py | Python | threading/recon-ng_threadpool.py | all3g/pieces | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 34 | 2016-10-31T02:05:24.000Z | 2018-11-08T14:33:13.000Z | threading/recon-ng_threadpool.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 2 | 2017-05-11T03:00:31.000Z | 2017-11-01T23:37:37.000Z | threading/recon-ng_threadpool.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 21 | 2016-08-19T09:05:45.000Z | 2018-11-08T14:33:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# From /opt/recon-ng/recon/mixins/threads.py
from Queue import Queue, Empty
import threading
import time
import logging
logging.basicConfig(level=logging.INFO, format="[+] %(message)s")
logger = logging.getLogger("mutilthreads")
class ThreadingMixin(object):
def __init__(self):
self.stopped = threading.Event()
self.queue = Queue()
self.threadNum = 10
def _thread_wrapper(self, *args):
while not self.stopped.is_set():
try:
item = self.queue.get_nowait()
except Empty:
continue
try:
self.module_thread(item, *args)
except:
logger.info('thread exception')
finally:
self.queue.task_done()
def threads(self, *args):
[self.queue.put(_) for _ in args[0]]
threads = [
threading.Thread(target=self._thread_wrapper, args=args[1:])
for i in range(self.threadNum)
]
[_.setDaemon(True) for _ in threads]
[_.start() for _ in threads]
try:
while not self.queue.empty():
time.sleep(0.7)
except KeyboardInterrupt:
self.stopped.set()
[_.join() for _ in threads]
self.queue.join()
self.stopped.set()
def module_thread(self, item, *args):
logger.info(item)
pass
if __name__ == '__main__':
# define a new ThreadingMixin's subclass
class demo(ThreadingMixin):
def __init__(self):
super(demo, self).__init__()
def module_thread(self, item, callback):
logger.info(callback(item))
def callback(word):
return "abc - %s" % word
words = []
with open('wordlists.txt') as f:
words = [i.strip() for i in f]
d = demo()
d.threads(words, callback)
| 23.9875 | 72 | 0.560188 | 1,361 | 0.709224 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.110474 |
631c035d110528366d1acb4c246097b03b5fc1bd | 2,184 | py | Python | abstracts_mutual_PCAscore.py | diegovalenzuelaiturra/EasyPatents | 6e922e8b749b5baeaef9fcaabfdd789dfa9191a7 | [
"MIT"
] | 2 | 2019-11-20T04:29:56.000Z | 2021-06-09T22:09:32.000Z | abstracts_mutual_PCAscore.py | diegovalenzuelaiturra/EasyPatents | 6e922e8b749b5baeaef9fcaabfdd789dfa9191a7 | [
"MIT"
] | 15 | 2020-01-28T21:33:32.000Z | 2022-01-13T00:33:02.000Z | abstracts_mutual_PCAscore.py | diegovalenzuelaiturra/EasyPatents | 6e922e8b749b5baeaef9fcaabfdd789dfa9191a7 | [
"MIT"
] | 2 | 2017-12-31T08:53:04.000Z | 2021-06-09T22:09:34.000Z | from BusquedasSem import *
import seaborn as sns
def main():
df = pd.read_csv('./client0-sort.csv')
df_abstract = df['Abstract']
l = df_abstract.size
abstracts = df_abstract.values
PCA_score = np.zeros((l, l))
abstracts_aux = preprocessing_abstracts_PCA(abstracts)
for i in range(l):
#PCA_score[i][:] = PCAscore2(thoughtobeat(words=abstracts_aux[i], abstracts=abstracts_aux))
aux = PCAscore2(
thoughtobeat(words=abstracts_aux[i], abstracts=abstracts_aux))
for j in range(l):
PCA_score[i][j] = aux[j]
print(PCA_score)
PCA_score = pd.DataFrame(PCA_score)
sns.set()
sns.heatmap(PCA_score)
sns.plt.show()
def preprocessing_abstracts_PCA(abstracts):
    """Normalise every abstract for PCA scoring: lower-case, strip
    punctuation, English stop words, cardinals (CD) and determiners (DT),
    then stem/lemmatise.  Returns a list of cleaned strings."""
    processed = []
    for raw in abstracts:
        cleaned = minimizar(raw)
        cleaned = deletePunt(text=cleaned)
        cleaned = deleteStop(text=cleaned, leng='english')
        cleaned = deleteWord('CD', cleaned)
        cleaned = deleteWord('DT', cleaned)
        processed.append(stemmingLemmatizer(cleaned))
    return processed
def simpleScore(abstract_i, abstract_j, gamma):
    """Symmetric word-overlap similarity between two tokenized abstracts.

    For every token pair (i, j) the relative frequency of i inside
    abstract_j plus the relative frequency of j inside abstract_i is
    collected; the score is 1 plus the sum over all collected values n of
    log(gamma + (n / max)^(3/4) / total^(3/4)).

    Args:
        abstract_i (list[str]): tokens of the first abstract.
        abstract_j (list[str]): tokens of the second abstract.
        gamma (float): smoothing constant inside the logarithm.

    Returns:
        float: similarity score, or -math.inf when the abstracts share no
        vocabulary (total frequency mass is zero) or either input is empty.
    """
    l_i = len(abstract_i)
    l_j = len(abstract_j)
    # Robustness: the original crashed on np.amax([]) for empty inputs.
    if l_i == 0 or l_j == 0:
        return -math.inf
    # The relative frequency of each j-token inside abstract_i does not
    # depend on the outer loop; compute it once per token instead of per
    # pair (was O(|i|*|j|) redundant count() calls).
    freq_j_cache = [abstract_i.count(j) / l_i for j in abstract_j]
    freq = []
    freq_acum = 0
    for i in abstract_i:
        # Likewise invariant over the inner loop.
        freq_i = abstract_j.count(i) / l_j
        for freq_j in freq_j_cache:
            combined = freq_i + freq_j
            freq.append(combined)
            freq_acum += combined
    # Invariant hoisted out of the scoring loop: with zero total mass the
    # original returned -inf on the first iteration anyway.
    if freq_acum == 0:
        return -math.inf
    maximo = np.amax(freq)
    score = 1
    for n in freq:
        score += np.log(gamma + ((n / maximo) ** (3 / 4)) /
                        (freq_acum ** (3 / 4)))
    return score
def Score_abstract_preprocessing(abstract):
    """Clean a single abstract (lower-case, no punctuation/stop words/
    cardinals, stemmed) and return its token list for simpleScore."""
    cleaned = minimizar(abstract)
    cleaned = deletePunt(text=cleaned)
    cleaned = deleteStop(text=cleaned, leng='english')
    cleaned = deleteWord('CD', cleaned)
    cleaned = stemmingLemmatizer(cleaned)
    return cleaned.split()
# Script entry point: build and display the mutual PCA-score heat map.
if __name__ == '__main__':
    main()
| 25.103448 | 99 | 0.604396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.1163 |
631cd2a6dbe8d5531dc2796c165be89911afb6ea | 864 | py | Python | .workloads/hash-crack/hash-crack.py | lolenseu/Projects | 34c2eef473697b092e13743f28d575cf0fb08324 | [
"MIT"
] | 5 | 2022-01-31T14:44:24.000Z | 2022-03-26T14:02:35.000Z | .workloads/hash-crack/hash-crack.py | lolenseu/Projects | 34c2eef473697b092e13743f28d575cf0fb08324 | [
"MIT"
] | null | null | null | .workloads/hash-crack/hash-crack.py | lolenseu/Projects | 34c2eef473697b092e13743f28d575cf0fb08324 | [
"MIT"
] | null | null | null | import os
import sys
import time
import hashlib
target_hash = input("Input a target hash: ")
numbers = '1234567890'
uptext = 'qwertyuiopasdfghjklzxcvbnm'
lotext = 'QWERTYUIOPASDFGHJKLZXCVBNM'
steach = uptext + lotext
lash_ash = open('log', 'w')
found_hash = open('saved', 'w')
def animate():
print(f"[{gen_text_hash}] is [{gen_raw_text}]", end="\r")
def hash_found(gen_raw_text, gen_text_hash):
print(f"[{gen_raw_text}] is [{gen_text_hash}]")
found_hash.write(f"[{gen_text_hash}] is [{gen_raw_text}]")
counter = 0
found = False
while not found:
for i in range(len(steach)):
gen_raw_text = counter #steach[counter]
gen_text_hash = hashlib.md5(str(gen_raw_text).encode()).hexdigest()
animate()
if gen_text_hash == target_hash:
hash_found(gen_raw_text, gen_text_hash)
found = True
break
counter += 1
if found == True:
break
| 20.093023 | 69 | 0.706019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.288194 |
631d80f3701744174c3da072f2943d2b83e7badc | 11,100 | py | Python | scripts/bert_system/voter.py | GKingA/tuw-inf-hasoc2021 | d04b816db2e2b1b2a5a6d455596ed369f8d1da14 | [
"MIT"
] | null | null | null | scripts/bert_system/voter.py | GKingA/tuw-inf-hasoc2021 | d04b816db2e2b1b2a5a6d455596ed369f8d1da14 | [
"MIT"
] | null | null | null | scripts/bert_system/voter.py | GKingA/tuw-inf-hasoc2021 | d04b816db2e2b1b2a5a6d455596ed369f8d1da14 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix
from argparse import ArgumentParser
from read_data import read_csv
def create_toxic_result(path, expected, out, toxic_category, test=False):
    """Binarise model scores for one toxic-comment category and write a TSV.

    Args:
        path: CSV of model scores with columns text/OTHER/CATEGORY.
        expected: gold file; in test mode it only carries
            comment_id/comment_text (no labels).
        out: destination TSV with columns comment_id, text, result.
        toxic_category: gold column to evaluate against ('toxic',
            'engaging' or 'fact').
        test: when True no gold labels exist, so no metrics are computed.

    Returns:
        dict | None: sklearn classification report (output_dict=True), or
        None in test mode.
    """
    if not test:
        df_true = read_csv(expected, names=['comment_id', 'toxic', 'engaging', 'fact'],
                           force_names=True)
    else:
        df_true = read_csv(expected, names=['comment_id', 'comment_text'], force_names=True)
    df_binary = read_csv(path, names=['text', 'OTHER', 'CATEGORY'], force_names=True)
    # Hard decision: predict the category whenever its score >= OTHER.
    df_binary["result"] = pd.Series(
        [1 if category >= other else 0 for (category, other) in zip(df_binary.CATEGORY, df_binary.OTHER)])
    result = pd.concat([df_binary, df_true], axis=1)
    report = None
    if not test:
        gold = result[toxic_category].tolist()
        pred = result.result.tolist()
        print(confusion_matrix(gold, pred))
        print(classification_report(gold, pred))
        # BUG FIX: the report was previously computed unconditionally on
        # return, raising KeyError in test mode (no toxic_category column).
        report = classification_report(gold, pred, output_dict=True)
    comment_text = 'text' if 'text' in result else 'comment_text'
    result.to_csv(out, index=False, sep='\t', columns=['comment_id', comment_text, 'result'])
    return report
def create_binary_result(path, expected, out, hasoc, test=True):
    """Turn binary model scores into hard OFFENSE/OTHER (or HOF/NOT) labels,
    write them to *out* and return a classification report.

    Args:
        path: CSV of scores with columns text/OTHER/OFFENSE (or NOT/HOF).
        expected: gold file; HASOC format (5 columns) when *hasoc* is True.
        out: destination file; TSV for GermEval-style data, comma-separated
            HASOC submission format otherwise.
        hasoc: whether *expected* is a HASOC-format file.
        test: when True no metrics are printed and None is returned.

    Returns:
        dict | None: sklearn report (output_dict=True), None in test mode,
        or a zeroed 'macro avg' stub when the score columns are missing.
    """
    if hasoc:
        df_true = read_csv(expected, names=['id', 'c_text', 'binary', 'labels', 'additional'], force_names=True)
    else:
        df_true = read_csv(expected, names=['text', 'binary', 'labels'])
    df_binary = read_csv(path, names=['text', 'OTHER', 'OFFENSE'])
    # Support both label vocabularies: OFFENSE/OTHER and HASOC's HOF/NOT.
    off = 'OFFENSE' if 'OFFENSE' in df_binary else 'HOF'
    oth = 'OTHER' if 'OTHER' in df_binary else 'NOT'
    try:
        # Hard decision: predict the offensive class when its score >= other.
        df_binary["result"] = pd.Series(
            [off if offense >= other else oth for (offense, other) in zip(df_binary[off], df_binary[oth])])
    except KeyError:
        # Neither column pair present: return a zeroed report stub so the
        # caller's grid search can treat this checkpoint as worthless.
        return {'macro avg': {'precision': 0, 'recall': 0, 'f1-score': 0}}
    result = pd.concat([df_binary, df_true], axis=1)
    if not test:
        print(confusion_matrix(result.binary.tolist(), result.result.tolist()))
        print(classification_report(result.binary.tolist(), result.result.tolist()))
        cr = classification_report(result.binary.tolist(), result.result.tolist(), output_dict=True)
    else:
        cr = None
    # Overwrite the gold column with the prediction before writing out.
    result["binary"] = result["result"]
    if 'labels' in result:
        result.to_csv(out, index=False, sep='\t', columns=['text', 'binary', 'labels'])
    else:
        result.to_csv(out, index=False, sep=',', columns=['id', 'binary', 'c_text'])
    return cr
def determine_offense(df, dont_use_binary=False):
    """Assign a fine-grained label to each row of score DataFrame *df*.

    Args:
        df: DataFrame with PROFANITY/ABUSE/INSULT score columns and, when
            the binary gate is used, OTHER/OFFENSE (or NOT/HOF) columns.
        dont_use_binary: if True, skip the binary gate and label a row
            OTHER only when all three fine-grained scores are below 0.5.
            Defaults to False — FIX: the missing default made the
            one-argument call in find_best_binary_result a TypeError.

    Returns:
        pd.Series of labels in {'OTHER', 'PROFANITY', 'INSULT', 'ABUSE'}.
    """
    # Support both column vocabularies: OFFENSE/OTHER and HASOC's HOF/NOT.
    off = 'OFFENSE' if 'OFFENSE' in df else 'HOF'
    oth = 'OTHER' if 'OTHER' in df else 'NOT'

    def fine_grained(profanity, abuse, insult):
        # Argmax over the three fine-grained scores (ties favour PROFANITY,
        # then ABUSE — matching the original chained conditionals).
        if profanity > abuse and profanity > insult:
            return 'PROFANITY'
        return 'INSULT' if insult > abuse else 'ABUSE'

    if dont_use_binary:
        return pd.Series(['OTHER' if abuse < 0.5 and profanity < 0.5 and insult < 0.5
                          else fine_grained(profanity, abuse, insult)
                          for (profanity, abuse, insult) in
                          zip(df.PROFANITY, df.ABUSE, df.INSULT)])
    return pd.Series(['OTHER' if other > offense
                      else fine_grained(profanity, abuse, insult)
                      for (other, offense, profanity, abuse, insult) in
                      zip(df[oth], df[off], df.PROFANITY, df.ABUSE, df.INSULT)])
def find_best_binary_result(binary, abuse, insult, profanity, expected, epochs=10):
    """Grid-search per-class epoch checkpoints for the best macro-F1.

    Each of *binary*, *abuse*, *insult*, *profanity* is a format string with
    one ``{}`` placeholder for the epoch index; every combination of the
    four checkpoints in ``range(epochs)`` is merged, labelled via
    determine_offense and scored against *expected*.

    Returns:
        tuple: (binary, abuse, insult, profanity) epoch indices of the run
        with the highest macro-averaged F1 (empty tuple if none beat 0).
    """
    df_true = read_csv(expected, names=['text', 'binary', 'labels'])
    best = 0
    best_tuple = ()
    results = {}
    for b in range(epochs):
        for a in range(epochs):
            for i in range(epochs):
                for p in range(epochs):
                    df_binary = read_csv(binary.format(b), names=['text', 'OTHER', 'OFFENSE'])
                    ab = read_csv(abuse.format(a), names=['text', 'OTHER', 'ABUSE'])
                    ab.drop('OTHER', axis=1, inplace=True)
                    ins = read_csv(insult.format(i), names=['text', 'OTHER', 'INSULT'])
                    ins.drop('OTHER', axis=1, inplace=True)
                    pro = read_csv(profanity.format(p), names=['text', 'OTHER', 'PROFANITY'])
                    pro.drop('OTHER', axis=1, inplace=True)
                    # Align the three fine-grained score files on 'text',
                    # then attach the binary scores (right join keeps every
                    # row of the binary file).
                    ab_in = ab.merge(ins, how='inner')
                    df = ab_in.merge(pro, how='inner')
                    df = df.merge(df_binary, how="right")
                    df.fillna(value=0, inplace=True)
                    # BUG FIX: determine_offense takes (df, dont_use_binary);
                    # the original one-argument call raised a TypeError.
                    df["result"] = determine_offense(df, False)
                    result = pd.merge(df, df_true, how='inner')
                    print(confusion_matrix(result.labels.tolist(), result.result.tolist()))
                    print(classification_report(result.labels.tolist(), result.result.tolist()))
                    results[(b, a, i, p)] = \
                        classification_report(result.labels.tolist(), result.result.tolist(), output_dict=True)
                    if results[(b, a, i, p)]['macro avg']['f1-score'] > best:
                        best = results[(b, a, i, p)]['macro avg']['f1-score']
                        best_tuple = (b, a, i, p)
    # Guard: if no combination ever beat 0, results[()] would KeyError.
    if best_tuple:
        print(results[best_tuple])
    return best_tuple
def create_all_binary_result(binary, abuse, insult, profanity, expected, out, test, dont_use_binary):
    """Merge the four per-class prediction CSVs, derive a single label per row
    via determine_offense(), optionally score it against the gold file, and
    write id/result pairs to ``out``.

    NOTE(review): the final to_csv selects an 'id' column, which only exists
    in the HASOC-style gold frame — confirm callers only use this with
    HASOC-formatted ``expected`` files.
    """
    if 'HASOC' in expected:
        # HASOC gold files carry an id and an explicitness column and use their
        # own label names; map them onto the ABUSE/INSULT/PROFANITY/OTHER set.
        df_true = read_csv(expected, names=['id', 'text', 'binary', 'labels', 'explicit'], force_names=True)
        if not test:
            df_true.labels.replace('HATE', 'ABUSE', inplace=True)
            df_true.labels.replace('OFFN', 'INSULT', inplace=True)
            df_true.labels.replace('PRFN', 'PROFANITY', inplace=True)
            df_true.labels.replace('NONE', 'OTHER', inplace=True)
    else:
        df_true = read_csv(expected, names=['text', 'binary', 'labels'])
    # Per-class score files; distinct text_N column names so concat keeps them
    # apart.  force_names presumably overrides any header row — TODO confirm
    # against the read_csv helper.
    df_binary = read_csv(binary, names=['text', 'OTHER', 'OFFENSE'], force_names=True)
    ab = read_csv(abuse, names=['text_1', 'OTHER', 'ABUSE'], force_names=True)
    ab.drop('OTHER', axis=1, inplace=True)
    ins = read_csv(insult, names=['text_2', 'OTHER', 'INSULT'], force_names=True)
    ins.drop('OTHER', axis=1, inplace=True)
    pro = read_csv(profanity, names=['text_3', 'OTHER', 'PROFANITY'], force_names=True)
    pro.drop('OTHER', axis=1, inplace=True)
    # Column-wise concat: assumes all four files list the same rows in the
    # same order (no join key is used).
    df = pd.concat([ab, ins, pro, df_binary], axis=1)
    df.fillna(value=0, inplace=True)
    df["result"] = determine_offense(df, dont_use_binary)
    if not test:
        print(confusion_matrix(df_true.labels.tolist(), df.result.tolist()))
        print(classification_report(df_true.labels.tolist(), df.result.tolist()))
    if 'HASOC' in expected:
        # Translate predictions back to HASOC label names for the output file.
        df.result.replace('ABUSE', 'HATE', inplace=True)
        df.result.replace('INSULT', 'OFFN', inplace=True)
        df.result.replace('PROFANITY', 'PRFN', inplace=True)
        df.result.replace('OTHER', 'NONE', inplace=True)
    result = pd.concat([df, df_true], axis=1)
    result.to_csv(out, sep=',', index=False, columns=['id', 'result'])
def create_categorical_result(categorical, binary, expected, out):
    """Pick the argmax class from a 4-way categorical prediction file, score
    it against the gold labels, and write text/result pairs to ``out``.

    NOTE(review): ``binary`` is read but df_binary is never used afterwards —
    possibly leftover from an earlier ensembling step.
    """
    if 'HASOC' in expected:
        # Map HASOC label names onto the shared label set before scoring.
        df_true = read_csv(expected, names=['id', 'text', 'binary', 'labels', 'explicit'], force_names=True)
        df_true.labels.replace('HATE', 'ABUSE', inplace=True)
        df_true.labels.replace('OFFN', 'INSULT', inplace=True)
        df_true.labels.replace('PRFN', 'PROFANITY', inplace=True)
        df_true.labels.replace('NONE', 'OTHER', inplace=True)
    else:
        df_true = read_csv(expected, names=['text', 'binary', 'labels'])
    df_binary = read_csv(binary, names=['text', 'OTHER', 'OFFENSE'], force_names=True)
    df_categorical = read_csv(categorical, names=['text', 'OTHER', 'INSULT', 'ABUSE', 'PROFANITY'], force_names=True)
    # Row-wise argmax over the four class-score columns (text excluded).
    df_categorical["result"] = df_categorical.drop('text', axis=1).idxmax(axis=1)
    # Column-wise concat: assumes predictions and gold file share row order.
    result = pd.concat([df_categorical, df_true], axis=1)
    print(confusion_matrix(result.labels.tolist(), result.result.tolist()))
    print(classification_report(result.labels.tolist(), result.result.tolist()))
    result.to_csv(out, sep='\t', index=False, columns=['text', 'result'])
def voting(paths, out, weights=None):
    """Ensemble several binary prediction files by voting.

    Each file contributes a label_N column (0/1).  'vote' is the (optionally
    weighted) majority vote; 'vote_2' fires when ANY model voted positive and
    is what gets written to ``out``.  Assumes ``paths`` is non-empty and that
    the first file carries comment_id/text columns.
    """
    df = None
    for i, path in enumerate(paths):
        # Peek at the header line to detect whether this file carries a
        # comment_id column; use a context manager instead of open/close.
        with open(path) as f:
            first_line = f.readline()
        if 'comment_id' in first_line:
            names = ['comment_id', f'text_{i}', f'label_{i}']
        else:
            names = [f'text_{i}', f'label_{i}']
        df_right = read_csv(path, names=names, force_names=True)
        df = df_right if df is None else pd.concat([df, df_right], axis=1)
    labels = pd.concat([df[f'label_{i}'] for i in range(len(paths))], axis=1)
    if weights is None:
        # Unweighted majority: mean of 0/1 labels >= 0.5.
        df["vote"] = (labels.mean(axis=1) >= 0.5) * 1
    else:
        # Bug fix: weights used to be silently ignored, and 'vote_2' was never
        # created in that branch, so the to_csv below raised.  Use a weighted
        # mean of the per-model labels instead.
        df["vote"] = (labels.dot(list(weights)) / float(sum(weights)) >= 0.5) * 1
    # 'vote_2': positive if at least one model voted positive.
    df["vote_2"] = (labels.sum(axis=1) >= 1) * 1
    df.to_csv(out, sep='\t', index=False, columns=['comment_id', 'text_0', 'vote_2'])
if __name__ == '__main__':
    # CLI entry point: dispatch to one of the result-building helpers based on
    # which argument combination was supplied.  create_toxic_result and
    # create_binary_result are defined elsewhere in this file.
    argparser = ArgumentParser()
    argparser.add_argument('--toxic_category', choices=["toxic", "engaging", "fact", "none"], default="none")
    argparser.add_argument('--binary')
    argparser.add_argument('--abuse')
    argparser.add_argument('--insult')
    argparser.add_argument('--profanity')
    argparser.add_argument('--categorical')
    argparser.add_argument('--expected')
    argparser.add_argument('--vote', nargs='+')
    argparser.add_argument('--weights', nargs='+', type=float)
    argparser.add_argument('--out', required=True)
    argparser.add_argument('--find', action='store_true')
    argparser.add_argument('--test', action='store_true')
    argparser.add_argument('--hasoc', action='store_true')
    argparser.add_argument('--dont_use_binary', action='store_true')
    args = argparser.parse_args()
    if args.vote is not None:
        # Ensemble several prediction files by voting.
        voting(args.vote, args.out, args.weights)
    elif args.categorical is not None:
        # Single 4-way categorical model.
        create_categorical_result(args.categorical, args.binary, args.expected, args.out)
    elif args.toxic_category != "none":
        create_toxic_result(args.binary, args.expected, args.out, args.toxic_category, test=args.test)
    elif args.binary is not None and args.abuse is not None and args.insult is not None and args.profanity is not None:
        # Four binary models; with --find the file names are treated as format
        # strings and the best epoch combination is grid-searched first.
        if args.find:
            indices = find_best_binary_result(args.binary, args.abuse, args.insult, args.profanity, args.expected)
            create_all_binary_result(args.binary.format(indices[0]), args.abuse.format(indices[1]),
                                     args.insult.format(indices[2]), args.profanity.format(indices[3]),
                                     args.expected, args.out, args.test, args.dont_use_binary)
        else:
            create_all_binary_result(args.binary, args.abuse, args.insult, args.profanity, args.expected, args.out,
                                     args.test, args.dont_use_binary)
    else:
        # Fallback: plain binary evaluation.
        create_binary_result(args.binary, args.expected, args.out, hasoc=args.hasoc, test=args.test)
| 54.411765 | 119 | 0.623063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,645 | 0.148198 |
631e5480b8873184d63c0836ee3dfc256b04bd00 | 157 | py | Python | cap8/ex2.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap8/ex2.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
] | null | null | null | cap8/ex2.py | felipesch92/livroPython | 061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307 | [
"MIT"
def multiplo(a, b):
    """Return True if a is a multiple of b (i.e. a % b == 0)."""
    # Idiom fix: return the comparison directly instead of the
    # if/return True/else/return False ladder.
    return a % b == 0


print(multiplo(2, 1))
print(multiplo(9, 5))
print(multiplo(81, 9))
| 17.444444 | 22 | 0.566879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
63220bdf987500349f76e2f5fb63d85c0d153e06 | 1,165 | py | Python | parsifal/apps/reviews/migrations/0020_searchresult.py | ShivamPytho/parsifal | 9386a0fb328d4880d052c94e9224ce50a9b2f6a6 | [
"MIT"
] | 342 | 2015-01-14T17:25:35.000Z | 2022-02-26T11:59:09.000Z | parsifal/apps/reviews/migrations/0020_searchresult.py | ShivamPytho/parsifal | 9386a0fb328d4880d052c94e9224ce50a9b2f6a6 | [
"MIT"
] | 56 | 2015-01-23T12:57:59.000Z | 2022-03-12T01:01:38.000Z | parsifal/apps/reviews/migrations/0020_searchresult.py | ShivamPytho/parsifal | 9386a0fb328d4880d052c94e9224ce50a9b2f6a6 | [
"MIT"
] | 200 | 2015-01-22T21:50:41.000Z | 2021-12-28T13:42:57.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import parsifal.apps.reviews.models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the SearchResult model linking a review/source/session to the
    documents imported from a search-result file."""

    dependencies = [
        ('library', '0013_auto_20150710_1614'),
        ('reviews', '0019_study_comments'),
    ]
    operations = [
        migrations.CreateModel(
            name='SearchResult',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Upload path is computed by a helper in the reviews app.
                ('imported_file', models.FileField(null=True, upload_to=parsifal.apps.reviews.models.search_result_file_upload_to)),
                ('documents', models.ManyToManyField(to='library.Document')),
                ('review', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Review')),
                # Nullable: a result may exist without a recorded session.
                ('search_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.SearchSession', null=True)),
                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reviews.Source')),
            ],
        ),
    ]
| 40.172414 | 138 | 0.654077 | 987 | 0.84721 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.206009 |
6322146c8d971464c6f726ebdba3a3d7a2540028 | 6,518 | py | Python | mastiff/plugins/analysis/EXE/EXE-singlestring.py | tt1379/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 164 | 2015-02-09T18:19:26.000Z | 2022-02-23T09:49:18.000Z | mastiff/plugins/analysis/EXE/EXE-singlestring.py | ashishhmittal/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 1 | 2016-05-20T16:21:33.000Z | 2016-05-20T16:21:33.000Z | mastiff/plugins/analysis/EXE/EXE-singlestring.py | ashishhmittal/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 43 | 2015-03-03T11:15:58.000Z | 2021-10-02T02:14:57.000Z | #!/usr/bin/env python
"""
Copyright 2012-2013 The MASTIFF Project, All Rights Reserved.
This software, having been partly or wholly developed and/or
sponsored by KoreLogic, Inc., is hereby released under the terms
and conditions set forth in the project's "README.LICENSE" file.
For a list of all contributors and sponsors, please refer to the
project's "README.CREDITS" file.
"""
__doc__ = """
Single-byte string plug-in
Plugin Type: EXE
Purpose:
Attackers have begun to obfuscate embedded strings by moving a single byte
at a time into a character array. In assembler, it looks like:
mov mem, 0x68
mov mem+4, 0x69
mov mem+8, 0x21
...
Using a strings program, these strings will not be found. This script looks
for any strings embedded in this way and prints them out. It does this by
looking through the file for C6 opcodes, which are the start of the
"mov mem/reg, imm" instruction. It will then decode it, grab the value and
create a string from it.
Requirements:
- distorm3 (http://code.google.com/p/distorm/)
Output:
None
"""
__version__ = "$Id: 6322146c8d971464c6f726ebdba3a3d7a2540028 $"
import logging
import re
import os
try:
    from distorm3 import Decode, Decode32Bits
except ImportError as err:
    # Bug fix: the message referenced an undefined name ("error") and would
    # itself raise a NameError; report the real exception instead.  The
    # "except ... as" form and print() call are valid on both Python 2.6+ and 3.
    print("EXE-SingleString: Could not import distorm3: %s" % err)
import mastiff.plugins.category.exe as exe
# Change the class name and the base class
class SingleString(exe.EXECat):
    """Extract strings that malware builds one byte at a time via
    "mov mem/reg, imm8" (opcode 0xC6) instructions."""

    def __init__(self):
        """Initialize the plugin with its default options."""
        exe.EXECat.__init__(self)
        self.length = 3   # minimum string length to report
        self.raw = False  # if True, keep \t\r\n verbatim instead of escaping

    def activate(self):
        """Activate the plugin."""
        exe.EXECat.activate(self)

    def deactivate(self):
        """Deactivate the plugin."""
        exe.EXECat.deactivate(self)

    def findMov(self, filename):
        """Scan the file for 0xC6 opcodes, decode each candidate with distorm3
        and return a dict: offset -> [mnemonic, destination operand,
        immediate (hex string), instruction size]."""
        # NOTE(review): the byte comparison below assumes Python 2 str bytes;
        # under Python 3 this would need b"\xc6".
        f = open(filename, 'rb')
        offset = 0
        instructs = {}
        mybyte = f.read(1)
        while mybyte:
            if mybyte == "\xc6":
                # found a mov op - decode and record it
                f.seek(offset)
                mybyte = f.read(16)
                # p comes back as a list of (offset, size, instruction, hexdump)
                p = Decode(offset, mybyte, Decode32Bits)
                # keep only real "MOV dst, imm" decodings
                ma = re.match(r'(MOV) ([\S\s]+), ([x0-9a-fA-F]+)', p[0][2])
                if ma is not None:
                    # mnemonic, operand, immediate, size
                    instructs[offset] = [ma.group(1), ma.group(2), ma.group(3), p[0][1]]
            # advance one byte at a time so overlapping candidates are found
            f.seek(offset + 1)
            mybyte = f.read(1)
            offset = offset + 1
        f.close()
        return instructs

    def decodeBytes(self, instructs):
        """Stitch consecutive MOV immediates into strings.

        Instructions are "consecutive" when one ends exactly where the next
        starts (off + size).  Returns a list of [start offset, string] pairs.
        """
        curString = ""
        curOffset = 0
        strList = []
        usedBytes = []
        for off in sorted(instructs.keys()):
            if off not in usedBytes:
                # remember where this run of instructions started
                if curOffset == 0:
                    curOffset = off
                while off in instructs:
                    usedBytes.append(off)
                    hexVal = int(instructs[off][2], 16)
                    opLen = instructs[off][3]
                    # Bug fix: the original condition parsed as
                    # "hexVal < 32 or (hexVal > 126 and <always-true>)", which
                    # terminated strings on tab/CR/LF even though the escape
                    # handling below clearly intends to keep them.
                    if (hexVal < 32 or hexVal > 126) and hexVal not in (9, 10, 13):
                        # non-printable byte ends the current string
                        strList.append([curOffset, curString])
                        curOffset = off + opLen
                        curString = ""
                    else:
                        # printable (or whitespace) byte: append, escaping
                        # whitespace unless raw output was requested
                        if not self.raw and hexVal == 10:
                            # line feed (bug fix: was escaped as \r)
                            curString = curString + "\\n"
                        elif not self.raw and hexVal == 13:
                            # carriage return (bug fix: was escaped as \n)
                            curString = curString + "\\r"
                        elif not self.raw and hexVal == 9:
                            # tab
                            curString = curString + "\\t"
                        else:
                            curString = curString + chr(hexVal)
                    off = off + opLen
                # flush whatever is left of the run
                strList.append([curOffset, curString])
                curOffset = 0
                curString = ""
                usedBytes.append(off)
        return strList

    def analyze(self, config, filename):
        """Collect MOV-built strings from the file and write them to the
        configured log directory.  Returns True on success."""
        # sanity check to make sure we can run
        if not self.is_activated:
            return False
        log = logging.getLogger('Mastiff.Plugins.' + self.name)
        log.info('Starting execution.')
        self.length = config.get_var(self.name, 'length')
        if self.length is None:
            self.length = 3
        self.raw = config.get_bvar(self.name, 'raw')
        # find the candidate instructions in the file
        instructs = self.findMov(filename)
        # then stitch them into strings
        strlist = self.decodeBytes(instructs)
        self.output_file(config.get_var('Dir', 'log_dir'), strlist)
        return True

    def output_file(self, outdir, strlist):
        """Write strings of at least self.length chars to single-string.txt.
        Returns False if the output file cannot be opened."""
        log = logging.getLogger('Mastiff.Plugins.' + self.name + '.output_file')
        # keep only strings long enough to be interesting
        outstr = ""
        for string in strlist:
            if len(string[1]) >= int(self.length):
                outstr = outstr + '0x%x: %s\n' % (string[0], string[1])
        if len(outstr) > 0:
            try:
                outfile = open(outdir + os.sep + 'single-string.txt', 'w')
            except IOError as err:
                log.debug("Cannot open single-string.txt: %s" % err)
                return False
            outfile.write(outstr)
            outfile.close()
        else:
            log.debug('No single-byte strings found.')
        return True
| 31.640777 | 109 | 0.544799 | 5,120 | 0.785517 | 0 | 0 | 0 | 0 | 0 | 0 | 2,571 | 0.394446 |
6322cf5faf1311f6d38c18bbb9cc68ac52d6e045 | 2,874 | py | Python | Libs/Scene Recognition/SceneRecognitionCNN.py | vpulab/Semantic-Guided-Scene-Attribution | 1e247e48b549eb648d833050fb150f041948422d | [
"MIT"
] | 3 | 2021-03-03T09:07:53.000Z | 2021-07-19T10:44:32.000Z | Libs/Scene Recognition/SceneRecognitionCNN.py | vpulab/Semantic-Guided-Scene-Attribution | 1e247e48b549eb648d833050fb150f041948422d | [
"MIT"
] | null | null | null | Libs/Scene Recognition/SceneRecognitionCNN.py | vpulab/Semantic-Guided-Scene-Attribution | 1e247e48b549eb648d833050fb150f041948422d | [
"MIT"
] | 1 | 2021-03-11T09:17:04.000Z | 2021-03-11T09:17:04.000Z | import torch.nn as nn
from torchvision.models import resnet
class SceneRecognitionCNN(nn.Module):
    """Scene-recognition classifier built on a pretrained ResNet backbone.

    The backbone's stem is re-created locally (with pooling indices exposed),
    its four residual stages are reused as encoders, and a dropout + linear
    head maps the pooled features to ``scene_classes`` logits.
    """

    def __init__(self, arch, scene_classes=1055):
        super(SceneRecognitionCNN, self).__init__()

        # ---- Backbone selection -------------------------------------- #
        # 'ResNet-18' or 'ResNet-50'; the final feature width differs.
        if arch == 'ResNet-18':
            backbone = resnet.resnet18(pretrained=True)
            feature_width = 512
        elif arch == 'ResNet-50':
            backbone = resnet.resnet50(pretrained=True)
            feature_width = 2048

        # ---- Stem (conv + BN + ReLU + max-pool with indices) --------- #
        self.in_block = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
        )

        # ---- Residual encoder stages borrowed from the backbone ------ #
        self.encoder1 = backbone.layer1
        self.encoder2 = backbone.layer2
        self.encoder3 = backbone.layer3
        self.encoder4 = backbone.layer4

        # ---- Classification head ------------------------------------- #
        self.dropout = nn.Dropout(0.3)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(feature_width, scene_classes)

        # Training criterion used by loss().
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, x):
        """Run an RGB batch through the network.

        :param x: RGB image batch
        :return: scene-class logits
        """
        # Stem returns (features, pooling indices); indices are unused here.
        x, _pool_indices = self.in_block(x)

        # Encoder stages.
        feat = self.encoder1(x)
        feat = self.encoder2(feat)
        feat = self.encoder3(feat)
        feat = self.encoder4(feat)

        # Pool, flatten, regularize, classify.
        pooled = self.avgpool(feat)
        pooled = pooled.view(pooled.size(0), -1)
        pooled = self.dropout(pooled)
        return self.fc(pooled)

    def loss(self, x, target):
        """Cross-entropy loss for a batch of predictions.

        :param x: logits produced by forward()
        :param target: ground-truth scene labels
        :return: scalar loss tensor
        """
        # Predictions and labels must agree on batch size.
        assert (x.shape[0] == target.shape[0])
        return self.criterion(x, target.long())
| 30.903226 | 81 | 0.451983 | 2,811 | 0.978079 | 0 | 0 | 0 | 0 | 0 | 0 | 1,098 | 0.382046 |
63243b0ed7269143b65e929fe80aa6f12a71bb2a | 1,309 | py | Python | raduga/aws/ec2.py | tuxpiper/raduga | e63bdd8f9d4154c0ac0a72a1182da5d137e38514 | [
"MIT"
] | null | null | null | raduga/aws/ec2.py | tuxpiper/raduga | e63bdd8f9d4154c0ac0a72a1182da5d137e38514 | [
"MIT"
] | null | null | null | raduga/aws/ec2.py | tuxpiper/raduga | e63bdd8f9d4154c0ac0a72a1182da5d137e38514 | [
"MIT"
] | null | null | null | from time import sleep
class AWSEC2(object):
    """Thin helper around an EC2 (boto-style) connection for instance and
    AMI management."""

    def __init__(self, target):
        # target supplies a pre-configured EC2 connection
        self.conn = target.get_ec2_conn()

    def get_instance_state(self, instance_id):
        """Return the state string of the given instance."""
        instance = self.conn.get_only_instances(instance_id)[0]
        return instance.state

    def stop_instance(self, instance_id):
        """Request a stop of the given instance (asynchronous)."""
        self.conn.stop_instances(instance_id)

    def create_ami(self, instance_id, name, description, tags):
        """Create an AMI from a stopped instance, tag it, and return its id.

        Raises RuntimeError if the instance is not in the 'stopped' state.
        """
        instance = self.conn.get_only_instances(instance_id)[0]
        if instance.state != 'stopped':
            raise RuntimeError("Won't create AMI from non-stopped instance")
        image_id = self.conn.create_image(instance_id, name, description)
        # brief pause so the new image is visible to the tagging call
        sleep(1)
        self.conn.create_tags(image_id, tags)
        return image_id

    def get_ami_state(self, image_id):
        """Return the state string of the given AMI."""
        ami = self.conn.get_all_images(image_id)[0]
        return ami.state

    def find_ami(self, **tags):
        """Return the id of the single self-owned AMI matching all tags.

        Returns None when nothing matches; raises RuntimeError when more
        than one AMI matches.
        """
        # Bug fix: the original used a Python-2-only tuple-parameter lambda
        # (``lambda (k, v): ...``), a SyntaxError on Python 3.  A generator
        # expression is equivalent and valid on both.
        filters = dict(("tag:" + key, value) for key, value in tags.items())
        results = self.conn.get_all_images(owners=['self'], filters=filters)
        if not results:
            return None
        elif len(results) == 1:
            return results[0].id
        else:
            raise RuntimeError("More than one AMI is matching the requested tags (??!)")
| 35.378378 | 88 | 0.638655 | 1,284 | 0.980901 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.110008 |
6324fc38f5d2e0297dfed36c24f30bc5c9a5e87b | 3,786 | py | Python | sails/commands.py | metrasynth/solar-sails | 3a10774dad29d85834d3acb38171741b3a11ef91 | [
"MIT"
] | 6 | 2016-11-22T14:32:55.000Z | 2021-08-15T01:35:33.000Z | sails/commands.py | metrasynth/s4ils | efc061993d15ebe662b72ab8b3127f7f7ce2f66b | [
"MIT"
] | 2 | 2022-03-18T16:47:43.000Z | 2022-03-18T16:47:44.000Z | sails/commands.py | metrasynth/s4ils | efc061993d15ebe662b72ab8b3127f7f7ce2f66b | [
"MIT"
] | 2 | 2019-07-09T23:44:08.000Z | 2021-08-15T01:35:37.000Z | import rv.api
class Command(object):
    """Base class for declarative commands.

    Subclasses declare an ``args`` tuple of attribute names; positional and
    keyword constructor arguments are mapped onto those names.  Attributes
    not supplied simply remain unset.
    """

    args = ()
    processed = False

    def __init__(self, *args, **kw):
        self._apply_args(*args, **kw)

    def __repr__(self):
        parts = []
        for name in self.args:
            if hasattr(self, name):
                parts.append('{}={!r}'.format(name, getattr(self, name)))
        body = ' '.join(parts)
        if body:
            body = ' ' + body
        return '<{}{}>'.format(self.__class__.__name__, body)

    def _apply_args(self, *args, **kw):
        # Positional values map onto args in declaration order; keyword
        # values are applied only for declared attribute names.
        for name, value in zip(self.args, args):
            setattr(self, name, value)
        for name, value in kw.items():
            if name in self.args:
                setattr(self, name, value)

    def copy(self, *args, **kw):
        """Return a new command with this one's attributes, optionally
        overridden by the given positional/keyword arguments."""
        clone = self.__class__()
        for name in self.args:
            if hasattr(self, name):
                setattr(clone, name, getattr(self, name))
        clone._apply_args(*args, **kw)
        return clone
class ConnectModules(Command):
    """Command recording that ``src`` should be patched into ``dest`` on the
    given engine; fields are stored positionally by Command."""
    args = 'engine', 'src', 'dest'
class Engine(Command):
    """Command wrapping an engine.  Exposes the engine's output module and
    numbered tracks through the small helper classes below."""

    class Track(object):
        """Handle for one numbered track on the engine."""

        def __init__(self, engine, index):
            self.engine = engine
            self.index = index

        def __repr__(self):
            return '<Track {}>'.format(self.index)

        def __ror__(self, other):
            # "note_on | track" binds the note to this engine and track.
            # Anything other than a NoteOn falls through and yields None.
            if isinstance(other, NoteOn):
                note = other.copy(engine=self.engine, track=self)
                return note

        def off(self):
            # Emit a note-off for whatever is playing on this track.
            return NoteOff(self.engine, self)

    class Output(object):
        """Handle for the engine's output module (module index 0)."""

        index = 0

        def __init__(self, engine):
            self.engine = engine

        def __repr__(self):
            return '<Output>'

        def __lshift__(self, other):
            # "output << module" connects the module into the output.
            return ConnectModules(self.engine, other.module, self)

        def __rshift__(self, other):
            # "output >> module" connects the output into the module.
            return ConnectModules(self.engine, self, other.module)

    def __init__(self, *args, **kw):
        super(Engine, self).__init__(*args, **kw)
        self.output = self.Output(self)

    def new_module(self, obj, *args, **kw):
        """Instantiate ``obj`` if it is an rv module class, then wrap the
        module instance in a Module command bound to this engine.

        Raises ValueError if obj is neither an rv module class nor instance.
        """
        if isinstance(obj, type) and issubclass(obj, rv.m.Module):
            obj = obj(*args, **kw)
        if isinstance(obj, rv.m.Module):
            return Module(self, obj)
        else:
            raise ValueError()

    def track(self, index):
        """Return a Track handle for the given track index."""
        return self.Track(self, index)
class Generator(Command):
    """Command wrapping a coroutine-style generator function.

    ``fn(*fn_args, **fn_kw)`` must return a generator that is primed with an
    initial ``send(None)`` and thereafter receives cursor objects.
    """

    args = 'fn', 'fn_args', 'fn_kw'
    generator = None  # created lazily by start()

    @property
    def started(self):
        """True once start() has created the underlying generator."""
        return self.generator is not None

    def start(self):
        """Instantiate the generator and prime it; a no-op when already started."""
        if self.started:
            return
        self.generator = self.fn(*self.fn_args, **self.fn_kw)
        self.generator.send(None)

    def advance(self, cursor):
        """Send the cursor into the generator; when the generator is
        exhausted, emit ``stop() | cursor``."""
        if self.generator is None:
            return
        try:
            self.generator.send(cursor)
        except StopIteration:
            self.stop() | cursor

    def stop(self):
        """Return the matching GeneratorStop command."""
        return GeneratorStop(self)

    @classmethod
    def factory(cls, fn):
        """Wrap ``fn`` so that calling it yields a Generator command instead
        of a raw generator."""
        def make(*args, **kw):
            return cls(fn, args, kw)
        return make
class GeneratorStop(Command):
    """Command signalling that a Generator's underlying generator finished."""
    args = 'parent',
class Module(Command):
    """Command wrapping an rv module attached to an engine."""

    args = 'engine', 'module'

    def __lshift__(self, other):
        # Bug fix: ConnectModules expects the engine as its first argument
        # (see Engine.Output.__lshift__); previously this Module command
        # itself was passed in the engine slot.
        return ConnectModules(self.engine, other.module, self.module)

    def __rshift__(self, other):
        # Same fix as __lshift__: pass the engine, not the wrapper.
        return ConnectModules(self.engine, self.module, other.module)

    def on(self, note, vel=None):
        """Return a NoteOn command for this module on its engine."""
        return NoteOn(note, vel, self.engine, self.module)
class NoteOff(Command):
    """Command representing a note-off event on the given engine/track."""
    args = 'engine', 'track'
class NoteOn(Command):
    """Command representing a note-on event; engine/module/track fields are
    filled in as the note is routed (see Engine.Track.__ror__)."""
    args = 'note', 'vel', 'engine', 'module', 'track'

    def off(self):
        # Pair this note with a NoteOff on the same engine/track.
        return NoteOff(self.engine, self.track)
| 23.811321 | 66 | 0.542789 | 3,748 | 0.989963 | 0 | 0 | 212 | 0.055996 | 0 | 0 | 159 | 0.041997 |
6326017b69b64c91b9adacf50eeea018744aa2c3 | 22,527 | py | Python | rfcc/model.py | IngoMarquart/rfcc | 0e2467610edbe61287fb5f8693e50225327d791f | [
"MIT"
] | null | null | null | rfcc/model.py | IngoMarquart/rfcc | 0e2467610edbe61287fb5f8693e50225327d791f | [
"MIT"
] | 1 | 2021-04-01T10:05:57.000Z | 2021-04-01T11:46:14.000Z | rfcc/model.py | IngoMarquart/rfcc | 0e2467610edbe61287fb5f8693e50225327d791f | [
"MIT"
] | null | null | null | from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from scipy.spatial.distance import squareform, pdist
from rfcc.data_ops import ordinal_encode
import pandas as pd
import numpy as np
from typing import Union, Optional
from scipy.cluster.hierarchy import linkage, fcluster
from rfcc.path_ops import recurse_path, encode_path_ordinal
from itertools import combinations
from scipy.stats import skew
class cluster_model():
def __init__(self, model=RandomForestClassifier, max_clusters: Optional[int] = None, logger=None, **kwargs):
"""
Initialize RFCC cluster model
Parameters
----------
model : callable, optional
scikit learn ensemble estimator (RandomForestClassifier,RandomForestRegressor). The default is RandomForestClassifier.
max_clusters : Optional[int], optional
Maximum number of leafs in each tree. The default is None.
logger : TYPE, optional
Unused. The default is None.
**kwargs : TYPE
Keyword arguments to be passed to the sklearn estimator.
Returns
-------
None.
"""
# Try to construct model
try:
self.model = model(max_leaf_nodes=max_clusters, **kwargs)
except:
raise AttributeError(
"Could not initialize {} model with parameters: {}".format(model, kwargs))
self.encoding_dict = None
self.X_col_names = None
self.y_col_names = None
self.y_encoding_dict = None
self.y_enc = None
self.X_enc = None
self.fitted = False
self.categoricals = None
self.cluster_list = None
self.partition = None
self.unique_cluster = None
self.cluster_desc = None
self.leaves = None
self.encode_y = False
    def clusters(self):
        """
        Returns cluster assignments (populated by fit()).

        Returns
        -------
        dict
            Observation index -> cluster id.
        """
        return self.partition
    def cluster_descriptions(self, variables_to_consider: Optional[list] = None, continuous_measures: Optional[list] = None):
        """
        Returns a dataframe of clusters and their compositions.

        One row per cluster; columns are the cluster id, its size, per-outcome
        summaries, and per-feature summaries (categorical features get a single
        column, continuous features one column per requested measure).

        Parameters
        ----------
        variables_to_consider : Optional[list], optional
            List of feature columns. If None, all features are used.
        continuous_measures : Optional[list], optional
            List of measures (mean, std, median, max, min, skew). If not given, all are returned.

        Returns
        -------
        pd.DataFrame
            DataFrame with cluster descriptions.
        """
        assert self.fitted is True, "Model needs to be fitted to return cluster descriptions!"
        # Unpack the summaries precomputed during fit().
        stats = self.cluster_desc['stats']
        cont = self.cluster_desc['cont']
        catg = self.cluster_desc['cat']
        y = self.cluster_desc['y']
        if variables_to_consider is None:
            variables_to_consider = self.X_col_names
        # Allow a single measure name to be passed as a bare string.
        if isinstance(continuous_measures, str):
            continuous_measures = [continuous_measures]
        medoids = []
        for cl in self.unique_cluster:
            cl_dict = {}
            cl_dict['Cluster_ID'] = cl
            cl_dict['Nr_Obs'] = stats[cl]['Nr']
            # Outcome variables: categorical outcomes are a single label,
            # continuous outcomes a dict of measure -> value.
            for col in y:
                cl_y = y[col][cl]
                if isinstance(cl_y, str): # categorical
                    cl_dict[col] = cl_y
                else: # Measures
                    for cm in cl_y:
                        if continuous_measures is not None:
                            if cm in continuous_measures:
                                label = '{}-{}'.format(col, cm)
                                cl_dict[label] = cl_y[cm]
                        else:
                            label = '{}-{}'.format(col, cm)
                            cl_dict[label] = cl_y[cm]
            # Categorical features: one descriptive value per cluster.
            for col in catg:
                if col in variables_to_consider:
                    cl_dict[col] = catg[col][cl]
            # Continuous features: one column per (feature, measure) pair.
            for col in cont:
                if col in variables_to_consider:
                    cl_cont = cont[col][cl]
                    for cm in cl_cont:
                        if continuous_measures is not None:
                            if cm in continuous_measures:
                                label = '{}-{}'.format(col, cm)
                                cl_dict[label] = cl_cont[cm]
                        else:
                            label = '{}-{}'.format(col, cm)
                            cl_dict[label] = cl_cont[cm]
            medoids.append(cl_dict)
        return pd.DataFrame(medoids)
def get_observations(self, cluster_id: int):
"""
Get observations of a given cluster
Parameters
----------
cluster_id : int
ID of cluster.
Returns
-------
list
IDs of observations.
"""
return list(np.where(self.cluster_list == cluster_id)[0])
    def get_cluster(self, observation_id: int):
        """
        Return the cluster assigned to a given observation.

        Parameters
        ----------
        observation_id : int
            Positional index of the observation (as used during fit()).

        Returns
        -------
        int
            ID of the cluster.
        """
        return self.cluster_list[observation_id]
    def path_analysis(self, estimator_id: Optional[int] = None, return_as: Optional[str] = "frame"):
        """
        For one estimator in the ensemble, describe the decision path leading
        to each of its leaves (feature thresholds, output predictions, sizes).

        Parameters
        ----------
        estimator_id : Optional[int], optional
            Specific estimator ID. Defaults to the first estimator.
        return_as : Optional[str], optional
            Return a DataFrame ("frame") or a dictionary ("dict"). The default is "frame".

        Returns
        -------
        pd.DataFrame or dict
            Path analysis keyed/indexed by leaf node id.
        """
        assert self.fitted is True, "Model needs to be fitted to return paths descriptions!"
        if estimator_id is not None:
            assert estimator_id in range(
                0, len(self.model.estimators_)), "No estimator for this id found."
        else:
            # TODO: Pick best tree or aggregate
            estimator_id = 0
        estimator = self.model.estimators_[estimator_id]
        # Leaf node reached by each observation in this estimator (saved by fit()).
        cluster_assignments = self.leaves[:, estimator_id]
        leaf_nodes, nr_obs = np.unique(cluster_assignments, return_counts=True)
        # descriptions[leaf_id] collects the human-readable description of the
        # decision path ending in that leaf.
        descriptions = dict()
        # Walk each leaf's path from the leaf back to the root and encode it.
        for i, leaf_id in enumerate(leaf_nodes):
            # Accumulators filled by recurse_path for this leaf.
            leaf_path = list()
            leaf_feature = list()
            leaf_threshold = list()
            leaf_direction = list()
            # Start with leaf, which are the cluster nodes
            current_node = leaf_id
            # Extract paths, features and thresholds
            leaf_path, leaf_feature, leaf_threshold, leaf_direction = recurse_path(
                current_node, leaf_path, leaf_feature, leaf_threshold, leaf_direction, estimator)
            # Translate the path into per-feature conditions, decoding ordinal
            # categories via the encoding dict saved during fit().
            desc_path = encode_path_ordinal(
                self.X_col_names, leaf_path, leaf_feature, leaf_threshold, leaf_direction, self.encoding_dict)
            # Attach the estimator's output prediction(s) for this leaf.
            outputs = estimator.tree_.value[leaf_id]
            output_cols = []
            for j, ycol in enumerate(self.y_col_names):
                nameing = "Output_{}".format(ycol)
                output_cols.append(nameing)
                desc_path[nameing] = outputs[j]
            desc_path['Nr_Obs'] = nr_obs[i]
            desc_path['Cluster_ID'] = leaf_id
            descriptions[leaf_id] = desc_path
        # Assemble the frame: one row per leaf, '-' for conditions a path
        # never touches, columns restricted to id/size/outputs/features.
        df = pd.DataFrame(descriptions).T
        df = df.fillna("-")
        a = ['Cluster_ID', 'Nr_Obs']
        a.extend(output_cols)
        a.extend(self.X_col_names)
        df = df[np.intersect1d(df.columns, a)]
        if return_as == "dict":
            return descriptions
        else:
            return df
# sklearn interface
    def predict(self, X, **kwargs):
        """
        Predict on X.

        Note: uses categorical transformations that were used when fitting the model

        Parameters
        ----------
        X : pd.DataFrame
            X.
        **kwargs :
            Further arguments to pass to the predict function.

        Returns
        -------
        np.array
            Output predictions.
        """
        assert self.fitted is True, "Model needs to be fitted to return paths descriptions!"
        # Transform data to conform to fitted model.
        # A dummy all-zero y is built only because __transform_data expects
        # one; it is discarded after the call.
        y=pd.Series(np.zeros(X.shape[0]))
        # NOTE(review): self.categoricals is set to None in __init__ and never
        # reassigned, so __transform_data re-derives categorical columns from
        # X's dtypes and refits the ordinal encoder instead of reusing the
        # encoding chosen during fit() -- confirm this matches training-time
        # encoding for correct predictions.
        X,y,categoricals = self.__transform_data(X, y, False, self.categoricals, False)
        return self.model.predict(X, **kwargs)
    def score(self, X, y, **kwargs):
        """
        Score the model.

        If regression, this will be R-squared.
        If classification, this will be mean accuracy.

        Parameters
        ----------
        X : pd.DataFrame
            X.
        y : pd.DataFrame
            Y.
        **kwargs :
            Further arguments to pass to score.

        Returns
        -------
        float
            R-Squared or Mean Accuracy.
        """
        assert self.fitted is True, "Model needs to be fitted to return paths descriptions!"
        # Transform data to conform to fitted model.
        # NOTE(review): as in predict(), self.categoricals is always None here
        # (only __init__ assigns it), so categorical columns and the ordinal
        # encoding are re-derived from this X rather than reused from fit() --
        # confirm the encodings line up.
        X,y,categoricals = self.__transform_data(X, y, self.encode_y, self.categoricals, False)
        return self.model.score(X, y, **kwargs)
def fit(self, X: pd.DataFrame, y: Union[pd.DataFrame, pd.Series], encode: Optional[list] = None, encode_y: Optional[bool] = False, clustering_type: Optional[str] = "rfcc", t_param: Optional[float] = None, linkage_method: Optional[str] = 'average', **kwargs):
"""
Fit the model, cluster observations and derive cluster measures and descriptions.
Parameters
----------
X : pd.DataFrame
Input features.
y : Union[pd.DataFrame, pd.Series]
Outcome variables.
encode : Optional[list], optional
List of features (column names) to be encoded ordinally. If None, all are encoded. The default is None.
encode_y : Optional[bool], optional
Whether to encode outcome variable. The default is False.
clustering_type : Optional[str], optional
"rfcc" is consensus clustering on decision paths, "binary" is clustering purely on leafs. The default is "rfcc".
t_param : Optional[float], optional
Cutoff parameter for hierarchical clustering. Choose cluster levels such that all distances within a cluster are less than this. Higher values imply larger clusters.
If None, then the clusterer will choose the amount of clusters to equal the average number of leafs. The default is None.
linkage_method : Optional[str], optional
Linkage method (distance method) for clusterer.
Options: "single","complete","average","ward". The default is 'average'.
**kwargs :
Further keyword arguments to pass to the sklean fit method.
Returns
-------
None.
"""
assert isinstance(X, pd.DataFrame), "Please provide X as pd.DataFrame"
assert isinstance(y, (pd.Series, pd.DataFrame)
), "Please provide y as pd.DataFrame or pd.Series"
# Set np seed if given
random_state = kwargs.get('random_state', 0)
np.random.seed(random_state)
# Prepare data
X,y,encode = self.__transform_data(X, y, encode_y, encode)
self.encode = encode
self.X_col_names = X.columns
self.y_col_names = y.columns
self.encode_y = encode_y
n = y.shape[0]
y_index = np.array(y.index)
# Need a slightly different call for 1 dimensional outputs
if y.shape[1] <= 1:
self.model.fit(X, np.ravel(y), **kwargs)
else:
self.model.fit(X, y, **kwargs)
# Model has been fitted
self.fitted = True
# Get nr_obs and save the leaf nodes
nr_obs = X.shape[0]
self.leaves = self.model.apply(X)
nr_leave_nodes = [len(np.unique(self.leaves[:, i]))
for i in range(0, self.leaves.shape[1])]
avg_nr_leaves = int(np.mean(nr_leave_nodes))
# Regular clustering, or consensus clustering
if clustering_type == "rfcc":
# Create one distance matrix for all estimators
# TODO: use lil sparse matrix and a cutoff to populate matrices
# if nr_obs is very high!
distance_matrix = np.zeros([nr_obs, nr_obs], dtype=float)
nr_estimator = len(self.model.estimators_)
# Get distances in each decision tree
for estimator in self.model.estimators_:
# Extract the cluster labels
obs_cluster_labels = estimator.apply(X)
leaf_nodes = np.unique(obs_cluster_labels)
leaf_path_dict = {}
# Get paths for each decision tree
for leaf_id in leaf_nodes:
leaf_path = list()
leaf_feature = list()
leaf_threshold = list()
leaf_direction = list()
leaf_path, leaf_feature, leaf_threshold, leaf_direction = recurse_path(
leaf_id, leaf_path, leaf_feature, leaf_threshold, leaf_direction, estimator)
leaf_path.insert(0, leaf_id)
leaf_path_dict[leaf_id] = [
leaf_path, leaf_feature, leaf_threshold, leaf_direction]
# Now create a distance matrix for each leaf node based
# on path distances
# TODO: Condition this on threshold and feature distances as well
leaf_distance_matrix = np.zeros(
[len(leaf_nodes), len(leaf_nodes)])
leaf_distance_matrix = pd.DataFrame(
leaf_distance_matrix, index=leaf_nodes, columns=leaf_nodes)
for dyad in combinations(leaf_nodes, 2):
p1 = leaf_path_dict[dyad[0]][0]
p2 = leaf_path_dict[dyad[1]][0]
path_length = len(np.setxor1d(p1, p2))
leaf_distance_matrix.loc[dyad[0],
dyad[1]] = path_length
leaf_distance_matrix += leaf_distance_matrix.T
# Normalize path distance
leaf_distance_matrix = leaf_distance_matrix / \
np.max(leaf_distance_matrix.values)
# Build the rows for each leaf->obs, and then apply
# to observation the fitting leaf node
for leaf_id in leaf_nodes:
row = np.array(obs_cluster_labels, dtype=float)
row = np.zeros(obs_cluster_labels.shape, dtype=float)
for alter in leaf_nodes:
row[obs_cluster_labels ==
alter] = leaf_distance_matrix.loc[leaf_id, alter]
# Add result for applicable observations to the distance matrix
distance_matrix[obs_cluster_labels == leaf_id, :] += row
# Normalize
distance_matrix = squareform(distance_matrix)/nr_estimator
else:
# Regular clustering based on similarities of leaf nodes
distance_matrix = pdist(self.leaves, metric='hamming')
# Run Clustering
dendogram = linkage(distance_matrix, method=linkage_method)
if t_param == None:
self.cluster_list = fcluster(dendogram, avg_nr_leaves, 'maxclust')
else:
self.cluster_list = fcluster(dendogram, t_param, 'distance')
self.unique_cluster = np.unique(self.cluster_list)
self.partition = {y_index[i]: self.cluster_list[i]
for i in range(0, n)}
# Create descriptions
if encode_y:
y_cols = y.columns
y_ind = y.index
y = pd.DataFrame(self.y_enc.inverse_transform(y),
columns=y_cols, index=y_ind)
X.loc[:, encode] = self.X_enc.inverse_transform(X[encode])
self.__create_cluster_descriptions(X, y, encode_y, encode)
    def __transform_data(self, X: pd.DataFrame, y: Union[pd.DataFrame, pd.Series], encode_y: bool, encode: Optional[list] = None, save_encoding:Optional[bool]= True):
        """Coerce y to a DataFrame, split columns into numeric/categorical and
        ordinal-encode the categorical ones.

        Returns the encoded ``(X, y, x_categorical)``.  When ``save_encoding``
        is True the fitted encoders and the four column partitions are stored
        on ``self`` for later inverse-transforms and descriptions.

        NOTE(review): ``ordinal_encode`` is defined elsewhere in the project;
        assumed to return (encoded frame, mapping dict, fitted encoder) when
        ``return_enc=True`` -- confirm against its definition.
        """
        # Normalise y into a single-frame representation.
        if isinstance(y, np.ndarray):
            y = pd.DataFrame(y)
        if isinstance(y, pd.Series):
            y = y.to_frame()
        # Check if y is categorical
        # Determine which columns are not numeric
        x_numeric=X.select_dtypes(include=np.number).columns.tolist()
        y_numeric=y.select_dtypes(include=np.number).columns.tolist()
        x_categorical = np.setdiff1d(X.columns,x_numeric)
        y_categorical = np.setdiff1d(y.columns,y_numeric)
        if encode is None:
            encode = x_categorical
        else:
            assert isinstance(
                encode, list), "Please provide categorical variables to encode as list"
            # We have to encode all non-numerical columns, so the caller's
            # explicit list is unioned with the detected categorical columns.
            x_categorical= np.union1d(x_categorical,encode)
        # Prepare data by ordinal encoding
        X, encoding_dict, X_enc = ordinal_encode(
            X, x_categorical, return_enc=True)
        if encode_y:
            y, y_encoding_dict, y_enc = ordinal_encode(
                y, y.columns, return_enc=True)
        else:
            y_encoding_dict = None
            y_enc = None
        # If needed, save new encoding (after fit)
        if save_encoding:
            self.encoding_dict = encoding_dict
            self.X_enc = X_enc
            self.y_encoding_dict = y_encoding_dict
            self.y_enc = y_enc
            self.y_categorical=y_categorical
            self.x_categorical=x_categorical
            self.y_numeric = y_numeric
            self.x_numeric = x_numeric
        return X,y,x_categorical
def __create_cluster_descriptions(self, X: pd.DataFrame, y: Union[pd.DataFrame, pd.Series], y_categorical: bool, encode: Optional[list] = None, variables_to_consider: Optional[list] = None):
assert self.fitted is True, "Model needs to be fitted to create cluster descriptions!"
outcome = y.columns
#rcdata = pd.merge(y, X, left_index=True, right_index=True)
#continuous = np.setdiff1d(X.columns, categoricals)
descriptions = {}
# Continuous variables
column_dict = {}
for col in self.x_numeric:
cluster_dict = {}
for cl in self.unique_cluster:
cl_mask = self.cluster_list == cl
subset = X.loc[cl_mask, col]
cluster_dict[cl] = {'mean': np.mean(subset), 'median': np.median(subset), 'std': np.std(
subset), 'max': np.max(subset), 'min': np.min(subset), 'skew': skew(subset)}
column_dict[col] = cluster_dict
descriptions['cont'] = column_dict
# Categorical variables
column_dict = {}
for col in self.x_categorical:
cluster_dict = {}
for cl in self.unique_cluster:
cl_mask = self.cluster_list == cl
subset = X.loc[cl_mask, col]
values, number = np.unique(subset, return_counts=True)
total = np.sum(number)
ratios = np.round(number/total, 2)
desc = ['{}: {}%'.format(x, y) for x, y in zip(values, ratios)]
desc = ', '.join(desc)
cluster_dict[cl] = desc
column_dict[col] = cluster_dict
descriptions['cat'] = column_dict
# Outcome either categorical or not
column_dict = {}
for col in outcome:
if col in self.y_categorical:
cluster_dict = {}
for cl in self.unique_cluster:
cl_mask = self.cluster_list == cl
subset = y.loc[cl_mask, col]
values, number = np.unique(subset, return_counts=True)
total = np.sum(number)
ratios = np.round(number/total, 2)
desc = ['{}: {}%'.format(x, y)
for x, y in zip(values, ratios)]
desc = ', '.join(desc)
cluster_dict[cl] = desc
column_dict[col] = cluster_dict
else:
cluster_dict = {}
for cl in self.unique_cluster:
cl_mask = self.cluster_list == cl
subset = y.loc[cl_mask, col]
cluster_dict[cl] = {'mean': np.mean(subset), 'median': np.median(subset), 'std': np.std(
subset), 'max': np.max(subset), 'min': np.min(subset), 'skew': skew(subset)}
column_dict[col] = cluster_dict
descriptions['y'] = column_dict
# Further stats
cluster_dict = {}
for cl in self.unique_cluster:
cl_mask = self.cluster_list == cl
subset = X.loc[cl_mask, :]
cluster_dict[cl] = {'Nr': subset.shape[0]}
descriptions['stats'] = cluster_dict
self.cluster_desc = descriptions
| 39.800353 | 263 | 0.550317 | 22,090 | 0.980601 | 0 | 0 | 0 | 0 | 0 | 0 | 7,309 | 0.324455 |
632674ddcc745dd77d3ba6cadd61eccacbe5de74 | 4,632 | py | Python | zoloto/cameras/camera.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | null | null | null | zoloto/cameras/camera.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | null | null | null | zoloto/cameras/camera.py | RealOrangeOne/yuri | 6ed55bdf97c6add22cd6c71c39ca30e2229337cb | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from typing import Any, Generator, Optional, Tuple
from cv2 import CAP_PROP_BUFFERSIZE, VideoCapture
from numpy import ndarray
from zoloto.marker_type import MarkerType
from .base import BaseCamera
from .mixins import IterableCameraMixin, VideoCaptureMixin, ViewableCameraMixin
from .utils import (
get_video_capture_resolution,
set_video_capture_resolution,
validate_calibrated_video_capture_resolution,
)
def find_camera_ids() -> Generator[int, None, None]:
    """Yield the ids of connected cameras by probing ids 0 through 7.

    Each candidate id is opened, checked, and released again; this mirrors
    the probing behaviour of ``VideoCapture(-1)``.
    """
    for candidate in range(8):
        probe = VideoCapture(candidate)
        is_open = probe.isOpened()
        probe.release()
        if is_open:
            yield candidate
class Camera(VideoCaptureMixin, IterableCameraMixin, BaseCamera, ViewableCameraMixin):
    """A camera held open via a cv2 ``VideoCapture`` for repeated captures.

    The driver buffer is limited to one frame and an extra read is performed
    before each capture so the returned frame is as fresh as possible.
    """
    def __init__(
        self,
        camera_id: int,
        *,
        marker_size: Optional[int] = None,
        marker_type: MarkerType,
        calibration_file: Optional[Path] = None,
        resolution: Optional[Tuple[int, int]] = None,
    ) -> None:
        super().__init__(
            marker_size=marker_size,
            marker_type=marker_type,
            calibration_file=calibration_file,
        )
        self.camera_id = camera_id
        self.video_capture = self.get_video_capture(self.camera_id)
        if resolution is not None:
            self._set_resolution(resolution)
        if self.calibration_params is not None:
            # A caller-supplied resolution is allowed to override the one the
            # calibration file was produced at.
            validate_calibrated_video_capture_resolution(
                self.video_capture,
                self.calibration_params,
                override=resolution is not None,
            )
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self.camera_id}>"
    def get_video_capture(self, camera_id: int) -> VideoCapture:
        # Buffer a single frame so captures lag the sensor as little as possible.
        cap = VideoCapture(camera_id)
        cap.set(CAP_PROP_BUFFERSIZE, 1)
        return cap
    def _set_resolution(self, resolution: Tuple[int, int]) -> None:
        set_video_capture_resolution(self.video_capture, resolution)
    def get_resolution(self) -> Tuple[int, int]:
        return get_video_capture_resolution(self.video_capture)
    def capture_frame(self) -> ndarray:
        # Hack: Double capture frames to fill buffer.
        self.video_capture.read()
        return super().capture_frame()
    def close(self) -> None:
        # Release the device after the cooperative close chain has run.
        super().close()
        self.video_capture.release()
    @classmethod
    def discover(cls, **kwargs: Any) -> Generator["Camera", None, None]:
        """Yield a ``Camera`` for every id that responds to probing."""
        for camera_id in find_camera_ids():
            yield cls(camera_id, **kwargs)
class SnapshotCamera(VideoCaptureMixin, BaseCamera):
    """
    A camera wrapper tuned for one-off captures.

    Unlike ``Camera`` it does not keep the device open between frames: a new
    ``VideoCapture`` is created for every capture and released immediately.
    """
    def __init__(
        self,
        camera_id: int,
        *,
        marker_size: Optional[int] = None,
        marker_type: MarkerType,
        calibration_file: Optional[Path] = None,
        resolution: Optional[Tuple[int, int]] = None,
    ) -> None:
        super().__init__(
            marker_size=marker_size,
            marker_type=marker_type,
            calibration_file=calibration_file,
        )
        self.camera_id = camera_id
        self._resolution = resolution
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self.camera_id}>"
    def get_video_capture(self, camera_id: int) -> VideoCapture:
        """Open the device, applying (or learning) the working resolution."""
        capture = VideoCapture(camera_id)
        if self._resolution is None:
            # First open: remember whatever resolution the device reports.
            self._resolution = get_video_capture_resolution(capture)
        else:
            set_video_capture_resolution(capture, self._resolution)
        if self.calibration_params is not None:
            validate_calibrated_video_capture_resolution(
                capture, self.calibration_params, override=False
            )
        return capture
    def get_resolution(self) -> Tuple[int, int]:
        if self._resolution is not None:
            return self._resolution
        raise ValueError(
            "Cannot find resolution of camera until at least 1 frame has been captured."
        )
    def capture_frame(self) -> ndarray:
        # Open, grab one frame through the mixin chain, then release at once.
        self.video_capture = self.get_video_capture(self.camera_id)
        image = super().capture_frame()
        self.video_capture.release()
        return image
    @classmethod
    def discover(cls, **kwargs: Any) -> Generator["SnapshotCamera", None, None]:
        """Yield a ``SnapshotCamera`` for every id that responds to probing."""
        for found_id in find_camera_ids():
            yield cls(found_id, **kwargs)
| 31.726027 | 92 | 0.649827 | 3,839 | 0.8288 | 655 | 0.141408 | 352 | 0.075993 | 0 | 0 | 462 | 0.099741 |
6326a7cc80d3d1603f92a048b09d9d4066064738 | 448 | py | Python | .config/vim/python3/tex.py | psvenk/dotfiles | 2c07e97b5087a224e5df3060405f8bbfac28866e | [
"0BSD"
] | null | null | null | .config/vim/python3/tex.py | psvenk/dotfiles | 2c07e97b5087a224e5df3060405f8bbfac28866e | [
"0BSD"
] | null | null | null | .config/vim/python3/tex.py | psvenk/dotfiles | 2c07e97b5087a224e5df3060405f8bbfac28866e | [
"0BSD"
] | null | null | null | import vim
from datetime import datetime
from os import path
from subprocess import run, DEVNULL
def include_screenshot():
    """Capture an interactive screenshot into the current buffer's directory.

    Runs ``maim -s`` (region selection) and writes the PNG next to the file
    open in vim.  Returns the generated file name (without directory).
    """
    buffer_dir = path.dirname(vim.current.buffer.name)
    timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    screenshot_name = f"screenshot-{timestamp}.png"
    destination = path.join(buffer_dir, screenshot_name)
    with open(destination, mode="wb") as outfile:
        run(["maim", "-s"], stdin=DEVNULL, stdout=outfile, stderr=DEVNULL)
    return screenshot_name
| 34.461538 | 74 | 0.703125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.138393 |
6327c56d9c670d44daabc2e832ec49f09adf169e | 433 | py | Python | tests/ext/test_ext_plugin.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 826 | 2015-01-09T13:23:35.000Z | 2022-03-18T01:19:40.000Z | tests/ext/test_ext_plugin.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 316 | 2015-01-14T10:35:22.000Z | 2022-03-08T17:18:10.000Z | tests/ext/test_ext_plugin.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 112 | 2015-01-10T15:04:26.000Z | 2022-03-16T08:11:58.000Z |
from cement.ext.ext_plugin import CementPluginHandler
# module tests
class TestCementPluginHandler(object):
    """Tests for subclassing the cement plugin handler."""

    def test_subclassing(self):
        """A minimal subclass exposes its Meta label and the plugin interface."""
        class MyPluginHandler(CementPluginHandler):
            class Meta:
                label = 'my_plugin_handler'

        handler = MyPluginHandler()
        assert handler._meta.interface == 'plugin'
        assert handler._meta.label == 'my_plugin_handler'
# app functionality and coverage tests
| 22.789474 | 53 | 0.681293 | 318 | 0.734411 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.226328 |
6327d98e619405f59bb76b328007d961e5267f30 | 655 | py | Python | wofry/beamline/optical_elements/ideal_elements/screen.py | PaNOSC-ViNYL/wofry | 779b5a738ee7738e959a58aafe01e7e49b03894a | [
"MIT"
] | null | null | null | wofry/beamline/optical_elements/ideal_elements/screen.py | PaNOSC-ViNYL/wofry | 779b5a738ee7738e959a58aafe01e7e49b03894a | [
"MIT"
] | null | null | null | wofry/beamline/optical_elements/ideal_elements/screen.py | PaNOSC-ViNYL/wofry | 779b5a738ee7738e959a58aafe01e7e49b03894a | [
"MIT"
] | null | null | null | """
Represents an ideal lens.
"""
from syned.beamline.optical_elements.ideal_elements.screen import Screen
from wofry.beamline.decorators import OpticalElementDecorator
class WOScreen(Screen, OpticalElementDecorator):
    """Ideal screen element: the wavefront passes through unchanged."""
    def __init__(self, name="Undefined"):
        # Initialises the syned Screen base explicitly; note that super() is
        # not used, so OpticalElementDecorator.__init__ is not invoked.
        Screen.__init__(self, name=name)
    def applyOpticalElement(self, wavefront, parameters=None, element_index=None):
        # An ideal screen is transparent: return the wavefront as-is.
        return wavefront
class WOScreen1D(Screen, OpticalElementDecorator):
    """Ideal 1D screen element: the wavefront passes through unchanged."""
    def __init__(self, name="Undefined"):
        # Initialises the syned Screen base explicitly; note that super() is
        # not used, so OpticalElementDecorator.__init__ is not invoked.
        Screen.__init__(self, name=name)
    def applyOpticalElement(self, wavefront, parameters=None, element_index=None):
        # An ideal screen is transparent: return the wavefront as-is.
        return wavefront
| 32.75 | 82 | 0.760305 | 482 | 0.735878 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.083969 |
6329f54bdc4ccb92a6490bc61cacdf57859127e4 | 1,799 | py | Python | scripts/sample_imgnet.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | scripts/sample_imgnet.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | scripts/sample_imgnet.py | duanzhiihao/mycv | 184b52f7a5c1b6f603122d4f4050952b65ba0ead | [
"MIT"
] | null | null | null | import os
from tqdm import tqdm
from pathlib import Path
import random
from mycv.paths import IMAGENET_DIR
from mycv.datasets.imagenet import WNIDS, WNID_TO_IDX
def main():
    """Entry point: sample 200 classes with 600 train / 50 val images each."""
    sample(num_cls=200, num_train=600, num_val=50)
def sample(num_cls=200, num_train=600, num_val=50):
    """Sample an ImageNet subset and write train/val annotation files.

    Randomly picks ``num_cls`` wnids, then for each draws ``num_train``
    training and ``num_val`` validation image names without replacement.
    Writes ``wnid/name cls_idx`` lines to the annotation files.

    Fix: the original reopened both annotation files in append mode once per
    class inside the loop; lines are now buffered and each file written once.
    NOTE(review): the val file name embeds ``num_train`` (not ``num_val``) --
    kept as-is because downstream readers expect that name.
    """
    assert IMAGENET_DIR.is_dir()
    train_root = IMAGENET_DIR / 'train'
    # check if imageset file already exist
    trainlabel_path = IMAGENET_DIR / f'annotations/train{num_cls}_{num_train}.txt'
    vallabel_path = IMAGENET_DIR / f'annotations/val{num_cls}_{num_train}.txt'
    if trainlabel_path.exists():
        print(f'Warning: {trainlabel_path} already exist. Removing it...')
        os.remove(trainlabel_path)
    if vallabel_path.exists():
        print(f'Warning: {vallabel_path} already exist. Removing it...')
        os.remove(vallabel_path)
    wnid_subset = random.sample(WNIDS, k=num_cls)
    train_lines = []
    val_lines = []
    for cls_idx, wnid in tqdm(enumerate(wnid_subset)):
        img_dir = train_root / wnid
        assert img_dir.is_dir()
        img_names = os.listdir(img_dir)
        # select the num_train + num_val images for this class
        assert len(img_names) > num_train + num_val
        imname_subset = random.sample(img_names, num_train + num_val)
        train_names = imname_subset[:num_train]
        val_names = imname_subset[num_train:num_train+num_val]
        for imname in train_names:
            assert imname.endswith('.JPEG')
            train_lines.append(f'{wnid}/{imname} {cls_idx}\n')
        for imname in val_names:
            assert imname.endswith('.JPEG')
            val_lines.append(f'{wnid}/{imname} {cls_idx}\n')
    # Write each annotation file exactly once.
    with open(trainlabel_path, 'w', newline='\n') as f:
        f.writelines(train_lines)
    with open(vallabel_path, 'w', newline='\n') as f:
        f.writelines(val_lines)
if __name__ == "__main__":
    # Script entry point: build the sampled ImageNet annotation files.
    main()
| 34.596154 | 82 | 0.659811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.235686 |
632d57e33c16941deaf1e4e00a7a754b488df9c5 | 702 | py | Python | test/test_document.py | hibtc/madseq | 1e218726a01a1817464f84f0ce887be3186f2c08 | [
"MIT"
] | null | null | null | test/test_document.py | hibtc/madseq | 1e218726a01a1817464f84f0ce887be3186f2c08 | [
"MIT"
] | 2 | 2015-05-25T00:42:49.000Z | 2015-05-25T00:43:19.000Z | test/test_document.py | hibtc/madseq | 1e218726a01a1817464f84f0ce887be3186f2c08 | [
"MIT"
] | null | null | null | # test utilities
import unittest
from decimal import Decimal
# tested module
import madseq
class Test_Document(unittest.TestCase):
    """Unit tests for madseq.Document line parsing."""

    def test_parse_line(self):
        parse = madseq.Document.parse_line
        Element = madseq.Element
        # Pure whitespace collapses to a single empty token.
        self.assertEqual(list(parse(' \t ')), [''])
        # A comment-only line is returned verbatim, minus leading whitespace.
        self.assertEqual(list(parse(' \t ! a comment; ! ')),
                         ['! a comment; ! '])
        # Statements split on ';' become Element instances; the comment
        # marker is emitted as its own token.
        self.assertEqual(
            list(parse(' use, z=23.23e2; k: z; !')),
            ['!',
             Element(None, 'use', {'z': Decimal('23.23e2')}),
             Element('k', 'z', {})])
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 22.645161 | 74 | 0.497151 | 557 | 0.793447 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.198006 |
632f191e3e228f8b92b0a400fa3722a5c25e9f21 | 5,307 | py | Python | Assignment3/src/main/bkool/utils/Visitor.py | ntnguyen648936/PPL-BKOOOL | 458f54683da01d2105cf3cffc8ecbc2aeb796df8 | [
"Apache-2.0"
] | null | null | null | Assignment3/src/main/bkool/utils/Visitor.py | ntnguyen648936/PPL-BKOOOL | 458f54683da01d2105cf3cffc8ecbc2aeb796df8 | [
"Apache-2.0"
] | null | null | null | Assignment3/src/main/bkool/utils/Visitor.py | ntnguyen648936/PPL-BKOOOL | 458f54683da01d2105cf3cffc8ecbc2aeb796df8 | [
"Apache-2.0"
] | 1 | 2022-01-27T15:38:22.000Z | 2022-01-27T15:38:22.000Z | from abc import ABC, abstractmethod, ABCMeta
class Visitor(ABC):
    """Abstract visitor over the AST: one abstract ``visitX`` hook per node
    type, each receiving the node (``ast``) and an arbitrary ``param``.
    Concrete visitors must implement every hook (or extend ``BaseVisitor``).
    """
    @abstractmethod
    def visitProgram(self, ast, param):
        pass
    @abstractmethod
    def visitVarDecl(self, ast, param):
        pass
    @abstractmethod
    def visitConstDecl(self, ast, param):
        pass
    @abstractmethod
    def visitClassDecl(self, ast, param):
        pass
    @abstractmethod
    def visitStatic(self, ast, param):
        pass
    @abstractmethod
    def visitInstance(self, ast, param):
        pass
    @abstractmethod
    def visitMethodDecl(self, ast, param):
        pass
    @abstractmethod
    def visitAttributeDecl(self, ast, param):
        pass
    @abstractmethod
    def visitIntType(self, ast, param):
        pass
    @abstractmethod
    def visitFloatType(self, ast, param):
        pass
    @abstractmethod
    def visitBoolType(self, ast, param):
        pass
    @abstractmethod
    def visitStringType(self, ast, param):
        pass
    @abstractmethod
    def visitVoidType(self, ast, param):
        pass
    @abstractmethod
    def visitArrayType(self, ast, param):
        pass
    @abstractmethod
    def visitClassType(self, ast, param):
        pass
    @abstractmethod
    def visitBinaryOp(self, ast, param):
        pass
    @abstractmethod
    def visitUnaryOp(self, ast, param):
        pass
    @abstractmethod
    def visitCallExpr(self, ast, param):
        pass
    @abstractmethod
    def visitNewExpr(self, ast, param):
        pass
    @abstractmethod
    def visitId(self, ast, param):
        pass
    @abstractmethod
    def visitArrayCell(self, ast, param):
        pass
    @abstractmethod
    def visitFieldAccess(self, ast, param):
        pass
    @abstractmethod
    def visitBlock(self, ast, param):
        pass
    @abstractmethod
    def visitIf(self, ast, param):
        pass
    @abstractmethod
    def visitFor(self, ast, param):
        pass
    @abstractmethod
    def visitContinue(self, ast, param):
        pass
    @abstractmethod
    def visitBreak(self, ast, param):
        pass
    @abstractmethod
    def visitReturn(self, ast, param):
        pass
    @abstractmethod
    def visitAssign(self, ast, param):
        pass
    @abstractmethod
    def visitCallStmt(self, ast, param):
        pass
    @abstractmethod
    def visitIntLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitFloatLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitBooleanLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitStringLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitNullLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitSelfLiteral(self, ast, param):
        pass
    @abstractmethod
    def visitArrayLiteral(self, ast, param):
        pass
class BaseVisitor(Visitor):
    """No-op implementation of ``Visitor``: every hook returns ``None``.
    Subclass this and override only the hooks you need.
    """
    def visitProgram(self, ast, param):
        return None
    def visitVarDecl(self, ast, param):
        return None
    def visitConstDecl(self, ast, param):
        return None
    def visitClassDecl(self, ast, param):
        return None
    def visitStatic(self, ast, param):
        return None
    def visitInstance(self, ast, param):
        return None
    def visitMethodDecl(self, ast, param):
        return None
    def visitAttributeDecl(self, ast, param):
        return None
    def visitIntType(self, ast, param):
        return None
    def visitFloatType(self, ast, param):
        return None
    def visitBoolType(self, ast, param):
        return None
    def visitStringType(self, ast, param):
        return None
    def visitVoidType(self, ast, param):
        return None
    def visitArrayType(self, ast, param):
        return None
    def visitClassType(self, ast, param):
        return None
    def visitBinaryOp(self, ast, param):
        return None
    def visitUnaryOp(self, ast, param):
        return None
    def visitCallExpr(self, ast, param):
        return None
    def visitNewExpr(self, ast, param):
        return None
    def visitId(self, ast, param):
        return None
    def visitArrayCell(self, ast, param):
        return None
    def visitFieldAccess(self, ast, param):
        return None
    def visitBlock(self, ast, param):
        return None
    def visitIf(self, ast, param):
        return None
    def visitFor(self, ast, param):
        return None
    def visitContinue(self, ast, param):
        return None
    def visitBreak(self, ast, param):
        return None
    def visitReturn(self, ast, param):
        return None
    def visitAssign(self, ast, param):
        return None
    def visitCallStmt(self, ast, param):
        return None
    def visitIntLiteral(self, ast, param):
        return None
    def visitFloatLiteral(self, ast, param):
        return None
    def visitBooleanLiteral(self, ast, param):
        return None
    def visitStringLiteral(self, ast, param):
        return None
    def visitNullLiteral(self, ast, param):
        return None
    def visitSelfLiteral(self, ast, param):
        return None
    def visitArrayLiteral(self, ast, param):
        return None
| 23.174672 | 46 | 0.603919 | 5,251 | 0.989448 | 0 | 0 | 2,564 | 0.483135 | 0 | 0 | 0 | 0 |
633116f53d75e450606cb590875b6e5bf2ea6638 | 4,917 | py | Python | opensanctions/crawlers/eu_fsf.py | quantumchips/opensanctions | 56f19dcfea704480e56a311d2a807c8446237457 | [
"MIT"
] | 102 | 2018-03-22T16:33:17.000Z | 2021-01-20T07:39:43.000Z | opensanctions/crawlers/eu_fsf.py | quantumchips/opensanctions | 56f19dcfea704480e56a311d2a807c8446237457 | [
"MIT"
] | 101 | 2021-02-12T18:26:16.000Z | 2022-01-27T14:01:53.000Z | opensanctions/crawlers/eu_fsf.py | quantumchips/opensanctions | 56f19dcfea704480e56a311d2a807c8446237457 | [
"MIT"
] | 50 | 2018-05-11T18:00:49.000Z | 2021-01-26T12:11:20.000Z | from prefixdate import parse_parts
from opensanctions import helpers as h
from opensanctions.util import remove_namespace
def parse_address(context, el):
    """Build an address entity from an XML element's address attributes.

    The placeholder country description "UNKNOWN" is treated as missing.
    """
    country_name = el.get("countryDescription")
    if country_name == "UNKNOWN":
        country_name = None
    return h.make_address(
        context,
        street=el.get("street"),
        po_box=el.get("poBox"),
        city=el.get("city"),
        place=el.get("place"),
        postal_code=el.get("zipCode"),
        region=el.get("region"),
        country=country_name,
        country_code=el.get("countryIso2Code"),
    )
def parse_entry(context, entry):
    """Convert one <sanctionEntity> XML element into entities and emit them.

    Emits: the subject entity (schema chosen via the subject_type lookup), one
    Passport/Identification entity per <identification> node, and a Sanction
    entity derived from the <regulation> node.
    """
    subject_type = entry.find("./subjectType")
    schema = context.lookup_value("subject_type", subject_type.get("code"))
    if schema is None:
        context.log.warning("Unknown subject type", type=subject_type)
        return
    entity = context.make(schema)
    entity.id = context.make_slug(entry.get("euReferenceNumber"))
    entity.add("notes", entry.findtext("./remark"))
    entity.add("topics", "sanction")
    # Sanction metadata comes from the <regulation> node.
    sanction = h.make_sanction(context, entity)
    regulation = entry.find("./regulation")
    source_url = regulation.findtext("./publicationUrl", "")
    sanction.set("sourceUrl", source_url)
    sanction.add("program", regulation.get("programme"))
    sanction.add("reason", regulation.get("numberTitle"))
    sanction.add("startDate", regulation.get("entryIntoForceDate"))
    sanction.add("listingDate", regulation.get("publicationDate"))
    for name in entry.findall("./nameAlias"):
        # BUGFIX: the "strong" flag lives on the <nameAlias> node, not on the
        # entry element.  The old code read entry.get("strong"), which is
        # always None, so weak aliases were stored as full names.
        if name.get("strong") == "false":
            entity.add("weakAlias", name.get("wholeName"))
        else:
            entity.add("name", name.get("wholeName"))
        entity.add("title", name.get("title"), quiet=True)
        entity.add("firstName", name.get("firstName"), quiet=True)
        entity.add("middleName", name.get("middleName"), quiet=True)
        entity.add("lastName", name.get("lastName"), quiet=True)
        entity.add("position", name.get("function"), quiet=True)
        gender = h.clean_gender(name.get("gender"))
        entity.add("gender", gender, quiet=True)
    for node in entry.findall("./identification"):
        # 'type' renamed to avoid shadowing the builtin.
        id_type = node.get("identificationTypeCode")
        schema = "Passport" if id_type == "passport" else "Identification"
        passport = context.make(schema)
        passport.id = context.make_id("ID", entity.id, node.get("logicalId"))
        passport.add("holder", entity)
        passport.add("authority", node.get("issuedBy"))
        passport.add("type", node.get("identificationTypeDescription"))
        passport.add("number", node.get("number"))
        passport.add("number", node.get("latinNumber"))
        # The old code added issueDate twice; the duplicate is dropped.
        passport.add("startDate", node.get("issueDate"))
        passport.add("country", node.get("countryIso2Code"))
        passport.add("country", node.get("countryDescription"))
        for remark in node.findall("./remark"):
            passport.add("summary", remark.text)
        context.emit(passport)
    for node in entry.findall("./address"):
        address = parse_address(context, node)
        h.apply_address(context, entity, address)
        # Iterate children directly (getchildren() is deprecated).
        for child in node:
            # BUGFIX: the old test `child.tag in ("regulationSummary")` was a
            # substring check on a string (missing comma), which would also
            # have skipped e.g. a "regulation" child.
            if child.tag == "regulationSummary":
                continue
            elif child.tag == "remark":
                entity.add("notes", child.text)
            elif child.tag == "contactInfo":
                prop = context.lookup_value("contact_info", child.get("key"))
                if prop is None:
                    context.log.warning("Unknown contact info", node=child)
                else:
                    entity.add(prop, child.get("value"))
            else:
                context.log.warning("Unknown address component", node=child)
    for birth in entry.findall("./birthdate"):
        partialBirth = parse_parts(
            birth.get("year"), birth.get("month"), birth.get("day")
        )
        entity.add("birthDate", birth.get("birthdate"))
        entity.add("birthDate", partialBirth)
        # Birthdate nodes carry the same address attributes as <address>.
        address = parse_address(context, birth)
        if address is not None:
            entity.add("birthPlace", address.get("full"))
            entity.add("country", address.get("country"))
    for node in entry.findall("./citizenship"):
        entity.add("nationality", node.get("countryIso2Code"), quiet=True)
        entity.add("nationality", node.get("countryDescription"), quiet=True)
    context.emit(entity, target=True, unique=True)
    context.emit(sanction)
def crawl(context):
    """Fetch the source XML, strip namespaces, and parse every sanction entry."""
    path = context.fetch_resource("source.xml", context.dataset.data.url)
    context.export_resource(path, "text/xml", title=context.SOURCE_TITLE)
    doc = remove_namespace(context.parse_resource_xml(path))
    for sanction_entity in doc.findall(".//sanctionEntity"):
        parse_entry(context, sanction_entity)
| 40.636364 | 77 | 0.630466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,232 | 0.250559 |
63319d40cb5498bfc7c0d34898459019f0749096 | 1,031 | py | Python | lre/nlp/__init__.py | ovixiao/lre | e60de7b4457efd5a85165e89a4477c14f52c471b | [
"Apache-2.0"
] | null | null | null | lre/nlp/__init__.py | ovixiao/lre | e60de7b4457efd5a85165e89a4477c14f52c471b | [
"Apache-2.0"
] | null | null | null | lre/nlp/__init__.py | ovixiao/lre | e60de7b4457efd5a85165e89a4477c14f52c471b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
处理各种语言的库,主要是实现分段落、句子、词的功能
"""
from __future__ import unicode_literals
from .nlp_zh import NlpZh
class Nlp(object):
def __init__(self, config):
self.config = config
if self.config.language == 'zh':
self.nlp = NlpZh(config)
else:
raise ValueError('invalid_language', config.language)
def text2para(self, text):
"""
从文本切分成为段落, 会依据 config 的 language 来切分
:param text: 输入的文本整体,段落以 \n 作为分割
:return: 返回段落的列表
"""
return self.nlp.text2para(text[:self.config.max_text_len])
def para2sent(self, paragraph):
"""
从段落切分句子,会依据 config 的 language 来切分
:param paragraph: 输入的段落文本,段落以 \n 作为分割,仅支持一个 \n
:return: 返回句子的列表
"""
return self.nlp.para2sent(paragraph)
def sent2word(self, sentence):
"""
从句子切分成词,会依据 config 的 language 来切分
:param sentence: 输入的句子文本
:return: 返回词的列表
"""
return self.nlp.sent2word(sentence)
| 24.547619 | 66 | 0.591659 | 1,121 | 0.862972 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.556582 |
633225f23e4a4bf2dbb424fc735d054300fb8162 | 730 | py | Python | scripts/vertical/scr_process_stats.py | juhi24/radcomp | ccb727e4fa516ae708362ed1b05335de92f8c7fd | [
"MIT"
] | 1 | 2019-06-18T01:54:00.000Z | 2019-06-18T01:54:00.000Z | scripts/vertical/scr_process_stats.py | juhi24/radcomp | ccb727e4fa516ae708362ed1b05335de92f8c7fd | [
"MIT"
] | null | null | null | scripts/vertical/scr_process_stats.py | juhi24/radcomp | ccb727e4fa516ae708362ed1b05335de92f8c7fd | [
"MIT"
] | 1 | 2020-05-27T10:13:49.000Z | 2020-05-27T10:13:49.000Z | # coding: utf-8
import matplotlib.pyplot as plt
from scr_class_stats import init_rain, cl_frac_in_case, frac_in_case_hist
def proc_frac(cases, lcl, frac=True):
"""fraction or sum of process occurrences per case"""
cl_sum = cases.case.apply(lambda x: 0)
for cl in lcl:
cl_sum += cases.case.apply(lambda x: cl_frac_in_case(x, cl, frac=False))
if frac:
sizes = cases.case.apply(lambda x: x.classes.size)
return cl_sum/sizes
return cl_sum
if __name__ == '__main__':
plt.close('all')
#cases_r, cc_r = init_rain()
cl_hm = (10, 13)
cl_dend = (11, 12, 14, 15)
fracs = proc_frac(cases_r, cl_dend, frac=True)
ax = frac_in_case_hist(cases_r, frac_per_case=fracs)
| 24.333333 | 80 | 0.669863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.152055 |
2d47ab385e2aad6d3b4b15b6488db399d09b39ca | 3,784 | py | Python | scripts/supervised/exam_real_robot_data/analysis_icm_model_real_bot.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | scripts/supervised/exam_real_robot_data/analysis_icm_model_real_bot.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | scripts/supervised/exam_real_robot_data/analysis_icm_model_real_bot.py | fredshentu/public_model_based_controller | 9301699bc56aa49ba5c699f7d5be299046a8aa0c | [
"MIT"
] | null | null | null | """
Since the size of real robot data is huge, we first go though all data then save loss array,
then sort loss array. Finally we use the indexes to find the corresponding graphs
"""
import time
from rllab.core.serializable import Serializable
from numpy.linalg import norm
from numpy import mean
from numpy import std
import numpy as np
import csv, os
import scipy.misc as scm
# import pickle
import tensorflow as tf
from rllab.policies.uniform_control_policy import UniformControlPolicy
from rllab.sampler.utils import rollout
from railrl.policies.cmaes_icm import CMAESPolicy
import argparse
import matplotlib.pyplot as plt
import pickle
OBS_INPUT_SHAPE = [128,128,6]
ACTION_SHAPE = [4]
STATE_SHAPE = [8]
#return img, action, next_img, state, next_state
def load_data(filename):
    """Load one 600-step robot trajectory from a pickle file.

    The pickle must hold a dict with 601 ``states``, 601 ``images`` and 600
    ``action_list`` entries (observation i precedes action i).

    Returns:
        (images, actions, next_images, states, next_states), each of length
        600, where the next_* sequences are shifted one step forward.

    Fixes: the module-level ``import pickle`` was commented out (NameError at
    call time), the file handle was never closed, and three dead local lists
    were removed.
    NOTE(review): unpickling executes arbitrary code -- only load trusted files.
    """
    import pickle  # local import: the module-level one was commented out
    with open(filename, 'rb') as file:
        load_dict = pickle.load(file, encoding='latin1')
    states = load_dict["states"]
    images = load_dict["images"]
    actions = load_dict["action_list"]
    assert len(images) == 601
    assert len(actions) == 600
    assert len(states) == 601
    return images[:600], actions, images[1:], states[:600], states[1:]
if __name__ == "__main__":
    # NOTE(review): this script block is broken as committed -- see the
    # individual notes below.  Kept verbatim; fixing it requires the missing
    # joblib import, argument parsing, and env/policy setup.
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                    help='path to the data file, should be pickle')
    # NOTE(review): `args = parser.parse_args()` is missing, so `args.file`
    # below raises NameError.
    with tf.Session() as sess:
        # NOTE(review): `joblib` is never imported in this module.
        data = joblib.load(args.file)
        _conv_encoder = data["encoder"]
        _inverse_model = data["inverse_model"]
        _forward_model = data["forward_model"]
        _state_encoder = data["state_encoder"]
        # Placeholders are normalized: images to [-0.5, 0.5], actions and
        # arm states scaled by their hardware ranges.
        s1_ph = tf.placeholder(tf.float32, [None] + OBS_INPUT_SHAPE)/255 - 0.5
        s2_ph = tf.placeholder(tf.float32, [None] + OBS_INPUT_SHAPE)/255 - 0.5
        a_ph = tf.placeholder(tf.float32, [None, 4]) * [1./1023, 1./249, 1./249, 1./1023]
        arm_state1_ph = tf.placeholder(tf.float32, [None, 8]) / 2048
        arm_state2_ph = tf.placeholder(tf.float32, [None, 8]) / 2048
        encoder1 = _conv_encoder.get_weight_tied_copy(observation_input=s1_ph)
        encoder2 = _conv_encoder.get_weight_tied_copy(observation_input=s2_ph)
        state_encoder1 = _state_encoder.get_weight_tied_copy(observation_input=arm_state1_ph)
        state_encoder2 = _state_encoder.get_weight_tied_copy(observation_input=arm_state2_ph)
        feature1 = tf.concat(1, [encoder1.output, state_encoder1.output])
        feature2 = tf.concat(1, [encoder2.output, state_encoder2.output])
        inverse_model = _inverse_model.get_weight_tied_copy(feature_input1=feature1,
                                                            feature_input2=feature2)
        forward_model = _forward_model.get_weight_tied_copy(feature_input=feature1,
                                                            action_input=a_ph)
        def get_forward_loss(obs, state, next_obs, next_state, actions):
            # Per-sample forward-model prediction error in feature space.
            forward_loss = sess.run(
                tf.reduce_mean(tf.square(
                    encoder2.output - forward_model.output
                ), axis=1),
                feed_dict={
                    s1_ph: obs,
                    s2_ph: next_obs,
                    a_ph: actions,
                    # NOTE(review): the next two entries use '=' instead of
                    # ':' inside a dict literal -- this is a SyntaxError.
                    arm_state1_ph = state,
                    arm_state2_ph = next_state,
                }
            )
            return forward_loss
        # Call rllab rollout for parallel
        # NOTE(review): `env` and `policy` are never defined in this script.
        while True:
            plt.clf()
            plt.ion()
            ob = env.reset()
            next_ob = None
            x = []
            y = []
            for t in range(env.wrapped_env._wrapped_env.env.spec.max_episode_steps):
                action, _ = policy.get_action(ob)
                next_ob, reward, done, env_infos = env.step(action)
                env.render()
                # NOTE(review): get_forward_loss takes 5 arguments but only 3
                # are passed here (state arguments are missing).
                forward_loss = get_forward_loss([ob], [next_ob], [action])
                if done:
                    ob = env.reset()
                else:
                    ob = next_ob
                x.append(t)
                y.append(forward_loss)
                # import pdb; pdb.set_trace()
                flag = env_infos["contact_reward"]
                # NOTE(review): two independent `if`s -- when flag == 1 the
                # first branch sets "touching table" and the second's `else`
                # then overwrites it with "touching box".  The second `if`
                # was presumably meant to be `elif`.
                if flag == 1:
                    plt.title("touching table")
                if flag == 0:
                    plt.title("touching nothing")
                else:
                    plt.title("touching box")
                plt.plot(x, y, c="blue")
                plt.pause(0.05)
                # print ("Should plot")
                plt.show()
2d4823293bcf928bc47dd1c13d4ff3d6fb02d8f4 | 12,225 | py | Python | data_preprocess/build_data_to_tfrecord.py | YuxianMeng/CorefQA-pytorch | 0017e85dc09075c640c5fe91673230f505e825f2 | [
"Apache-2.0"
] | 6 | 2021-06-30T03:08:27.000Z | 2021-12-23T07:15:03.000Z | data_preprocess/build_data_to_tfrecord.py | YuxianMeng/CorefQA-pytorch | 0017e85dc09075c640c5fe91673230f505e825f2 | [
"Apache-2.0"
] | null | null | null | data_preprocess/build_data_to_tfrecord.py | YuxianMeng/CorefQA-pytorch | 0017e85dc09075c640c5fe91673230f505e825f2 | [
"Apache-2.0"
] | 2 | 2020-10-20T20:20:54.000Z | 2021-01-27T08:15:53.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Wei Wu
@license: Apache Licence
@file: prepare_training_data.py
@time: 2019/12/19
@contact: wu.wei@pku.edu.cn
将conll的v4_gold_conll文件格式转成模型训练所需的jsonlines数据格式
"""
import argparse
import json
import logging
import os
import re
import sys
from collections import defaultdict
from typing import List, Tuple
import tensorflow as tf
import data_preprocess.conll as conll
from bert.tokenization import FullTokenizer
# Command-line interface.
# BUGFIX: the description previously read 'Process some integers.' -- a
# leftover from the argparse documentation example.
parser = argparse.ArgumentParser(description='Convert *.v4_gold_conll coreference files into tfrecord training data.')
parser.add_argument('--data_dir', type=str, help='The data directory to save *.v4_gold_conll and *.jsonlines')
parser.add_argument('--language', type=str, default='english', help='The language to process.')
parser.add_argument('--vocab_file', type=str, help='The original vocab file for bert tokenization')
parser.add_argument('--sliding_window_size', type=int, default=384, help='Sliding window size for BERT processing')
args = parser.parse_args()
# Log to both a file and stdout.
handlers = [logging.FileHandler(filename='prepare_training_data.log'), logging.StreamHandler(sys.stdout)]
logging.basicConfig(level=logging.INFO, handlers=handlers,
                    format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
logger = logging.getLogger()
# Reserved BERT vocabulary tokens used to bracket inserted speaker names.
SPEAKER_START = '[unused19]'
SPEAKER_END = '[unused73]'
def read_conll_file(conll_file_path: str) -> List[Tuple]:
    """Split a *.v4_gold_conll file into per-document chunks.

    Returns a list of ``(doc_key, lines)`` tuples where ``lines`` holds the
    stripped annotation rows of that document; the begin/end marker lines
    themselves are consumed and not included.
    """
    documents: List[Tuple] = []
    with open(conll_file_path) as conll_file:
        for raw_line in conll_file:
            match = re.match(conll.BEGIN_DOCUMENT_REGEX, raw_line)
            if match is not None:
                key = conll.get_doc_key(match.group(1), match.group(2))
                documents.append((key, []))
            elif not raw_line.startswith("#end document"):
                # Annotation row: attach it to the currently open document.
                documents[-1][1].append(raw_line.strip())
    return documents
def normalize_word(word: str, language: str) -> str:
    """Normalize a raw CoNLL token.

    For Arabic, gold files append ``#`` plus extra morphological material to
    each token; everything from the first ``#`` onward is dropped.
    BUGFIX: the original sliced with ``word[:word.find("#")]`` unconditionally,
    which silently removed the *last character* of any Arabic token that had
    no ``#`` marker (``find`` returns -1). We now only truncate when a ``#``
    is actually present.

    The escaped punctuation tokens ``/.`` and ``/?`` are unescaped to ``.``
    and ``?`` for every language.
    """
    if language == "arabic":
        marker = word.find("#")
        if marker != -1:
            word = word[:marker]
    if word in ("/.", "/?"):
        return word[1:]
    return word
def parse_document(document: Tuple[str, List], language: str) -> dict:
    """
    Extract tokens, speaker turns and coreference clusters from one
    CoNLL document.

    :param document: ``(doc_key, lines)`` pair as produced by read_conll_file
    :param language: english, chinese or arabic
    :return: dict with ``doc_key``, ``sentences``, ``speakers`` (a list of
        ``(word_index, speaker)`` pairs marking speaker *changes*) and
        ``clusters``
    """
    doc_key, lines = document
    sentences: List[List[str]] = [[]]
    speakers: List[Tuple[int, str]] = []
    coreferences: List[str] = []
    word_idx = -1
    previous_speaker = ''
    for line in lines:
        columns = line.split()
        if columns:
            # annotation row: word form is column 3, speaker is column 9,
            # coreference info is the last column
            assert len(columns) >= 12
            word_idx += 1
            sentences[-1].append(normalize_word(columns[3], language))
            speaker = columns[9]
            if speaker != previous_speaker:
                speakers.append((word_idx, speaker))
                previous_speaker = speaker
            coreferences.append(columns[-1])
        else:
            # a blank row terminates the current sentence
            sentences.append([])
    clusters = coreference_annotations_to_clusters(coreferences)
    # the trailing blank line leaves one empty sentence at the end; drop it
    return {'doc_key': doc_key, 'sentences': sentences[:-1],
            'speakers': speakers, 'clusters': clusters}
def coreference_annotations_to_clusters(annotations: List[str]) -> List[List[Tuple]]:
    """
    Turn the per-token coreference column into mention clusters.

    :param annotations: last CoNLL column for each token, e.g.
        ``['(3', '-', '3)']`` (``(id`` opens a mention, ``id)`` closes it,
        ``(id)`` is a single-token mention, ``|`` separates several markers)
    :return: list of clusters, each a list of inclusive ``(start, end)``
        token spans
    """
    clusters = defaultdict(list)
    open_mentions = defaultdict(list)
    for token_idx, annotation in enumerate(annotations):
        if annotation == '-':
            continue
        for part in annotation.split('|'):
            cluster_id = int(part.replace('(', '').replace(')', ''))
            starts_here = part.startswith('(')
            ends_here = part.endswith(')')
            if starts_here and ends_here:
                # single-token mention
                clusters[cluster_id].append((token_idx, token_idx))
            elif starts_here:
                open_mentions[cluster_id].append(token_idx)
            elif ends_here:
                # close the most recently opened mention of this cluster
                clusters[cluster_id].append(
                    (open_mentions[cluster_id].pop(), token_idx))
            else:
                raise NotImplementedError
    # every opened mention must have been closed
    assert all(not pending for pending in open_mentions.values())
    return list(clusters.values())
def checkout_clusters(doc_info):
    """Debug helper: print every cluster as its actual mention strings."""
    flat_words = []
    for sentence in doc_info['sentences']:
        flat_words.extend(sentence)
    readable = [[' '.join(flat_words[start:end + 1]) for start, end in cluster]
                for cluster in doc_info['clusters']]
    print(readable)
def tokenize_document(doc_info: dict, tokenizer: "FullTokenizer") -> dict:
    """
    Run wordpiece tokenization over the whole document.

    Alongside the flat sub-token list, two alignment maps are produced:
    ``sentence_map`` (sub-token -> sentence id) and ``subtoken_map``
    (sub-token -> original word index). Speaker positions and cluster spans
    are re-expressed in sub-token coordinates.

    :param doc_info: output of parse_document
    :param tokenizer: any object with a BERT-style ``tokenize(str)`` method
    :return: dict with sub_tokens / sentence_map / subtoken_map / speakers /
        clusters / doc_key
    """
    sub_tokens: List[str] = []
    sentence_map: List[int] = []
    subtoken_map: List[int] = []
    word_idx = -1
    for sentence_id, sentence in enumerate(doc_info['sentences']):
        for word in sentence:
            word_idx += 1
            pieces = tokenizer.tokenize(word)
            sub_tokens += pieces
            sentence_map += [sentence_id] * len(pieces)
            subtoken_map += [word_idx] * len(pieces)
    # map each speaker-change word index to the first sub-token of that word
    speakers = {subtoken_map.index(word_index): tokenizer.tokenize(speaker)
                for word_index, speaker in doc_info['speakers']}
    # span start -> first sub-token of the start word,
    # span end -> last sub-token of the end word
    clusters = [[(subtoken_map.index(start),
                  len(subtoken_map) - 1 - subtoken_map[::-1].index(end))
                 for start, end in cluster]
                for cluster in doc_info['clusters']]
    return {'sub_tokens': sub_tokens, 'sentence_map': sentence_map,
            'subtoken_map': subtoken_map, 'speakers': speakers,
            'clusters': clusters, 'doc_key': doc_info['doc_key']}
def expand_with_speakers(tokenized_document: dict) -> Tuple[List[str], List[int]]:
    """
    Interleave speaker-name markers with the document sub-tokens.

    Wherever a speaker turn begins, ``[SPEAKER_START] <speaker sub-tokens>
    [SPEAKER_END]`` is spliced in before the token. The returned mask holds
    the original sub-token index for real tokens and ``-1`` for the inserted
    speaker material.
    """
    tokens_out: List[str] = []
    mask_out: List[int] = []
    speaker_at = tokenized_document['speakers']
    for idx, sub_token in enumerate(tokenized_document['sub_tokens']):
        if idx in speaker_at:
            insertion = [SPEAKER_START, *speaker_at[idx], SPEAKER_END]
            tokens_out += insertion
            mask_out += [-1] * len(insertion)
        tokens_out.append(sub_token)
        mask_out.append(idx)
    return tokens_out, mask_out
def construct_sliding_windows(sequence_length: int, sliding_window_size: int):
    """
    Build half-overlapping windows covering ``sequence_length`` positions.

    Each window is ``(start, end, mask)``; the mask marks the positions this
    window "owns": the middle half always, plus the leading quarter for the
    first window and the trailing part for the last one, so every position
    is owned by exactly one window.

    :param sequence_length: e.g. 9
    :param sliding_window_size: e.g. 4
    :return: [(0, 4, [1, 1, 1, 0]), (2, 6, [0, 1, 1, 0]), (4, 8, [0, 1, 1, 0]), (6, 9, [0, 1, 1])]
    """
    windows = []
    half = sliding_window_size // 2
    quarter = sliding_window_size // 4
    start, end = 0, 0
    while end < sequence_length:
        end = min(start + sliding_window_size, sequence_length)
        head = 1 if start == 0 else 0
        tail = 1 if end == sequence_length else 0
        mask = ([head] * quarter
                + [1] * half
                + [tail] * (sliding_window_size - half - quarter))
        windows.append((start, end, mask[: end - start]))
        start += half  # stride is half a window
    # sanity check: each position owned by exactly one window
    assert sum(sum(window[2]) for window in windows) == sequence_length
    return windows
def flatten_clusters(clusters: List[List[Tuple[int, int]]]) -> Tuple[List[int], List[int], List[int]]:
    """
    Flatten nested clusters into three parallel lists.

    Cluster ids are 1-based so that 0 can mean "no cluster" downstream.

    :return: ``(span_starts, span_ends, cluster_ids)``, aligned by position
    """
    starts: List[int] = []
    ends: List[int] = []
    ids: List[int] = []
    for cluster_idx, cluster in enumerate(clusters, start=1):
        for span_start, span_end in cluster:
            starts.append(span_start)
            ends.append(span_end)
            ids.append(cluster_idx)
    return starts, ends, ids
def convert_to_sliding_window(tokenized_document: dict, sliding_window_size: int):
    """
    Construct sliding windows and allocate tokens and masks into each window.

    Two positions per window are reserved for [CLS]/[SEP], so the payload
    windows are built over ``sliding_window_size - 2`` positions.

    Mask sentinel values:
      >= 0 : index of the original sub-token this position maps back to
      -1   : inserted speaker tokens (see expand_with_speakers)
      -2   : token present in the window but "owned" by a neighbouring window
      -3   : [CLS] / [SEP]
      -4   : [PAD]

    :param tokenized_document: output of tokenize_document
    :param sliding_window_size: full window length including [CLS]/[SEP]
    :return: (token_windows, mask_windows), one entry per window
    """
    expanded_tokens, expanded_masks = expand_with_speakers(tokenized_document)
    sliding_windows = construct_sliding_windows(len(expanded_tokens), sliding_window_size - 2)
    token_windows = []  # expanded tokens allocated to each sliding window
    mask_windows = []  # corresponding masks for each sliding window
    for window_start, window_end, window_mask in sliding_windows:
        original_tokens = expanded_tokens[window_start: window_end]
        original_masks = expanded_masks[window_start: window_end]
        # positions not owned by this window are marked -2
        window_masks = [-2 if w == 0 else o for w, o in zip(window_mask, original_masks)]
        one_window_token = ['[CLS]'] + original_tokens + ['[SEP]'] + ['[PAD]'] * (sliding_window_size - 2 - len(original_tokens))
        one_window_mask = [-3] + window_masks + [-3] + [-4] * (sliding_window_size - 2 - len(original_tokens))
        assert len(one_window_token) == sliding_window_size
        assert len(one_window_mask) == sliding_window_size
        token_windows.append(one_window_token)
        mask_windows.append(one_window_mask)
    # every original sub-token must be owned by exactly one window
    assert len(tokenized_document['sentence_map']) == sum([i >= 0 for j in mask_windows for i in j])
    return token_windows, mask_windows
def prepare_training_data(data_dir: str, language: str, vocab_file: str, sliding_window_size: int):
    """Convert the train/dev/test *.v4_gold_conll files into tfrecords.

    For each split, every document is parsed, tokenized, windowed and
    serialized; a ``<split>.<language>.map`` JSON file mapping document
    index -> original doc_key is written alongside the tfrecord.
    """
    tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=False)
    for dataset in ['train', 'dev', 'test']:
        conll_file_path = os.path.join(data_dir, F"{dataset}.{language}.v4_gold_conll")
        doc_map = {}
        documents = read_conll_file(conll_file_path)
        # BUGFIX: the TFRecordWriter was never closed, so the final records
        # could remain unflushed; use it as a context manager instead.
        with tf.python_io.TFRecordWriter(os.path.join(data_dir, F"{dataset}.{language}.tfrecord")) as writer:
            for doc_idx, document in enumerate(documents):
                doc_info = parse_document(document, language)
                checkout_clusters(doc_info)
                tokenized_document = tokenize_document(doc_info, tokenizer)
                doc_map[doc_idx] = tokenized_document['doc_key']
                token_windows, mask_windows = convert_to_sliding_window(tokenized_document, sliding_window_size)
                input_id_windows = [tokenizer.convert_tokens_to_ids(tokens) for tokens in token_windows]
                span_starts, span_ends, cluster_ids = flatten_clusters(tokenized_document['clusters'])
                instance = (doc_idx, tokenized_document['sentence_map'], tokenized_document['subtoken_map'],
                            input_id_windows, mask_windows, span_starts, span_ends, cluster_ids)
                write_instance_to_example_file(writer, instance)
        with open(os.path.join(data_dir, F"{dataset}.{language}.map"), 'w') as fo:
            json.dump(doc_map, fo, indent=2)
def write_instance_to_example_file(writer: "tf.python_io.TFRecordWriter", instance: tuple):
    """Serialize one document instance as a tf.train.Example and write it."""
    (doc_idx, sentence_map, subtoken_map, input_id_windows, mask_windows,
     span_starts, span_ends, cluster_ids) = instance
    # concatenate all windows into single flat sequences
    flat_input_ids = [token_id for window in input_id_windows for token_id in window]
    flat_input_mask = [mask_value for window in mask_windows for mask_value in window]
    named_values = [
        ('doc_idx', [doc_idx]),
        ('sentence_map', sentence_map),
        ('subtoken_map', subtoken_map),
        ('flattened_input_ids', flat_input_ids),
        ('flattened_input_mask', flat_input_mask),
        ('span_starts', span_starts),
        ('span_ends', span_ends),
        ('cluster_ids', cluster_ids),
    ]
    features = {name: create_int_feature(values) for name, values in named_values}
    example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(example.SerializeToString())
def create_int_feature(values):
    """Wrap an iterable of integers as a tf.train int64 Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
if __name__ == '__main__':
    # Convert the train/dev/test *.v4_gold_conll files into tfrecords.
    prepare_training_data(args.data_dir, args.language, args.vocab_file, args.sliding_window_size)
| 41.866438 | 129 | 0.669857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,362 | 0.192612 |
2d485dc621723bda46a0ef23e63d9f2aea8f60fb | 8,611 | py | Python | pulseplot/pulseplot.py | kaustubhmote/pulseplot | 9003ab0784666cd34023e29a5f97acb9861f3567 | [
"BSD-3-Clause"
] | 2 | 2022-02-23T11:23:53.000Z | 2022-03-12T06:39:54.000Z | pulseplot/pulseplot.py | kaustubhmote/pulseplot | 9003ab0784666cd34023e29a5f97acb9861f3567 | [
"BSD-3-Clause"
] | null | null | null | pulseplot/pulseplot.py | kaustubhmote/pulseplot | 9003ab0784666cd34023e29a5f97acb9861f3567 | [
"BSD-3-Clause"
] | null | null | null | """
Utilities for making plots
"""
from warnings import warn
import matplotlib.pyplot as plt
from matplotlib.projections import register_projection
from matplotlib.animation import ArtistAnimation
from .parse import Delay, Pulse, PulseSeq
def subplots(*args, **kwargs):
    """
    Drop-in replacement for matplotlib.pyplot.subplots that forces the
    "PulseProgram" projection on every created axes (warning the user if a
    different projection was requested).
    """
    register_projection(PulseProgram)
    subplot_kw = kwargs.setdefault("subplot_kw", {})
    if "projection" in subplot_kw:
        warn(
            f"Projection will be set to 'PulseProgram' instead of {kwargs['subplot_kw']['projection']}"
        )
    subplot_kw["projection"] = "PulseProgram"
    return plt.subplots(*args, **kwargs)
def subplot_mosaic(*args, **kwargs):
    """
    Drop-in replacement for matplotlib.pyplot.subplot_mosaic that forces the
    "PulseProgram" projection on every created axes (warning the user if a
    different projection was requested).
    """
    register_projection(PulseProgram)
    subplot_kw = kwargs.setdefault("subplot_kw", {})
    if "projection" in subplot_kw:
        warn(
            f"Projection will be set to 'PulseProgram' instead of {kwargs['subplot_kw']['projection']}"
        )
    subplot_kw["projection"] = "PulseProgram"
    return plt.subplot_mosaic(*args, **kwargs)
def show(*args, **kwargs):
    """
    Thin pass-through to matplotlib.pyplot.show, provided so that users of
    pulse diagrams never have to import matplotlib.pyplot themselves.
    """
    plt.show(*args, **kwargs)
def animation(*args, **kwargs):
    """Thin wrapper around matplotlib's ArtistAnimation (saves an import)."""
    return ArtistAnimation(*args, **kwargs)
class PulseProgram(plt.Axes):
    """
    A class that defines convenience functions for
    plotting elements of an NMR pulse sequence on a
    matplotlib axes object.

    Usage
    -----
    >>> from pulseplot import pplot
    >>> fig, ax = pplot()
    >>> ax.params["p1"] = 0.5
    >>> ax.pulse("p1 pl1 ph1 f1")
    >>> ax.delay(2)
    >>> ax.pulse("p2 pl1 ph2 f1 w")
    >>> ax.pulse("p2 pl1 ph2 f2")
    >>> ax.delay(2)
    >>> ax.pulse("p1 pl1 ph2 f1 w")
    >>> ax.pulse("p1 pl1 ph2 f2 w")
    >>> ax.fid("p1 pl1 phrec f2")

    """
    name = "PulseProgram"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.center_align = False  # vertically center pulses around the channel
        self.spacing = 0.0  # horizontal padding added around every pulse
        self.phase_dy = 0.0  # global vertical offset for phase annotations
        self.text_dy = 0.0  # global vertical offset for pulse labels
        self.fontsize = None  # default fontsize applied to labels, if set
        self.time = 0.0  # current position on the horizontal (time) axis
        self.params = {}  # user parameters resolved by Pulse/Delay parsing
        self.limits = {
            "xlow": 10,
            "xhigh": -10,
            "ylow": 10,
            "yhigh": -10,
            "dx": 0.1,
            "dy": 0.1,
        }
        self.set_limits()
        self.axis(False)

    def pulse(self, *args, **kwargs):
        """Draw a single pulse and, unless it carries the "wait" flag,
        advance the internal clock past its end."""
        if isinstance(args[0], Pulse):
            p = args[0]
        else:
            p = Pulse(*args, **kwargs, external_params=self.params)

        if p.defer_start_time:
            p.start_time = self.time + self.spacing
            p.plen -= 2 * self.spacing

        if not p.wait:
            self.time = p.end_time() + 2 * self.spacing

        p.text_dy += self.text_dy
        p.phtxt_dy += self.phase_dy

        if self.fontsize:
            if "fontsize" not in p.text_kw:
                p.text_kw["fontsize"] = self.fontsize
            if "fontsize" not in p.phase_kw:
                p.phase_kw["fontsize"] = self.fontsize

        # add the actual pulse
        pulse_patch = p.patch()
        super().add_patch(pulse_patch)
        xarr, yarr = pulse_patch.xy[:, 0], pulse_patch.xy[:, 1]

        if self.center_align:
            center = (yarr.min() + yarr.max()) / 2.0 - yarr.min()
            yarr -= center
            pulse_patch.xy[:, 1] = yarr
            p.text_dy -= center
            p.phtxt_dy -= center

        self.edit_limits(
            xlow=xarr.min(), xhigh=xarr.max(), ylow=yarr.min(), yhigh=yarr.max()
        )
        p.start_time -= self.spacing
        p.plen += 2 * self.spacing

        # Draw the pulse label / phase annotation when the pulse defines
        # them. BUGFIX: label_params/phase_params are methods; the previous
        # code subscripted the bound method (p.label_params["x"]), which
        # always raised TypeError inside a bare except, so the axis limits
        # were never extended to include label positions.
        try:
            label_kw = p.label_params()
            super().text(**label_kw)
            xpos, ypos = label_kw["x"], label_kw["y"]
            self.edit_limits(xlow=xpos, xhigh=xpos, ylow=ypos, yhigh=ypos)
        except Exception:
            pass
        try:
            phase_kw = p.phase_params()
            super().text(**phase_kw)
            xpos, ypos = phase_kw["x"], phase_kw["y"]
            self.edit_limits(xlow=xpos, xhigh=xpos, ylow=ypos, yhigh=ypos)
        except Exception:
            pass

        p.text_dy -= self.text_dy
        p.phtxt_dy -= self.phase_dy

    def delay(self, *args, **kwargs):
        """Advance the internal clock by a delay, drawing its label if any."""
        if isinstance(args[0], Delay):
            d = args[0]
        else:
            d = Delay(*args, **kwargs, external_params=self.params)
        if d.defer_start_time:
            d.start_time = self.time
        self.time += d.time
        try:
            super().text(**d.label_params())
        except Exception:
            # the delay may not define a label; skip silently
            pass

    def fid(self, *args, **kwargs):
        """Draw a free-induction-decay shape (an open, untruncated pulse)."""
        self.pulse(
            *args,
            **kwargs,
            shape="fid",
            truncate_off=True,
            open=True,
            facecolor="none",
        )

    def clear(self):
        """
        Removes all channels and resets the time to zero
        """
        self.time = 0.0
        super().clear()

    def draw_channels(self, *args, **kwargs):
        """
        Draws horizontal lines marking the channels. Each positional
        argument is either a key of self.params or a numeric y position.
        """
        defaults = {"color": "k", "linewidth": 1.0, "zorder": -1}
        try:
            x0, x1 = kwargs["limits"]
            kwargs.pop("limits")
        except KeyError:
            x0, x1 = self.limits["xlow"], self.limits["xhigh"]
        defaults = {**defaults, **kwargs}
        for channel in args:
            if channel in self.params.keys():
                super().hlines(self.params[channel], x0, x1, **defaults)
            else:
                try:
                    super().hlines(channel, x0, x1, **defaults)
                except ValueError:
                    raise ValueError(
                        "Channel must be present in parameters, or must be a number"
                    )

    def pseq(self, instruction):
        """
        Main entry point: parse a pulse-sequence string (or accept an
        already-built PulseSeq) and draw each element in order.
        """
        if isinstance(instruction, str):
            instruction = PulseSeq(instruction, external_params=self.params)
        for item in instruction.elements:
            if isinstance(item, Pulse):
                self.pulse(item)
            elif isinstance(item, Delay):
                self.delay(item)
        self.sequence = instruction

    def get_time(self, name=None, index=None):
        """Return the start time of a sequence element, by name or index."""
        if name is not None:
            try:
                index_ = self.sequence.named_elements[name]
                x = self.sequence.elements[index_].start_time
            except KeyError:
                raise KeyError(f"Cannot find the element named {name}")
        elif index is not None:
            try:
                x = self.sequence.elements[index].start_time
            except (IndexError, KeyError):
                # BUGFIX: positional lookup raises IndexError, which the old
                # `except KeyError` never caught; the old message also
                # interpolated `name`, which is None on this branch.
                raise KeyError(f"Cannot find the element at index {index}")
        else:
            raise ValueError("Either a name of a index must be supplied")
        return x

    def set_limits(self, limits=None):
        """Apply self.limits (optionally replacing it first) to the axes."""
        if limits is not None:
            self.limits = limits
        try:
            super().set_xlim(self.limits["xlow"], self.limits["xhigh"])
            super().set_ylim(self.limits["ylow"], self.limits["yhigh"])
        except (KeyError, TypeError) as error:
            # BUGFIX: self.limits is a dict, so a bad value surfaces as
            # KeyError (missing key) or TypeError (not a mapping) -- never
            # IndexError, which is what was caught before.
            raise KeyError(
                "limits should be a dict with keys xlow, xhigh, ylow, yhigh"
            ) from error

    def edit_limits(self, xlow=None, xhigh=None, ylow=None, yhigh=None):
        """Grow the stored limits to include the given extents (with a small
        margin) and re-apply them."""
        dx, dy = self.limits["dx"], self.limits["dy"]
        if (xlow is not None) and (xlow - dx < self.limits["xlow"]):
            self.limits["xlow"] = xlow - dx
        if (ylow is not None) and (ylow - dy < self.limits["ylow"]):
            self.limits["ylow"] = ylow - dy
        if (xhigh is not None) and (xhigh + dx > self.limits["xhigh"]):
            self.limits["xhigh"] = xhigh + dx
        if (yhigh is not None) and (yhigh + dy > self.limits["yhigh"]):
            self.limits["yhigh"] = yhigh + dy
        self.limits["dx"] = (self.limits["xhigh"] - self.limits["xlow"]) / 50
        self.limits["dy"] = (self.limits["yhigh"] - self.limits["ylow"]) / 50
        self.set_limits()
| 26.495385 | 107 | 0.541517 | 6,690 | 0.776913 | 0 | 0 | 0 | 0 | 0 | 0 | 2,160 | 0.250842 |
2d487db3bf1f6d954a1dfa7aa6ad6f0c54cdd16d | 1,835 | py | Python | test/test_immutable_maps.py | zuoralabs/autolisp | ed5d37f36914e25a2ab3196eb5985c7fff8fd00b | [
"BSD-2-Clause"
] | null | null | null | test/test_immutable_maps.py | zuoralabs/autolisp | ed5d37f36914e25a2ab3196eb5985c7fff8fd00b | [
"BSD-2-Clause"
] | null | null | null | test/test_immutable_maps.py | zuoralabs/autolisp | ed5d37f36914e25a2ab3196eb5985c7fff8fd00b | [
"BSD-2-Clause"
] | 1 | 2020-10-03T12:23:46.000Z | 2020-10-03T12:23:46.000Z | from genlisp.immutables import _ImmutableMap as ImmutableMap
import random
# Fixture dictionaries exercised by every test below: small literal maps
# (including near-duplicates differing by a single value) plus two large
# random maps to stress hashing and lookup paths.
test_dicts = [{1: 2},
              {1: 3},
              {1: 2, 3: 4},
              dict(a=1, b=2, c=3, d=4, e=5),
              dict(a=3, b=1, c=3, d=4, e=5),
              {ii: random.randint(0, 10000) for ii in range(20000)},
              {ii: random.randint(0, 10000) for ii in range(20000)},
              ]
def test_get():
    """Subscription on the frozen map matches the source dict."""
    for source in test_dicts:
        frozen = ImmutableMap(source)
        for key in source:
            assert frozen[key] == source[key]
def test_eq_and_hash():
    """Equal maps compare and hash equal; distinct fixtures never collide."""
    frozen_maps = [ImmutableMap(source) for source in test_dicts]
    for idx, source in enumerate(test_dicts):
        first = ImmutableMap(source)
        second = ImmutableMap(source)
        assert first == second
        assert hash(first) == hash(second)
        for other_idx, other in enumerate(frozen_maps):
            if idx == other_idx:
                continue
            assert first != other
            assert hash(first) != hash(other)
        # round-trip back to a plain dict
        assert dict(first) == source
def test_items():
    """items() exposes every (key, value) pair of the source dict."""
    for source in test_dicts:
        frozen = ImmutableMap(source)
        for pair in source.items():
            assert pair in frozen.items()
        assert set(frozen.items()) == set(source.items())
def test_values():
    """values() exposes every value of the source dict."""
    for source in test_dicts:
        frozen = ImmutableMap(source)
        # only spot-check the first ~100 values; a full membership scan over
        # the large fixtures would grow as O(n^2)
        for checked, value in enumerate(source.values()):
            if checked > 100:
                break
            assert value in frozen.values()
        assert set(frozen.values()) == set(source.values())
def test_repeated_keys():
    # Later pairs with the same key overwrite earlier ones, as with dict().
    assert len(ImmutableMap([(1, 2), (1, 3)]).items()) == 1
def test_get_default():
    # BUGFIX: this function was also named test_get, shadowing the earlier
    # test_get (subscription test) so pytest never collected it. Renamed to
    # reflect what it checks: .get() with a default value.
    assert ImmutableMap({1: 2}).get(2, 3) == 3
    assert ImmutableMap({1: 2}).get(1, 3) == 2
def test_update():
    """update() returns a new merged map and leaves the receiver untouched."""
    original = ImmutableMap({1: 2})
    merged = original.update({3: 4})
    assert merged == {1: 2, 3: 4}
    assert original == ImmutableMap({1: 2})
| 25.136986 | 68 | 0.520981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.06049 |
2d493273ba4aac5351552304e302799fbea8feee | 740 | py | Python | Mentorama/Modulo 3 - POO/Quadrado.py | MOURAIGOR/python | b267f8ef277a385e3e315e88a22390512bf1e101 | [
"MIT"
] | null | null | null | Mentorama/Modulo 3 - POO/Quadrado.py | MOURAIGOR/python | b267f8ef277a385e3e315e88a22390512bf1e101 | [
"MIT"
] | null | null | null | Mentorama/Modulo 3 - POO/Quadrado.py | MOURAIGOR/python | b267f8ef277a385e3e315e88a22390512bf1e101 | [
"MIT"
] | null | null | null | class Quadrado:
    def __init__(self, lado):
        """Create a square with side length ``lado``."""
        # tamanho_lado holds the current side length of the square
        self.tamanho_lado = lado
def mudar_valor_lado(self, novo_lado):
lado = novo_lado
self.tamanho_lado = novo_lado
def retornar_valor_lado(self, retorno):
self.tamanho_lado = retorno
print(retorno)
def calcular_area(self, area):
self.tamanho_lado = area
print(area*area)
# Demonstration script: exercise each Quadrado method and print the results.
quadrado = Quadrado(6)
print('Tamanho atual é:')
print(quadrado.tamanho_lado)
print('----------------')
# change the side from 6 to 3
quadrado.mudar_valor_lado(3)
print('Novo tamanho é:')
print(quadrado.tamanho_lado)
print('----------------')
print('Tamanho atual:')
quadrado.retornar_valor_lado(3)
print('----------------')
print('Total da area ficou em :')
quadrado.calcular_area(3)
| 24.666667 | 43 | 0.647297 | 383 | 0.516173 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.179245 |
2d49e0003cf21961e6f6dc0b7737191d0a1bdba9 | 772 | py | Python | src/sima/riflex/fileformatcode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/riflex/fileformatcode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/riflex/fileformatcode.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # Generated with FileFormatCode
#
from enum import Enum
from enum import auto
class FileFormatCode(Enum):
    """Selector for the output file format (generated code)."""
    BINARY_OUTPUT_ONLY = auto()
    ASCII_OUTPUT_ONLY = auto()
    NO_ADDITIONAL_OUTPUT = auto()
    ASCII_OUTPUT = auto()
    BINARY_OUTPUT = auto()

    def label(self):
        """Return the human-readable description of this format code."""
        descriptions = {
            FileFormatCode.BINARY_OUTPUT_ONLY: "Binary format",
            FileFormatCode.ASCII_OUTPUT_ONLY: "ASCII format",
            FileFormatCode.NO_ADDITIONAL_OUTPUT: "Outmod (IFNDYN) format",
            FileFormatCode.ASCII_OUTPUT: "Outmod (IFNDYN) and ASCII format",
            FileFormatCode.BINARY_OUTPUT: "Outmod (IFNDYN) and Binary format",
        }
        return descriptions.get(self)
2d4a0bf88b1675b2283b37ee3a00023fb0c67f4b | 13,075 | py | Python | wolframclient/evaluation/pool.py | krbarker/WolframClientForPython | f2198b15cad0f406b78ad40a4d1e3ca76125b408 | [
"MIT"
] | null | null | null | wolframclient/evaluation/pool.py | krbarker/WolframClientForPython | f2198b15cad0f406b78ad40a4d1e3ca76125b408 | [
"MIT"
] | null | null | null | wolframclient/evaluation/pool.py | krbarker/WolframClientForPython | f2198b15cad0f406b78ad40a4d1e3ca76125b408 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import logging
from asyncio import CancelledError
from wolframclient.evaluation.base import WolframAsyncEvaluator
from wolframclient.evaluation.kernel.asyncsession import (
WolframLanguageAsyncSession)
from wolframclient.exception import WolframKernelException
from wolframclient.utils import six
from wolframclient.utils.api import asyncio
from wolframclient.utils.functional import is_iterable
logger = logging.getLogger(__name__)
__all__ = ['WolframEvaluatorPool', 'parallel_evaluate']
class WolframEvaluatorPool(WolframAsyncEvaluator):
    """ A pool of kernels to dispatch one-shot evaluations asynchronously.

    Evaluators can be specified in various ways: as a string representing the path to a local kernel,
    a :class:`~wolframclient.evaluation.WolframCloudAsyncSession` or
    an instance of a :class:`~wolframclient.evaluation.WolframLanguageAsyncSession`. More than one evaluator specification
    can be provided in the form of an iterable object, yielding the abovementioned specification.
    If the number of evaluators is less than the requested pool size (`poolsize`), elements are duplicated until the
    requested number of evaluators is reached.

    Create a pool from a Wolfram kernel default location::

        async with WolframEvaluatorPool() as pool:
            await pool.evaluate('$InstallationDirectory')

    Create a pool from a specific Wolfram kernel::

        async with WolframEvaluatorPool('/path/to/local/kernel') as pool:
            await pool.evaluate('1+1')

    Create a pool from a cloud evaluator::

        cloud_session = WolframCloudAsyncSession(credentials=myCredentials)
        async with WolframEvaluatorPool(cloud_session) as pool:
            await pool.evaluate('$MachineName')

    Create a pool from a list of specifications::

        evaluators = [
            WolframCloudAsyncSession(credentials=myCredentials),
            '/path/to/local/kernel'
        ]
        async with WolframEvaluatorPool(evaluators) as pool:
            await pool.evaluate('$MachineName')

    Set `poolsize` to the number of kernel instances. The requested size may not be reached due to licensing
    restrictions.

    Set `load_factor` to specify how many workloads are queued per kernel before a new evaluation becomes a blocking
    operation. Values below or equal to 0 mean an infinite queue size.

    Set `loop` to the event loop to use.

    `kwargs` are passed to :class:`~wolframclient.evaluation.WolframLanguageAsyncSession` during initialization.
    """

    def __init__(self,
                 async_evaluators=None,
                 poolsize=4,
                 load_factor=0,
                 loop=None,
                 async_language_session_class=WolframLanguageAsyncSession,
                 **kwargs):
        super().__init__(loop)
        if poolsize <= 0:
            # BUGFIX: the message previously interpolated an undefined name
            # `i`, so the intended ValueError surfaced as a NameError.
            raise ValueError(
                'Invalid pool size value %i. Expecting a positive integer.' %
                poolsize)
        self._queue = asyncio.Queue(load_factor * poolsize, loop=self._loop)
        self.async_language_session_class = async_language_session_class
        self._evaluators = set()
        if async_evaluators is None or isinstance(async_evaluators,
                                                  six.string_types):
            # single (or default) kernel path: duplicate it poolsize times
            for _ in range(poolsize):
                self._add_evaluator(async_evaluators, **kwargs)
        else:
            if not is_iterable(async_evaluators):
                async_evaluators = itertools.repeat(async_evaluators)
            # cycle through the specifications until the pool is full
            for evaluator in itertools.cycle(async_evaluators):
                if len(self._evaluators) >= poolsize:
                    break
                self._add_evaluator(evaluator)
        self._started_tasks = []
        self._pending_init_tasks = None
        self.last = 0
        self.eval_count = 0
        self.requestedsize = poolsize

    def _add_evaluator(self, evaluator, **kwargs):
        """Register one evaluator, duplicating already-seen instances."""
        if evaluator is None or isinstance(evaluator, six.string_types):
            self._evaluators.add(
                self.async_language_session_class(
                    kernel=evaluator, loop=self._loop, **kwargs))
        elif isinstance(evaluator, WolframAsyncEvaluator):
            if evaluator in self._evaluators:
                self._evaluators.add(evaluator.duplicate())
            else:
                self._evaluators.add(evaluator)
        else:
            raise ValueError(
                'Invalid asynchronous evaluator specifications. %s is neither a string nor a WolframAsyncEvaluator instance.'
                % evaluator)

    async def _kernel_loop(self, kernel):
        """Worker loop: pull tasks from the queue and run them on `kernel`
        until a None sentinel requests termination."""
        while True:
            try:
                future = None
                task = None
                logger.debug('Wait for a new queue entry.')
                task = await self._queue.get()
                if task is None:
                    logger.info(
                        'Termination requested for kernel: %s.' % kernel)
                    break
                # func is one of the evaluate* methods from WolframAsyncEvaluator.
                future, func, args, kwargs = task
                # those methods can't be cancelled since the kernel is evaluating anyway.
                try:
                    func = getattr(kernel, func)
                    result = await asyncio.shield(func(*args, **kwargs))
                    future.set_result(result)
                except Exception as e:
                    future.set_exception(e)
            # First exceptions are those we can't recover from.
            except KeyboardInterrupt as interrupt:
                logger.error(
                    'Loop associated to kernel %s interrupted by user.',
                    kernel)
                raise interrupt
            except CancelledError as cancel:
                logger.warning('Loop associated to kernel %s cancelled.',
                               kernel)
                raise cancel
            except RuntimeError as runtime:
                # BUGFIX: logging uses %-style placeholders; the previous
                # '{}' placeholder was never interpolated.
                logger.error('Unexpected runtime error: %s', runtime)
                raise runtime
            except Exception as e:
                if future:
                    logger.warning(
                        'Exception raised in loop returned in future object. Exception was: %s'
                        % e)
                    future.set_exception(e)
                else:
                    logger.warning(
                        'No future object. Exception raised in loop was: %s' %
                        e)
                    raise e
            finally:
                if task:
                    self._queue.task_done()

    async def _async_start_kernel(self, kernel):
        """Start one kernel; on success schedule its worker loop."""
        kernel_started = False
        try:
            # start the kernel
            await kernel.start()
            kernel_started = True
        except asyncio.CancelledError:
            logger.info('Cancelled signal during kernel start.')
        except Exception as e:
            try:
                if logger.isEnabledFor(logging.INFO):
                    logger.info(
                        'A kernel from pool failed to start: %s. Reason is %s',
                        kernel, e)
                await kernel.stop()
            except asyncio.CancelledError:
                logger.info('Cancelled signal.')
            except Exception as e2:
                logger.info(
                    'Exception raised during clean-up after failed start: %s',
                    e2)
        if kernel_started:
            # schedule the infinite evaluation loop
            task = asyncio.create_task(self._kernel_loop(kernel))
            if logger.isEnabledFor(logging.INFO):
                logger.info('New kernel started in pool: %s.', kernel)
            # register the task. The loop is not always started at this point.
            self._started_tasks.append(task)

    @property
    def started(self):
        return len(self._started_tasks) > 0

    async def start(self):
        """ Start a pool of kernels and wait for at least one of them to
        be ready for evaluation.

        This method is a coroutine.
        If not all the kernels were able to start, it fails and terminates the pool.
        """
        self.stopped = False
        # keep track of the init tasks. We have to wait before terminating.
        self._pending_init_tasks = {(asyncio.ensure_future(
            self._async_start_kernel(kernel), loop=self._loop))
                                    for kernel in self._evaluators}
        # uninitialized kernels are removed if they failed to start
        # if they do start the task (the loop) is added to _started_tasks.
        # we need at least one working kernel.
        # we also need to keep track of start kernel tasks in case of early termination.
        while len(self._started_tasks) == 0:
            if len(self._pending_init_tasks) == 0:
                raise WolframKernelException('Failed to start any kernel.')
            _, self._pending_init_tasks = await asyncio.wait(
                self._pending_init_tasks, return_when=asyncio.FIRST_COMPLETED)
        logger.info('Pool initialized with %i running kernels',
                    len(self._started_tasks))

    async def stop(self):
        """Request worker-loop termination, then stop every evaluator."""
        self.stopped = True
        # make sure all init tasks are finished.
        if len(self._pending_init_tasks) > 0:
            for task in self._pending_init_tasks:
                task.cancel()
            await asyncio.wait(self._pending_init_tasks)
        if len(self._started_tasks) > 0:
            try:
                # request loop termination: one None sentinel per worker.
                for _ in range(len(self._started_tasks)):
                    await self._queue.put(None)
                # wait for loops to finish before terminating the kernels
                await asyncio.wait(self._started_tasks, loop=self._loop)
            except CancelledError:
                pass
            except Exception as e:
                logger.warning('Exception raised while terminating loop: %s',
                               e)
        # terminate the kernel instances, if any started.
        tasks = {
            asyncio.create_task(kernel.stop())
            for kernel in self._evaluators
        }
        # `wait` raises the first exception, but waits for all tasks to finish.
        await asyncio.wait(tasks, loop=self._loop)

    async def terminate(self):
        await self.stop()

    async def ensure_started(self):
        """Start (or restart) the pool if it is not currently running."""
        if not self.started:
            await self.start()
        if self.stopped:
            await self.restart()

    async def _put_evaluation_task(self, future, func, expr, **kwargs):
        """Queue one evaluation; `future` receives the result."""
        await self.ensure_started()
        await self._queue.put((future, func, (expr, ), kwargs))
        self.eval_count += 1

    async def evaluate(self, expr, **kwargs):
        future = asyncio.Future(loop=self._loop)
        await self._put_evaluation_task(future, 'evaluate', expr, **kwargs)
        return await future

    async def evaluate_wxf(self, expr, **kwargs):
        future = asyncio.Future(loop=self._loop)
        await self._put_evaluation_task(future, 'evaluate_wxf', expr, **kwargs)
        return await future

    async def evaluate_wrap(self, expr, **kwargs):
        future = asyncio.Future(loop=self._loop)
        await self._put_evaluation_task(future, 'evaluate_wrap', expr,
                                        **kwargs)
        return await future

    def evaluate_all(self, iterable):
        """Evaluate every expression of `iterable`, returning results in order."""
        return self._loop.run_until_complete(self._evaluate_all(iterable))

    async def _evaluate_all(self, iterable):
        tasks = [asyncio.create_task(self.evaluate(expr)) for expr in iterable]
        return await asyncio.gather(*tasks)

    def __repr__(self):
        return '<%s %i/%i started evaluators, cumulating %i evaluations>' % (
            self.__class__.__name__, len(
                self._started_tasks), self.requestedsize, self.eval_count)

    def __len__(self):
        # NOTE: len() counts *started* worker loops, not requested pool size.
        return len(self._started_tasks)
def parallel_evaluate(expressions,
                      evaluator_spec=None,
                      max_evaluators=4,
                      loop=None):
    """ Evaluate expressions in parallel on a temporary kernel pool.

    A :class:`WolframEvaluatorPool` is created from ``evaluator_spec`` with
    at most ``max_evaluators`` kernels, used to evaluate every expression,
    and terminated afterwards even if an evaluation fails. Results are
    returned in the same order as ``expressions``.

    Evaluations must be independent of one another: there is no guarantee
    that two expressions run on the same kernel.
    """
    loop = loop or asyncio.get_event_loop()
    pool = None
    try:
        pool = WolframEvaluatorPool(
            evaluator_spec, poolsize=max_evaluators, loop=loop)
        loop.run_until_complete(pool.start())
        return pool.evaluate_all(expressions)
    finally:
        # terminate only if construction succeeded
        if pool:
            loop.run_until_complete(pool.terminate())
| 41.246057 | 125 | 0.609025 | 11,527 | 0.881606 | 0 | 0 | 76 | 0.005813 | 6,895 | 0.527342 | 4,464 | 0.341415 |
2d4a9f16085458c84c9dbbf956decc433cb478e6 | 1,237 | py | Python | results/neural_nets/trainsize_varyresults/run_charcnn.py | k-ivey/FastSK | 3316f8078a516e808c2c4fe7ed3fdc8db808fc11 | [
"Apache-2.0"
] | 13 | 2020-04-23T21:25:51.000Z | 2021-11-19T23:56:17.000Z | results/neural_nets/trainsize_varyresults/run_charcnn.py | k-ivey/FastSK | 3316f8078a516e808c2c4fe7ed3fdc8db808fc11 | [
"Apache-2.0"
] | 3 | 2020-08-24T22:15:50.000Z | 2021-05-11T12:42:14.000Z | results/neural_nets/trainsize_varyresults/run_charcnn.py | k-ivey/FastSK | 3316f8078a516e808c2c4fe7ed3fdc8db808fc11 | [
"Apache-2.0"
] | 10 | 2020-04-24T09:27:19.000Z | 2021-06-16T21:05:46.000Z | import os.path as osp
import subprocess
# DNA sequence (transcription-factor binding / chromatin) datasets
dna_datasets = [
    "CTCF",
    "EP300",
    "JUND",
    "RAD21",
    "SIN3A",
    "Pbde",
    "EP300_47848",
    "KAT2B",
    "TP53",
    "ZZZ3",
    "Mcf7",
    "Hek29",
    "NR2C2",
    "ZBTB33",
]
# protein classification datasets (SCOP-style identifiers)
prot_datasets = [
    "1.1",
    "1.34",
    "2.1",
    "2.19",
    "2.31",
    "2.34",
    "2.41",
    "2.8",
    "3.19",
    "3.25",
    "3.33",
    "3.50",
]
# NLP relation/interaction corpora
nlp_datasets = [
    "AImed",
    "BioInfer",
    "CC1-LLL",
    "CC2-IEPA",
    "CC3-HPRD50",
    "DrugBank",
    "MedLine",
]
# all datasets are trained in this fixed order
datasets = dna_datasets + prot_datasets + nlp_datasets
# train the character CNN on every dataset sequentially
for dataset in datasets:
    # disabled interactive confirmation (kept as an inert string literal)
    """s = input("Confirm to train on " + dataset + "\n")
    if s != "y":
        continue"""
    train_file = osp.join("../../data/", dataset + ".train.fasta")
    test_file = osp.join("../../data/", dataset + ".test.fasta")
    log_dir = "log/{}_cnn_results".format(dataset)
    epochs = 200
    # assemble the CLI invocation of the CNN training script
    command = [
        "python",
        "run_cnn.py",
        "--trn",
        train_file,
        "--tst",
        test_file,
        "--log_dir",
        log_dir,
        "--epochs",
        str(epochs),
    ]
    print(" ".join(command))
    # blocks until training finishes; raises CalledProcessError on failure
    output = subprocess.check_output(command)
    # print("done with " + dataset)
| 17.180556 | 66 | 0.481811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.393694 |
2d4e716ec32e46db3c8f8b92ca201b3088f776bf | 1,911 | py | Python | pages/cart_page.py | kukushdi3981/sel-1_test-project | 81066e1501a62f642b5b76745801170ca62ad237 | [
"Apache-2.0"
] | null | null | null | pages/cart_page.py | kukushdi3981/sel-1_test-project | 81066e1501a62f642b5b76745801170ca62ad237 | [
"Apache-2.0"
] | null | null | null | pages/cart_page.py | kukushdi3981/sel-1_test-project | 81066e1501a62f642b5b76745801170ca62ad237 | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class CartPage:
    """Page object for the shopping-cart page (selenium-based)."""

    def __init__(self, driver):
        """Store the driver and create a 10-second explicit wait."""
        self.driver = driver
        self.wait = WebDriverWait(driver, 10)

    def del_from_cart_button(self):
        """Return the list of 'remove item' buttons, one per cart item."""
        return self.driver.find_elements_by_css_selector("li.item button[name='remove_cart_item']")

    @property
    def smal_prod_icons(self):
        # small product shortcut icons shown while the cart has >1 item
        return self.driver.find_elements_by_css_selector("li.shortcut a")

    @property
    def product_image(self):
        # full product image links of the items currently in the cart
        return self.driver.find_elements_by_css_selector("li.item a[class='image-wrapper shadow']")

    @property
    def back_main_page_link(self):
        # link back to the main page shown when the cart is empty
        return self.driver.find_element_by_css_selector("div#checkout-cart-wrapper a")

    def get_count_product_in_cart(self):
        """Return the rows of the checkout summary table (one per product)."""
        self.wait.until(EC.presence_of_element_located((By.ID, "checkout-summary-wrapper")))
        # determine the number of products to remove from the cart
        # (the number of rows in the summary table)
        return self.driver.find_elements_by_css_selector("div#checkout-summary-wrapper td.item")

    def delete_all_prod_from_cart(self, prod_count):
        """Remove ``prod_count`` products one by one, waiting for each removal."""
        for i in range(prod_count):
            # pick the element whose disappearance signals that the page has
            # refreshed after each removal from the cart
            if i != prod_count-1:
                shortcuts = self.smal_prod_icons
                shortcuts[0].click()
            else:
                # last item: shortcut icons are gone, track the product image
                shortcuts = self.product_image
            products_del = self.del_from_cart_button()
            products_del[0].click()
            # wait for the tracked element to go stale before continuing
            self.wait.until(EC.staleness_of(shortcuts[0]))

    def wait_for_empty_cart(self):
        """Wait until the empty-cart link back to the main page appears."""
        self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div#checkout-cart-wrapper a")))
| 39 | 112 | 0.70539 | 1,950 | 0.920245 | 0 | 0 | 383 | 0.180746 | 0 | 0 | 671 | 0.316659 |
2d4f0d89f4feb1da62ed45be7c3ee895624c079b | 2,945 | py | Python | malaya/entity.py | aizatrosli/Malaya | d326384d2c0925c139a7224b77ac20f0ad57f237 | [
"MIT"
] | 1 | 2019-08-20T17:59:20.000Z | 2019-08-20T17:59:20.000Z | malaya/entity.py | aizatrosli/Malaya | d326384d2c0925c139a7224b77ac20f0ad57f237 | [
"MIT"
] | null | null | null | malaya/entity.py | aizatrosli/Malaya | d326384d2c0925c139a7224b77ac20f0ad57f237 | [
"MIT"
] | null | null | null | from ._utils import _tag_class
from ._utils._paths import PATH_ENTITIES, S3_PATH_ENTITIES
def available_deep_model():
    """
    Return the names of the deep learning entity models that can be loaded:
    ['concat', 'bahdanau', 'luong']
    """
    models = ['concat', 'bahdanau', 'luong']
    return models
def available_bert_model():
    """
    Return the names of the BERT entity models that can be loaded:
    ['multilanguage', 'base', 'small']
    """
    models = ['multilanguage', 'base', 'small']
    return models
def deep_model(model = 'bahdanau', validate = True):
    """
    Load deep learning NER model.

    Parameters
    ----------
    model : str, optional (default='bahdanau')
        Model architecture supported. Allowed values:

        * ``'concat'`` - Concating character and word embedded for BiLSTM.
        * ``'bahdanau'`` - Concating character and word embedded including Bahdanau Attention for BiLSTM.
        * ``'luong'`` - Concating character and word embedded including Luong Attention for BiLSTM.
    validate: bool, optional (default=True)
        if True, malaya will check model availability and download if not available.

    Returns
    -------
    TAGGING: malaya._models._tensorflow_model.TAGGING class

    Raises
    ------
    ValueError
        if ``model`` is not a string, ``validate`` is not a boolean, or
        ``model`` is not a supported architecture.
    """
    if not isinstance(model, str):
        raise ValueError('model must be a string')
    if not isinstance(validate, bool):
        raise ValueError('validate must be a boolean')
    model = model.lower()
    if model not in available_deep_model():
        # raise ValueError for consistency with the other argument checks
        # instead of a bare Exception, so callers can catch invalid
        # arguments specifically (ValueError is still an Exception subclass)
        raise ValueError(
            'model not supported, please check supported models from malaya.entity.available_deep_model()'
        )
    return _tag_class.deep_model(
        PATH_ENTITIES,
        S3_PATH_ENTITIES,
        'entity',
        model = model,
        validate = validate,
    )
def bert(model = 'base', validate = True):
    """
    Load BERT NER model.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'multilanguage'`` - bert multilanguage released by Google, trained on NER.
        * ``'base'`` - base bert-bahasa released by Malaya, trained on NER.
        * ``'small'`` - small bert-bahasa released by Malaya, trained on NER.
    validate: bool, optional (default=True)
        if True, malaya will check model availability and download if not available.

    Returns
    -------
    TAGGING_BERT: malaya._models._tensorflow_model.TAGGING_BERT class

    Raises
    ------
    ValueError
        if ``model`` is not a string, ``validate`` is not a boolean, or
        ``model`` is not a supported architecture.
    """
    if not isinstance(model, str):
        raise ValueError('model must be a string')
    if not isinstance(validate, bool):
        raise ValueError('validate must be a boolean')
    model = model.lower()
    if model not in available_bert_model():
        # raise ValueError for consistency with the other argument checks
        # instead of a bare Exception (ValueError is still an Exception
        # subclass, so existing broad handlers keep working)
        raise ValueError(
            'model not supported, please check supported models from malaya.entity.available_bert_model()'
        )
    return _tag_class.bert(
        PATH_ENTITIES,
        S3_PATH_ENTITIES,
        'entity',
        model = model,
        validate = validate,
    )
| 30.05102 | 106 | 0.642784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.631579 |
2d51dd7e74d06bcb018f0868e88b1e186fc49ead | 1,451 | py | Python | src/invoices/tests/test_models.py | brianl9995/payinv | 7fc2160c2c9bbb9568a659ff3edf2526142d33fc | [
"MIT"
] | 2 | 2019-09-21T23:36:49.000Z | 2019-10-02T23:31:21.000Z | src/invoices/tests/test_models.py | brianl9995/payinv | 7fc2160c2c9bbb9568a659ff3edf2526142d33fc | [
"MIT"
] | 2 | 2019-10-04T13:51:43.000Z | 2021-06-10T21:57:55.000Z | src/invoices/tests/test_models.py | brianl9995/payinv | 7fc2160c2c9bbb9568a659ff3edf2526142d33fc | [
"MIT"
] | 2 | 2019-10-02T23:31:22.000Z | 2020-06-07T14:57:55.000Z | from django.test import TestCase
from core.tests.factories import SaleFactory, InvoiceFactory
from invoices.models import Invoice
class InvoiceModelTestCase(TestCase):
    """Tests for ``Invoice.sales_pending``: sales not yet fully invoiced."""

    def test_sales_pending_without_invoice(self):
        """ Should return this sale if not have Invoice to cover"""
        sale = SaleFactory(total_value=100)
        self.assertCountEqual(Invoice.sales_pending().all(), [sale])

    def test_sales_pending_with_invoice_not_all_value(self):
        """ Should return this sale if has invoices for not all total_value"""
        sale = SaleFactory(total_value=1000)
        # invoices cover only 100 of 1000, so the sale is still pending
        InvoiceFactory(sale=sale, total_value=50)
        InvoiceFactory(sale=sale, total_value=50)
        self.assertCountEqual(Invoice.sales_pending().all(), [sale])

    def test_sales_pending_with_invoice_for_all_value(self):
        """ Should return not sale if has invoices for all total_value"""
        sale = SaleFactory(total_value=1000)
        # invoices cover the full 1000, so no pending sale remains
        InvoiceFactory(sale=sale, total_value=500)
        InvoiceFactory(sale=sale, total_value=500)
        self.assertEqual(Invoice.sales_pending().count(), 0)

    def test_sales_pending_with_invoice_for_most_all_value(self):
        """ Should return not sale if invoices exceed the total_value"""
        sale = SaleFactory(total_value=1000)
        # invoices sum to 2000, more than the sale value; still not pending
        InvoiceFactory(sale=sale, total_value=1500)
        InvoiceFactory(sale=sale, total_value=500)
        self.assertEqual(Invoice.sales_pending().count(), 0)
| 43.969697 | 78 | 0.724328 | 1,318 | 0.908339 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.178498 |
2d524adbfa80670475741f1458c92253a9a3edbe | 523 | py | Python | testes e exercícios/URI/somaImparesConsecutivosII.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | testes e exercícios/URI/somaImparesConsecutivosII.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | testes e exercícios/URI/somaImparesConsecutivosII.py | LightSnow17/exercicios-Python | 3ac016ce284860f45d71cfb396d33a73ec06c25d | [
"MIT"
] | null | null | null | valores = []
quant = int(input())
for c in range(0, quant):
x, y = input().split(' ')
x = int(x)
y = int(y)
maior = menor = soma = 0
if x > y:
maior = x
menor = y
else:
maior = y
menor = x
if maior == menor+1 or maior == menor:
valores.append(0)
else:
for i in range(menor+1, maior):
if i % 2 != 0:
soma += i
if i+1 == maior:
valores.append(soma)
for valor in valores:
print(valor)
| 20.92 | 42 | 0.447419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.005736 |
2d57a7c0a415efe520854cd9d0641e6992522bef | 53,932 | py | Python | magmap/stats/vols.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/stats/vols.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | magmap/stats/vols.py | kaparna126/magellanmapper | 6a50e82b3bcdbbb4706f749f366b055f0c6f13f2 | [
"BSD-3-Clause"
] | null | null | null | # Regional volume and density management
# Author: David Young, 2018, 2019
"""Measure volumes and densities by regions.
Intended to be higher-level, relatively atlas-agnostic measurements.
"""
from enum import Enum
from time import time
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from skimage import measure
from magmap.cv import chunking
from magmap.stats import atlas_stats, clustering
from magmap.settings import config
from magmap.io import libmag
from magmap.atlas import ontology
from magmap.cv import cv_nd
from magmap.io import df_io
# metric keys and column names
# metric keys, which double as the column names in output data frames
LabelMetrics = Enum(
    "LabelMetrics", [
        "Region",
        "Volume",  # volume, converted to physical units
        "VolAlt",  # alternate volume, eg smoothed volume
        "VolPx",  # volume in pixels
        "VolAltPx",  # alternate volume in pixels
        "Intensity",
        "Nuclei",
        # densities; "Density" = nuclei density
        # TODO: change density to nuclei density
        # TODO: consider changing enum for KEY: name format
        "Density", "DensityIntens",
        "RegVolMean", "RegNucMean", "RegDensityMean",  # per region
        "VarNuclei", "VarNucIn", "VarNucOut",
        "VarIntensity", "VarIntensIn", "VarIntensOut",
        "MeanIntensity",
        "MedIntensity",
        "LowIntensity",
        "HighIntensity",
        "EntropyIntensity",
        "VarIntensMatch",
        "VarIntensDiff",
        "MeanNuclei",
        "VarNucMatch",

        # distances
        "EdgeSize",  # edge pixels
        "EdgeDistSum",  # sum of distances between edges in two images
        "EdgeDistMean",  # mean of these distances
        "Dist",  # generic distance

        # variation
        "CoefVarIntens", "CoefVarNuc",

        # shape measurements
        "SurfaceArea", "Compactness",

        # overlap metrics
        "VolDSC", "NucDSC",  # volume/nuclei Dice Similarity Coefficient
        "VolOut", "NucOut",  # volume/nuclei shifted out of orig position

        # point cloud measurements
        "NucCluster",  # number of nuclei clusters
        "NucClusNoise",  # number of nuclei that do not fit into a cluster
        "NucClusLarg",  # number of nuclei in the largest cluster
    ]
)
# variation metrics, aggregated by weighted average rather than by sum
# (see MeasureLabel.measure_variation)
VAR_METRICS = (
    LabelMetrics.RegVolMean, LabelMetrics.RegNucMean,
    LabelMetrics.VarNuclei, LabelMetrics.VarNucIn, LabelMetrics.VarNucOut,
    LabelMetrics.VarIntensity, LabelMetrics.VarIntensIn,
    LabelMetrics.VarIntensOut,
    LabelMetrics.MeanIntensity,
    LabelMetrics.MedIntensity,
    LabelMetrics.LowIntensity,
    LabelMetrics.HighIntensity,
    LabelMetrics.EntropyIntensity,
    LabelMetrics.VarIntensMatch,
    LabelMetrics.VarIntensDiff,
    LabelMetrics.MeanNuclei,
    LabelMetrics.VarNucMatch,
    LabelMetrics.CoefVarIntens, LabelMetrics.CoefVarNuc,
)

# nuclei metrics, weighted by nuclei counts instead of volume when averaged
NUC_METRICS = (
    LabelMetrics.Nuclei,
    LabelMetrics.RegNucMean,
    LabelMetrics.MeanNuclei,
    LabelMetrics.VarNuclei,
    LabelMetrics.VarNucIn,
    LabelMetrics.VarNucOut,
    LabelMetrics.VarNucMatch,
    LabelMetrics.CoefVarNuc,
)

# metrics computed from weighted averages
WT_METRICS = (
    *VAR_METRICS,
    LabelMetrics.EdgeDistMean,
)
def _coef_var(df):
# calculate coefficient of variation from data frame columns,
# where first column is std and second is mean
return np.divide(df.iloc[:, 0], df.iloc[:, 1])
class MetricCombos(Enum):
    """Combinations of metrics.

    Each combination should be a tuple of combination name, a
    tuple of metric Enums, and a function to use for aggregation applied
    across columns to give a new metric value for each row.
    """
    # mean of columns measuring regional homogeneity; missing columns
    # will be ignored
    HOMOGENEITY = (
        "Homogeneity",
        (LabelMetrics.VarIntensity, #LabelMetrics.VarIntensDiff,
         LabelMetrics.EdgeDistSum, LabelMetrics.VarNuclei),
        lambda x: np.nanmean(x, axis=1))

    # coefficient of variation of intensity values
    COEFVAR_INTENS = (
        "CoefVarIntensity",
        (LabelMetrics.VarIntensity, LabelMetrics.MeanIntensity),
        _coef_var)

    # coefficient of variation of nuclei density values
    COEFVAR_NUC = (
        "CoefVarNuclei",
        (LabelMetrics.VarNuclei, LabelMetrics.MeanNuclei),
        _coef_var)
class LabelToEdge(object):
    """Convert a label to an edge with class methods as an encapsulated
    way to use in multiprocessing without requirement for global variables.

    Attributes:
        labels_img_np: Integer labels images as a Numpy array.
    """
    labels_img_np = None

    @classmethod
    def set_labels_img_np(cls, val):
        """Set the labels image.

        Args:
            val: Labels image to set as class attribute.
        """
        cls.labels_img_np = val

    @classmethod
    def find_label_edge(cls, label_id):
        """Convert a label into just its border.

        Args:
            label_id: Integer of the label to extract from
                :attr:``labels_img_np``.

        Returns:
            Tuple of the given label ID; list of slices defining the
            location of the ROI where the edges can be found; and the
            ROI as a volume mask defining where the edges exist.
        """
        print("getting edge for {}".format(label_id))
        slices = None
        borders = None

        # get mask of label to get bounding box; cast with the builtin int
        # since the np.int alias was deprecated in NumPy 1.20 and removed
        # in 1.24, where it raises an AttributeError
        label_mask = cls.labels_img_np == label_id
        props = measure.regionprops(label_mask.astype(int))
        if len(props) > 0 and props[0].bbox is not None:
            _, slices = cv_nd.get_bbox_region(props[0].bbox)

            # work on a view of the region for efficiency, obtaining borders
            # as eroded region and writing into new array
            region = cls.labels_img_np[tuple(slices)]
            label_mask_region = region == label_id
            borders = cv_nd.perimeter_nd(label_mask_region)
        return label_id, slices, borders
def make_labels_edge(labels_img_np):
    """Convert a labels image into an image of label borders.

    Each label in ``labels_img_np`` is reduced to its perimeter voxels,
    which keep the original label ID, with the work distributed across
    a multiprocessing pool.

    Args:
        labels_img_np: Integer labels image as a Numpy array.

    Returns:
        Array of the same shape as ``labels_img_np`` containing only the
        border voxels of each label.
    """
    start_time = time()
    edge_img = np.zeros_like(labels_img_np)

    # share the labels image through the helper class so worker processes
    # can read it without it being passed per task
    LabelToEdge.set_labels_img_np(labels_img_np)

    pool = chunking.get_mp_pool()
    futures = [
        pool.apply_async(LabelToEdge.find_label_edge, args=(label_id, ))
        for label_id in np.unique(labels_img_np)
    ]
    for future in futures:
        label_id, slices, borders = future.get()
        if slices is not None:
            # write the border mask back into the corresponding ROI
            region = edge_img[tuple(slices)]
            region[borders] = label_id
    pool.close()
    pool.join()

    print("time elapsed to make labels edge:", time() - start_time)
    return edge_img
class MeasureLabel(object):
    """Measure metrics within image labels in a way that allows
    multiprocessing without global variables.

    All images should be of the same shape. If :attr:``df`` is available,
    it will be used in place of underlying images. Typically this
    data frame contains metrics for labels only at the lowest level,
    such as drawn or non-overlapping labels. These labels can then be
    used to aggregate values through summation or weighted means to
    generate metrics for superseding labels that contains these
    individual labels.

    Attributes:
        atlas_img_np: Sample image as a Numpy array.
        labels_img_np: Integer labels image as a Numpy array.
        labels_edge: Numpy array of labels reduced to their edges.
        dist_to_orig: Distance map of labels to edges, with intensity values
            in the same placement as in ``labels_edge``.
        heat_map: Numpy array as a density map.
        blobs (:obj:`np.ndarray`): 2D array of blobs such as nuclei in the
            format, ``[[z, y, x, label_id, ...], ...]``. Defaults to None.
        subseg: Integer sub-segmentations labels image as Numpy array.
        df: Pandas data frame with a row for each sub-region.
    """
    # metric keys grouped by the measurement that produces them
    _COUNT_METRICS = (
        LabelMetrics.Volume, LabelMetrics.Intensity, LabelMetrics.Nuclei)
    _EDGE_METRICS = (
        LabelMetrics.EdgeSize, LabelMetrics.EdgeDistSum,
        LabelMetrics.EdgeDistMean)
    _SHAPE_METRICS = (
        LabelMetrics.SurfaceArea, LabelMetrics.Compactness)
    _PCL_METRICS = (
        LabelMetrics.NucCluster, LabelMetrics.NucClusNoise,
        LabelMetrics.NucClusLarg,
    )

    # images and data frame, shared as class attributes so forked worker
    # processes can access them without re-pickling per task
    atlas_img_np = None
    labels_img_np = None
    labels_edge = None
    dist_to_orig = None
    labels_interior = None
    heat_map = None
    blobs = None
    subseg = None
    df = None
    spacing = None

    @classmethod
    def set_data(cls, atlas_img_np, labels_img_np, labels_edge=None,
                 dist_to_orig=None, labels_interior=None, heat_map=None,
                 blobs=None, subseg=None, df=None, spacing=None):
        """Set the images and data frame."""
        cls.atlas_img_np = atlas_img_np
        cls.labels_img_np = labels_img_np
        cls.labels_edge = labels_edge
        cls.dist_to_orig = dist_to_orig
        cls.labels_interior = labels_interior
        cls.heat_map = heat_map
        cls.blobs = blobs
        cls.subseg = subseg
        cls.df = df
        cls.spacing = spacing

    @classmethod
    def label_metrics(cls, label_id, extra_metrics=None):
        """Calculate metrics for a given label or set of labels.

        Wrapper to call :func:``measure_counts``,
        :func:``measure_variation``, and :func:``measure_edge_dist``.

        Args:
            label_id: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.
            extra_metrics (List[:obj:`config.MetricGroups`]): Sequence of
                additional metric groups to measure; defaults to None.

        Returns:
            Tuple of the given label ID and a dictionary of the combined
            metrics from all measurements.
        """
        # process basic metrics
        #print("getting label metrics for {}".format(label_id))
        _, count_metrics = cls.measure_counts(label_id)
        _, var_metrics = cls.measure_variation(label_id)
        _, edge_metrics = cls.measure_edge_dist(label_id)
        metrics = {**count_metrics, **var_metrics, **edge_metrics}

        if extra_metrics:
            for extra_metric in extra_metrics:
                # process additional metrics by applying corresponding function
                fn = None
                if extra_metric is config.MetricGroups.SHAPES:
                    fn = cls.measure_shapes
                elif extra_metric is config.MetricGroups.POINT_CLOUD:
                    fn = cls.measure_point_cloud
                if fn:
                    # NOTE(review): this rebinds ``extra_metrics``, shadowing
                    # the sequence being iterated; the loop continues on the
                    # original iterator but the name reuse is fragile
                    _, extra_metrics = fn(label_id)
                    metrics.update(extra_metrics)
        return label_id, metrics

    @classmethod
    def measure_counts(cls, label_ids):
        """Measure the volume, total intensity, and nuclei count of labels.

        If :attr:``df`` is available, it will be used to sum values
        from labels in ``label_ids`` found in the data frame
        rather than re-measuring values from images.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.

        Returns:
            Tuple of the given label ID and a dictionary of metrics.
            The metrics are NaN if the label size is 0.
        """
        metrics = dict.fromkeys(cls._COUNT_METRICS, np.nan)
        nuclei = np.nan
        if cls.df is None:
            # sum up counts within the collective region
            label_mask = np.isin(cls.labels_img_np, label_ids)
            label_size = np.sum(label_mask)
            intens = np.sum(cls.atlas_img_np[label_mask]) # tot intensity
            if cls.heat_map is not None:
                nuclei = np.sum(cls.heat_map[label_mask])
        else:
            # get all rows associated with region and sum stats within columns
            labels = cls.df.loc[
                cls.df[LabelMetrics.Region.name].isin(label_ids)]
            label_size = np.nansum(labels[LabelMetrics.Volume.name])
            intens = np.nansum(labels[LabelMetrics.Intensity.name])
            if LabelMetrics.Nuclei.name in labels:
                nuclei = np.nansum(labels[LabelMetrics.Nuclei.name])
        if label_size > 0:
            metrics[LabelMetrics.Volume] = label_size
            metrics[LabelMetrics.Intensity] = intens
            metrics[LabelMetrics.Nuclei] = nuclei
        disp_id = get_single_label(label_ids)
        print("counts within label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics

    @classmethod
    def region_props(cls, region, metrics, keys):
        """Measure properties for a region and add to a dictionary.

        Args:
            region: Region to measure, which can be a flattened array.
            metrics: Dictionary to store metrics.
            keys: Sequence of six keys corresponding to the standard
                deviation, mean, median, 5th percentile, 95th percentile,
                and Shannon entropy measurements, in that order.
        """
        if region.size < 1:
            # empty region: record NaN for every requested measurement
            for key in keys: metrics[key] = np.nan
        else:
            #print(region.size, len(region))
            metrics[keys[0]] = np.std(region)
            metrics[keys[1]] = np.mean(region)
            metrics[keys[2]] = np.median(region)
            metrics[keys[3]], metrics[keys[4]] = np.percentile(region, (5, 95))
            metrics[keys[5]] = measure.shannon_entropy(region)

    @classmethod
    def measure_variation(cls, label_ids):
        """Measure the variation in underlying atlas intensity.

        Variation is measured by standard deviation of atlas intensity and,
        if :attr:``heat_map`` is available, that of the blob density.

        If :attr:``df`` is available, it will be used to calculated
        weighted averages from labels in ``label_ids`` found in the
        data frame rather than re-measuring values from images.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.

        Returns:
            Tuple of the given label ID and a dictionary a metrics.
            The metrics are NaN if the label size is 0.
        """
        metrics = dict((key, []) for key in VAR_METRICS)
        if not libmag.is_seq(label_ids): label_ids = [label_ids]

        seg_ids = []
        for label_id in label_ids:
            # collect all sub-regions
            if cls.subseg is not None:
                # get sub-segmentations within region
                label_mask = cls.labels_img_np == label_id
                seg_ids.extend(np.unique(cls.subseg[label_mask]).tolist())
            else:
                seg_ids.append(label_id)

        if cls.df is None:
            # calculate stats for each sub-segmentation; regional ("reg")
            # means are weighted across regions and sub-segs, where the
            # mean for each region which should equal total of full region
            # if only one sub-seg
            for seg_id in seg_ids:
                if cls.subseg is not None:
                    seg_mask = cls.subseg == seg_id
                else:
                    seg_mask = cls.labels_img_np == seg_id
                size = np.sum(seg_mask)
                if size > 0:
                    # variation in intensity of underlying atlas/sample region
                    vals = dict((key, np.nan) for key in VAR_METRICS)
                    vals[LabelMetrics.RegVolMean] = size
                    atlas_mask = cls.atlas_img_np[seg_mask]
                    cls.region_props(
                        atlas_mask, vals,
                        (LabelMetrics.VarIntensity,
                         LabelMetrics.MeanIntensity,
                         LabelMetrics.MedIntensity,
                         LabelMetrics.LowIntensity,
                         LabelMetrics.HighIntensity,
                         LabelMetrics.EntropyIntensity))
                    vals[LabelMetrics.CoefVarIntens] = (
                        vals[LabelMetrics.VarIntensity]
                        / vals[LabelMetrics.MeanIntensity])

                    interior_mask = None
                    border_mask = None
                    if cls.labels_interior is not None:
                        # inner vs border variability
                        interior_mask = cls.labels_interior == seg_id
                        border_mask = np.logical_xor(seg_mask, interior_mask)
                        atlas_interior = cls.atlas_img_np[interior_mask]
                        atlas_border = cls.atlas_img_np[border_mask]
                        vals[LabelMetrics.VarIntensIn] = np.std(atlas_interior)
                        vals[LabelMetrics.VarIntensOut] = np.std(atlas_border)
                        # get variability interior-border match as abs diff
                        vals[LabelMetrics.VarIntensMatch] = abs(
                            vals[LabelMetrics.VarIntensOut]
                            - vals[LabelMetrics.VarIntensIn])
                        # get variability interior-border simple difference
                        vals[LabelMetrics.VarIntensDiff] = (
                            vals[LabelMetrics.VarIntensOut]
                            - vals[LabelMetrics.VarIntensIn])

                    if cls.heat_map is not None:
                        # number of blob and variation in blob density
                        blobs_per_px = cls.heat_map[seg_mask]
                        vals[LabelMetrics.VarNuclei] = np.std(blobs_per_px)
                        vals[LabelMetrics.RegNucMean] = np.sum(blobs_per_px)
                        vals[LabelMetrics.MeanNuclei] = np.mean(blobs_per_px)
                        if (interior_mask is not None and
                                border_mask is not None):
                            heat_interior = cls.heat_map[interior_mask]
                            heat_border = cls.heat_map[border_mask]
                            vals[LabelMetrics.VarNucIn] = np.std(heat_interior)
                            vals[LabelMetrics.VarNucOut] = np.std(heat_border)
                            vals[LabelMetrics.VarNucMatch] = abs(
                                vals[LabelMetrics.VarNucOut]
                                - vals[LabelMetrics.VarNucIn])
                        vals[LabelMetrics.CoefVarNuc] = (
                            vals[LabelMetrics.VarNuclei]
                            / vals[LabelMetrics.MeanNuclei])

                    for metric in VAR_METRICS:
                        metrics[metric].append(vals[metric])
        else:
            # get sub-region stats stored in data frame
            labels = cls.df.loc[cls.df[LabelMetrics.Region.name].isin(seg_ids)]
            for i, row in labels.iterrows():
                if row[LabelMetrics.RegVolMean.name] > 0:
                    for metric in VAR_METRICS:
                        if metric.name in row:
                            metrics[metric].append(row[metric.name])
                        else:
                            # NOTE(review): this replaces the whole metric
                            # list with NaN rather than appending a NaN
                            # placeholder; verify that this is intended
                            metrics[metric] = np.nan

        # weighted average, with weights given by frac of region or
        # sub-region size from total size
        disp_id = get_single_label(label_ids)
        vols = np.copy(metrics[LabelMetrics.RegVolMean])
        tot_size = np.sum(vols) # assume no nans
        nucs = np.copy(metrics[LabelMetrics.RegNucMean])
        tot_nucs = np.nansum(nucs)
        for key in metrics.keys():
            #print("{} {}: {}".format(disp_id, key.name, metrics[key]))
            # NOTE(review): ``metrics[key] != np.nan`` is always True (NaN
            # compares unequal to everything, and lists compare unequal to
            # NaN), so this condition reduces to ``tot_size > 0``; an
            # isnan-based check may have been intended
            if tot_size > 0 and metrics[key] != np.nan:
                # take weighted mean
                if key in NUC_METRICS:
                    # use weighting from nuclei for nuclei-oriented metrics
                    metrics[key] = np.nansum(
                        np.multiply(metrics[key], nucs)) / tot_nucs
                else:
                    # default to weighting by volume
                    metrics[key] = np.nansum(
                        np.multiply(metrics[key], vols)) / tot_size
            if tot_size <= 0 or metrics[key] == 0: metrics[key] = np.nan
        print("variation within label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics

    @classmethod
    def measure_edge_dist(cls, label_ids):
        """Measure the distance between edge images.

        If :attr:``df`` is available, it will be used to calculated
        a sum from edge distance sum or weighted averages from edge
        distance mean values from labels in ``label_ids`` found in the
        data frame rather than re-measuring values from images.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.

        Returns:
            Tuple of the given label ID and dictionary of metrics.
            The metrics are NaN if the label size is 0.
        """
        metrics = dict.fromkeys(cls._EDGE_METRICS, np.nan)

        # get collective region
        label_mask = None
        labels = None
        if cls.df is None:
            # get region directly from image
            label_mask = np.isin(cls.labels_edge, label_ids)
            label_size = np.sum(label_mask)
        else:
            # get all row associated with region
            labels = cls.df.loc[
                cls.df[LabelMetrics.Region.name].isin(label_ids)]
            label_size = np.nansum(labels[LabelMetrics.Volume.name])

        if label_size > 0:
            if cls.df is None:
                # sum and take average directly from image
                region_dists = cls.dist_to_orig[label_mask]
                metrics[LabelMetrics.EdgeDistSum] = np.sum(region_dists)
                metrics[LabelMetrics.EdgeDistMean] = np.mean(region_dists)
                metrics[LabelMetrics.EdgeSize] = region_dists.size
            else:
                # take sum from rows and weight means by edge sizes
                if LabelMetrics.EdgeDistSum.name in labels:
                    metrics[LabelMetrics.EdgeDistSum] = np.nansum(
                        labels[LabelMetrics.EdgeDistSum.name])
                if LabelMetrics.EdgeSize.name in labels:
                    sizes = labels[LabelMetrics.EdgeSize.name]
                    size = np.sum(sizes)
                    metrics[LabelMetrics.EdgeSize] = size
                    if LabelMetrics.EdgeDistMean.name in labels:
                        # mean weighted by each sub-region's edge size
                        metrics[LabelMetrics.EdgeDistMean] = (
                            np.sum(np.multiply(
                                sizes, labels[LabelMetrics.EdgeDistMean.name]))
                            / size)
        disp_id = get_single_label(label_ids)
        print("dist within edge of label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics

    @classmethod
    def measure_shapes(cls, label_ids):
        """Measure label shapes.

        Labels will be measured even if :attr:``df`` is available
        to account for the global shape rather than using weighted-averages.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure shapes.

        Returns:
            Tuple of the given label ID and a dictionary of metrics.
        """
        metrics = dict.fromkeys(cls._SHAPE_METRICS, np.nan)

        # sum up counts within the collective region
        label_mask = np.isin(cls.labels_img_np, label_ids)
        label_size = np.sum(label_mask)

        if label_size > 0:
            compactness, area, _ = cv_nd.compactness_3d(
                label_mask, cls.spacing)
            metrics[LabelMetrics.SurfaceArea] = area
            metrics[LabelMetrics.Compactness] = compactness
            # TODO: high memory consumption with these measurements
            # props = measure.regionprops(label_mask.astype(np.uint8))
            # if props:
            #     prop = props[0]
            #     metrics[LabelMetrics.ConvexVolume] = prop.convex_area
            #     metrics[LabelMetrics.Solidity] = prop.solidity
            props = None

        disp_id = get_single_label(label_ids)
        print("shape measurements of label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics

    @classmethod
    def measure_point_cloud(cls, label_ids):
        """Measure point cloud statistics such as those from nuclei.

        Assumes that the class attribute :attr:`blobs` is available.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.

        Returns:
            Tuple of the given label ID and dictionary of metrics.
            The metrics are NaN if the label size is 0.
        """
        metrics = dict.fromkeys(cls._PCL_METRICS, np.nan)
        if cls.df is None and cls.blobs is None:
            # NOTE(review): these adjacent literals concatenate without a
            # space, printing "...measurepoint cloud stats"
            print("data frame and blobs not available, unable to measure"
                  "point cloud stats")
            return label_ids, metrics

        # get collective region
        labels = None
        if cls.df is None:
            # get region directly from image
            label_mask = np.isin(cls.labels_img_np, label_ids)
            label_size = np.sum(label_mask)
        else:
            # get all row associated with region
            labels = cls.df.loc[
                cls.df[LabelMetrics.Region.name].isin(label_ids)]
            label_size = np.nansum(labels[LabelMetrics.Volume.name])

        if label_size > 0:
            if cls.df is None:
                # sum and take average directly from image; blobs column 3
                # holds the label ID and column 4 the cluster assignment
                blobs = cls.blobs[np.isin(cls.blobs[:, 3], label_ids)]
                num_clusters, num_noise, num_largest = (
                    clustering.cluster_dbscan_metrics(blobs[:, 4]))
                metrics[LabelMetrics.NucCluster] = num_clusters
                metrics[LabelMetrics.NucClusNoise] = num_noise
                metrics[LabelMetrics.NucClusLarg] = num_largest
            else:
                for key in metrics.keys():
                    if key.name not in labels: continue
                    metrics[key] = np.nansum(labels[key.name])
        disp_id = get_single_label(label_ids)
        print("nuclei clusters within label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics
def get_single_label(label_id):
    """Reduce an ID to a single element.

    Args:
        label_id: Single ID or sequence of IDs.

    Returns:
        The first element when ``label_id`` is a non-empty sequence,
        otherwise ``label_id`` itself.
    """
    is_nonempty_seq = libmag.is_seq(label_id) and len(label_id) > 0
    return label_id[0] if is_nonempty_seq else label_id
def _update_df_side(df):
    # Invert label IDs of right-sided regions in-place; assumes that a
    # caller supplying a data frame specifies sides explicitly in label_ids.
    # TODO: consider removing combine_sides and using label_ids only
    right_rows = (df[config.AtlasMetrics.SIDE.value]
                  == config.HemSides.RIGHT.value)
    df.loc[right_rows, LabelMetrics.Region.name] *= -1
def _parse_vol_metrics(label_metrics, spacing=None, unit_factor=None,
                       extra_keys=None):
    # Parse volume metrics into physical units and nuclei density.
    # ``label_metrics`` is updated in-place; returns the raw (pre-conversion)
    # label size, the nuclei count, and the converted volume values.
    physical_mult = None if spacing is None else np.prod(spacing)
    keys = [LabelMetrics.Volume, LabelMetrics.VolAlt]
    if extra_keys is not None:
        keys.extend(extra_keys)
    # collect only the volume-type metrics actually present for this label
    vols_phys = []
    found_keys = []
    for key in keys:
        if key in label_metrics:
            vols_phys.append(label_metrics[key])
            found_keys.append(key)
    # capture size BEFORE any unit conversion; assumes Volume was found
    # (IndexError otherwise -- TODO confirm callers guarantee this)
    label_size = vols_phys[0]
    if physical_mult is not None:
        # convert to physical units at the given value unless
        # using data frame, where values presumably already converted
        vols_phys = np.multiply(vols_phys, physical_mult)
        if unit_factor is not None:
            # further conversion to given unit size
            unit_factor_vol = unit_factor ** 3
            vols_phys = np.divide(vols_phys, unit_factor_vol)
    if unit_factor is not None:
        # convert metrics not extracted from data frame
        if LabelMetrics.SurfaceArea in label_metrics:
            # already incorporated physical units but needs to convert
            # to unit size
            label_metrics[LabelMetrics.SurfaceArea] /= unit_factor ** 2
    # write converted volumes back, then calculate densities based on
    # physical volumes
    for key, val in zip(found_keys, vols_phys):
        label_metrics[key] = val
    nuc = np.nan
    if LabelMetrics.Nuclei in label_metrics:
        nuc = label_metrics[LabelMetrics.Nuclei]
        label_metrics[LabelMetrics.Density] = nuc / vols_phys[0]
    return label_size, nuc, vols_phys
def _update_vol_dicts(label_id, label_metrics, grouping, metrics):
    """Transfer one label's metrics and grouping metadata into the master
    ``metrics`` dictionary of column name to list of values."""
    # record hemisphere and store the region as an absolute display ID
    grouping[config.AtlasMetrics.SIDE.value] = ontology.get_label_side(
        label_id)
    label_metrics[LabelMetrics.Region] = abs(get_single_label(label_id))
    # append the grouping metadata columns
    for group_key, group_val in grouping.items():
        metrics.setdefault(group_key, []).append(group_val)
    # append each measured metric column, keyed by enum name
    for metric in LabelMetrics:
        if metric in label_metrics:
            metrics.setdefault(metric.name, []).append(
                label_metrics[metric])
def measure_labels_metrics(atlas_img_np, labels_img_np,
                           labels_edge, dist_to_orig, labels_interior=None,
                           heat_map=None, blobs=None,
                           subseg=None, spacing=None, unit_factor=None,
                           combine_sides=True, label_ids=None, grouping=None,
                           df=None, extra_metrics=None):
    """Compute metrics such as variation and distances within regions
    based on maps corresponding to labels image.

    Args:
        atlas_img_np: Atlas or sample image as a Numpy array.
        labels_img_np: Integer labels image as a Numpy array.
        labels_edge: Numpy array of labels reduced to their edges.
        dist_to_orig: Distance map of labels to edges, with intensity values
            in the same placement as in ``labels_edge``.
        labels_interior: Numpy array of labels eroded to interior region.
        heat_map: Numpy array as a density map; defaults to None to ignore
            density measurements.
        blobs (:obj:`np.ndarray`): 2D array of blobs; defaults to None.
        subseg: Integer sub-segmentations labels image as Numpy array;
            defaults to None to ignore label sub-divisions.
        spacing: Sequence of image spacing for each pixel in the images.
        unit_factor: Unit factor conversion; defaults to None. Eg use
            1000 to convert from um to mm.
        combine_sides: True to combine corresponding labels from opposite
            sides of the sample; defaults to True. Corresponding labels
            are assumed to have the same absolute numerical number and
            differ only in signage. May be False if combining by passing
            both pos/neg labels in ``label_ids``.
        label_ids: Sequence of label IDs to include. Defaults to None,
            in which case the labels will be taken from unique values
            in ``labels_img_np``.
        grouping: Dictionary of sample grouping metadata, where each
            entry will be added as a separate column. Defaults to None,
            in which case an empty dictionary is used.
        df: Data frame with rows for all drawn labels to pool into
            parent labels instead of re-measuring stats for all
            children of each parent; defaults to None.
        extra_metrics (List[:obj:`config.MetricGroups`]): List of enums
            specifying additional stats; defaults to None.

    Returns:
        Pandas data frame of the regions and weighted means for the metrics.
    """
    start_time = time()
    if grouping is None:
        # BUG FIX: the default was a mutable dict ({}) that is mutated
        # below, so its keys leaked across calls; build one per call
        grouping = {}
    if df is None:
        # convert to physical units based on spacing and unit conversion
        vol_args = {"spacing": spacing, "unit_factor": unit_factor}
    else:
        # units already converted, but need to convert sides
        _update_df_side(df)
        vol_args = {}

    # use a class to set and process the label without having to
    # reference the labels image as a global variable
    MeasureLabel.set_data(
        atlas_img_np, labels_img_np, labels_edge, dist_to_orig,
        labels_interior, heat_map, blobs, subseg, df, spacing)

    metrics = {}
    grouping[config.AtlasMetrics.SIDE.value] = None
    pool = chunking.get_mp_pool()
    pool_results = []
    if label_ids is None:
        label_ids = np.unique(labels_img_np)
        if combine_sides:
            # keep only non-negative IDs; mirrors are added per label below
            label_ids = label_ids[label_ids >= 0]
    for label_id in label_ids:
        # include corresponding labels from opposite sides while skipping
        # background
        if label_id == 0:
            continue
        if combine_sides:
            label_id = [label_id, -1 * label_id]
        pool_results.append(
            pool.apply_async(
                MeasureLabel.label_metrics, args=(label_id, extra_metrics)))

    totals = {}
    for result in pool_results:
        # get metrics by label
        label_id, label_metrics = result.get()
        label_size, nuc, (vol_physical, vol_mean_physical) = _parse_vol_metrics(
            label_metrics, extra_keys=(LabelMetrics.RegVolMean,), **vol_args)
        reg_nuc_mean = label_metrics[LabelMetrics.RegNucMean]
        edge_size = label_metrics[LabelMetrics.EdgeSize]

        # calculate densities based on physical volumes
        label_metrics[LabelMetrics.RegVolMean] = vol_mean_physical
        label_metrics[LabelMetrics.RegDensityMean] = (
            reg_nuc_mean / vol_mean_physical)

        # transfer all found metrics to master dictionary
        _update_vol_dicts(label_id, label_metrics, grouping, metrics)

        # weight and accumulate total metrics
        totals.setdefault(LabelMetrics.EdgeDistSum, []).append(
            label_metrics[LabelMetrics.EdgeDistSum] * edge_size)
        totals.setdefault(LabelMetrics.EdgeDistMean, []).append(
            label_metrics[LabelMetrics.EdgeDistMean] * edge_size)
        totals.setdefault(LabelMetrics.EdgeSize, []).append(edge_size)
        totals.setdefault(LabelMetrics.VarIntensity, []).append(
            label_metrics[LabelMetrics.VarIntensity] * label_size)
        totals.setdefault("vol", []).append(label_size)
        totals.setdefault(LabelMetrics.Volume, []).append(vol_physical)
        totals.setdefault(LabelMetrics.RegVolMean, []).append(
            vol_mean_physical * label_size)
        var_nuc = label_metrics[LabelMetrics.VarNuclei]
        if not np.isnan(var_nuc):
            # BUG FIX: ``var_nuc != np.nan`` was always True because NaN
            # compares unequal to everything (including itself); use isnan
            totals.setdefault(LabelMetrics.VarNuclei, []).append(
                var_nuc * label_size)
            totals.setdefault(LabelMetrics.Nuclei, []).append(nuc)
        if not np.isnan(reg_nuc_mean):
            # BUG FIX: same always-True NaN comparison as above
            totals.setdefault(LabelMetrics.RegNucMean, []).append(
                reg_nuc_mean * label_size)
    pool.close()
    pool.join()

    # make data frame of raw metrics, dropping columns of all NaNs
    df = pd.DataFrame(metrics)
    df = df.dropna(axis=1, how="all")
    df_io.print_data_frame(df)

    # build data frame of total metrics from weighted means
    metrics_all = {}
    grouping[config.AtlasMetrics.SIDE.value] = "both"
    for key, val in grouping.items():
        metrics_all.setdefault(key, []).append(val)
    for key in totals.keys():
        totals[key] = np.nansum(totals[key])
        if totals[key] == 0:
            totals[key] = np.nan
    # divide weighted values by sum of corresponding weights
    totals[LabelMetrics.Region] = "all"
    totals[LabelMetrics.RegVolMean] /= totals["vol"]
    if LabelMetrics.Nuclei in totals:
        totals[LabelMetrics.Density] = (
            totals[LabelMetrics.Nuclei] / totals[LabelMetrics.Volume])
    if LabelMetrics.RegNucMean in totals:
        totals[LabelMetrics.RegNucMean] /= totals["vol"]
        totals[LabelMetrics.RegDensityMean] = (
            totals[LabelMetrics.RegNucMean] / totals[LabelMetrics.RegVolMean])
    if LabelMetrics.VarNuclei in totals:
        totals[LabelMetrics.VarNuclei] /= totals["vol"]
    totals[LabelMetrics.VarIntensity] /= totals["vol"]
    totals[LabelMetrics.EdgeDistMean] /= totals[LabelMetrics.EdgeSize]
    for col in LabelMetrics:
        if col in totals:
            metrics_all.setdefault(col.name, []).append(totals[col])
    df_all = pd.DataFrame(metrics_all)
    df_io.print_data_frame(df_all)
    print("time elapsed to measure variation:", time() - start_time)
    return df, df_all
class MeasureLabelOverlap(object):
    """Measure metrics comparing two versions of image labels in a way
    that allows multiprocessing without global variables.

    All images should be of the same shape. If :attr:``df`` is available,
    it will be used in place of underlying images. Typically this
    data frame contains metrics for labels only at the lowest level,
    such as drawn or non-overlapping labels. These labels can then be
    used to aggregate values through summation or weighted means to
    generate metrics for superseding labels that contains these
    individual labels.

    Attributes:
        labels_imgs: Sequence of integer labels image as Numpy arrays.
        heat_map: Numpy array as a density map; defaults to None to ignore
            density measurements.
        df: Pandas data frame with a row for each sub-region.
    """
    # metrics produced by :meth:`measure_overlap`; values not measurable
    # for a label remain NaN
    _OVERLAP_METRICS = (
        LabelMetrics.Volume,
        LabelMetrics.VolPx,
        LabelMetrics.VolAlt,
        LabelMetrics.VolAltPx,
        LabelMetrics.Nuclei,
        LabelMetrics.VolDSC,
        LabelMetrics.NucDSC,
        LabelMetrics.VolOut,
        LabelMetrics.NucOut,
    )

    # images and data frame, stored at class level so worker processes can
    # access them without passing large arrays per task
    labels_imgs = None
    heat_map = None
    df = None

    @classmethod
    def set_data(cls, labels_imgs, heat_map=None, df=None):
        """Set the images and data frame."""
        cls.labels_imgs = labels_imgs
        cls.heat_map = heat_map
        cls.df = df

    @classmethod
    def measure_overlap(cls, label_ids):
        """Measure the overlap between image labels.

        If :attr:``df`` is available, it will be used to sum values
        from labels in ``label_ids`` found in the data frame
        rather than re-measuring values from images.

        Args:
            label_ids: Integer of the label or sequence of multiple labels
                in :attr:``labels_img_np`` for which to measure variation.

        Returns:
            Tuple of the given label ID and a dictionary of metrics.
            The metrics are NaN if the label size is 0.
        """
        metrics = dict.fromkeys(cls._OVERLAP_METRICS, np.nan)
        # nuclei-based values stay NaN unless a heat map or a data frame
        # column provides them
        nuclei = np.nan
        nuc_dsc = np.nan
        nuc_out = np.nan
        if cls.df is None:
            # find DSC between original and updated versions of the
            # collective region
            label_masks = [np.isin(l, label_ids) for l in cls.labels_imgs]
            label_vol = np.sum(label_masks[0])
            label_vol_alt = np.sum(label_masks[1])
            vol_dsc = atlas_stats.meas_dice(label_masks[0], label_masks[1])
            # sum up volume and nuclei count in the new version outside of
            # the original version; assume that remaining original volume
            # will be accounted for by the other labels that reoccupy it
            mask_out = np.logical_and(label_masks[1], ~label_masks[0])
            vol_out = np.sum(mask_out)
            if cls.heat_map is not None:
                nuclei = np.sum(cls.heat_map[label_masks[0]])
                nuc_dsc = atlas_stats.meas_dice(
                    label_masks[0], label_masks[1], cls.heat_map)
                nuc_out = np.sum(cls.heat_map[mask_out])
        else:
            # get weighted average of DSCs from all rows in a super-region,
            # assuming all rows are at the lowest hierarchical level
            labels = cls.df.loc[
                cls.df[LabelMetrics.Region.name].isin(label_ids)]
            label_vols = labels[LabelMetrics.Volume.name]
            label_vol = np.nansum(label_vols)
            label_vol_alt = np.nansum(labels[LabelMetrics.VolAlt.name])
            vol_dscs = labels[LabelMetrics.VolDSC.name]
            vol_dsc = df_io.weight_mean(vol_dscs, label_vols)
            # sum up volume and nuclei outside of original regions
            vol_out = np.nansum(labels[LabelMetrics.VolOut.name])
            if LabelMetrics.Nuclei.name in labels:
                # nuclei DSC is weighted by nuclei counts rather than volume
                nucs = labels[LabelMetrics.Nuclei.name]
                nuclei = np.nansum(nucs)
                nuc_dscs = labels[LabelMetrics.NucDSC.name]
                nuc_dsc = df_io.weight_mean(nuc_dscs, nucs)
                nuc_out = np.nansum(labels[LabelMetrics.NucOut.name])
        if label_vol > 0:
            # update dict with metric values; px vals will not get converted
            # to physical units
            metrics[LabelMetrics.Volume] = label_vol
            metrics[LabelMetrics.VolPx] = label_vol
            metrics[LabelMetrics.VolAlt] = label_vol_alt
            metrics[LabelMetrics.VolAltPx] = label_vol_alt
            metrics[LabelMetrics.Nuclei] = nuclei
            metrics[LabelMetrics.VolDSC] = vol_dsc
            metrics[LabelMetrics.NucDSC] = nuc_dsc
            metrics[LabelMetrics.VolOut] = vol_out
            metrics[LabelMetrics.NucOut] = nuc_out
        disp_id = get_single_label(label_ids)
        print("overlaps within label {}: {}"
              .format(disp_id, libmag.enum_dict_aslist(metrics)))
        return label_ids, metrics
def measure_labels_overlap(labels_imgs, heat_map=None, spacing=None,
                           unit_factor=None, combine_sides=True,
                           label_ids=None, grouping=None, df=None):
    """Compute metrics comparing two version of atlas labels.

    Args:
        labels_imgs: Sequence of integer labels image as Numpy arrays.
        heat_map: Numpy array as a density map; defaults to None to ignore
            density measurements.
        spacing: Sequence of image spacing for each pixel in the images.
        unit_factor: Unit factor conversion; defaults to None. Eg use
            1000 to convert from um to mm.
        combine_sides: True to combine corresponding labels from opposite
            sides of the sample; defaults to True. Corresponding labels
            are assumed to have the same absolute numerical number and
            differ only in signage. May be False if combining by passing
            both pos/neg labels in ``label_ids``.
        label_ids: Sequence of label IDs to include. Defaults to None,
            in which case the labels will be taken from unique values
            in the first image of ``labels_imgs``.
        grouping: Dictionary of sample grouping metadata, where each
            entry will be added as a separate column. Defaults to None,
            in which case an empty dictionary is used.
        df: Data frame with rows for all drawn labels to pool into
            parent labels instead of re-measuring stats for all
            children of each parent; defaults to None.

    Returns:
        :obj:`pd.DataFrame`: Pandas data frame of the regions and weighted
        means for the metrics.
    """
    start_time = time()
    if grouping is None:
        # BUG FIX: avoid a shared mutable default dict that is mutated below
        grouping = {}
    if df is None:
        vol_args = {"spacing": spacing, "unit_factor": unit_factor}
    else:
        # units already converted, but sides still need inverting
        _update_df_side(df)
        vol_args = {}

    # use a class to set and process the label without having to
    # reference the labels image as a global variable
    MeasureLabelOverlap.set_data(labels_imgs, heat_map, df)

    metrics = {}
    grouping[config.AtlasMetrics.SIDE.value] = None
    pool = chunking.get_mp_pool()
    pool_results = []
    if label_ids is None:
        # BUG FIX: the docstring promised deriving label IDs from the image
        # when None, but the loop previously iterated None (TypeError);
        # derive them as in measure_labels_metrics
        label_ids = np.unique(labels_imgs[0])
        if combine_sides:
            label_ids = label_ids[label_ids >= 0]
    for label_id in label_ids:
        # include corresponding labels from opposite sides while skipping
        # background
        if label_id == 0:
            continue
        if combine_sides:
            label_id = [label_id, -1 * label_id]
        pool_results.append(
            pool.apply_async(
                MeasureLabelOverlap.measure_overlap, args=(label_id,)))
    for result in pool_results:
        # get metrics by label
        label_id, label_metrics = result.get()
        label_size, nuc, _ = _parse_vol_metrics(
            label_metrics, extra_keys=(LabelMetrics.VolOut,), **vol_args)
        # transfer all found metrics to master dictionary
        _update_vol_dicts(label_id, label_metrics, grouping, metrics)
    pool.close()
    pool.join()

    # make data frame of raw metrics, dropping columns of all NaNs
    df = pd.DataFrame(metrics)
    df = df.dropna(axis=1, how="all")
    df_io.print_data_frame(df)
    print("time elapsed to measure variation:", time() - start_time)
    return df
def map_meas_to_labels(labels_img, df, meas, fn_avg, skip_nans=False,
                       reverse=False, col_wt=None):
    """Generate a map of a given measurement on a labels image.

    The intensity values of labels will be replaced by the given metric
    of the chosen measurement, such as the mean of the densities. If
    multiple conditions exist, the difference of metrics for the first
    two conditions will be taken under the assumption that the values for
    each condition are in matching order.

    Args:
        labels_img: Labels image as a Numpy array in x,y,z.
        df: Pandas data frame with measurements by regions corresponding
            to that of ``labels_img``.
        meas: Name of column in ``df`` from which to extract measurements.
        fn_avg: Function to apply to the column for each region. If None,
            ``df`` is assumed to already contain statistics generated from
            the ``clrstats`` R package, which will be extracted directly.
        skip_nans: True to skip any region with NaNs, leaving 0 instead;
            defaults to False to allow NaNs in resulting image. Some
            applications may not be able to read NaNs, so this parameter
            allows giving a neutral value instead.
        reverse: Reverse the order of sorted conditions when generating
            stats by ``fn_avg`` to compare conditions; defaults to False.
        col_wt (str): Name of column to use for weighting, where the
            magnitude of ``meas`` will be adjusted as fractions of the max
            value in this weighting column for labels found in ``labels_img``;
            defaults to None.

    Returns:
        A map of averages for the given measurement as an image of the
        same shape as ``labels_img`` of float data type, or None if no
        values for ``meas`` are found.
    """
    if meas not in df or np.all(np.isnan(df[meas])):
        # ensure that measurement column is present with non-NaNs
        print("{} not in data frame or all NaNs, no image to generate"
              .format(meas))
        return None

    # make image array to map differences for each label and filter data
    # frame to get only these regions
    # BUG FIX: ``np.float`` was removed in NumPy 1.24; use the builtin float
    labels_diff = np.zeros_like(labels_img, dtype=float)
    labels_img_abs = np.abs(labels_img)
    regions = np.unique(labels_img_abs)
    df = df.loc[df["Region"].isin(regions)].copy()
    df_cond = None
    conds = None
    if "Condition" in df:
        # get and sort conditions
        df_cond = df["Condition"]
        conds = sorted(np.unique(df_cond), reverse=reverse)

    if col_wt is not None:
        # weight given column for the first condition and normalizing it to
        # its maximum value, or use the whole column if no conditions exist
        print("weighting stats by", col_wt)
        wts = df.loc[df_cond == conds[0], col_wt] if conds else df[col_wt]
        wts /= max(wts)
        if conds:
            for cond in conds:
                # use raw values to avoid multiplying by index; assumes
                # matching order of values between conditions
                df.loc[df_cond == cond, meas] = np.multiply(
                    df.loc[df_cond == cond, meas].values, wts.values)
        else:
            df.loc[:, meas] *= wts

    for region in regions:
        # get difference for each region, either from a single column
        # that already has the difference of effect size or by taking
        # the difference from two columns
        df_region = df[df[LabelMetrics.Region.name] == region]
        labels_region = labels_img_abs == region
        diff = np.nan
        if fn_avg is None:
            # assume that df was output by R clrstats package
            if df_region.shape[0] > 0:
                diff = df_region[meas]
        else:
            # BUG FIX: ``conds`` is None when there is no "Condition"
            # column, and ``len(None)`` previously raised TypeError
            if conds and len(conds) >= 2:
                # compare the metrics for the first two conditions
                avgs = []
                for cond in conds:
                    # gather separate metrics for each condition
                    df_region_cond = df_region[df_region["Condition"] == cond]
                    reg_avg = fn_avg(df_region_cond[meas])
                    avgs.append(reg_avg)
                # TODO: consider making order customizable
                diff = avgs[1] - avgs[0]
            else:
                # take the metric for the single condition
                diff = fn_avg(df_region[meas])
        if skip_nans and np.isnan(diff):
            diff = 0
        labels_diff[labels_region] = diff
        print("label {} difference: {}".format(region, diff))
    return labels_diff
def labels_distance(labels_img1, labels_img2, spacing=None, out_path=None,
                    name=None):
    """Measure distances between corresponding labels in two images.

    A label value of 0 is treated as background and skipped.

    Args:
        labels_img1 (:class:`numpy.ndarray`): Labels image 1.
        labels_img2 (:class:`numpy.ndarray`): Labels image 2. Does not
            have to be of the same shape as ``labels_img1``, but is assumed
            to share the same origin/offset and ``spacing`` since distances
            are based on centroid coordinates of corresponding labels.
        spacing (list[float]): Spacing/scaling in ``z,y,x``.
        out_path (str): CSV output path; defaults to None to not save.
        name (str): Sample name; defaults to None.

    Returns:
        :class:`pandas.DataFrame`: Data frame of output metrics.
    """
    records = []
    # pool unique labels from both images so labels missing from either
    # image still produce a (NaN) row
    ids_first = np.unique(labels_img1)
    ids_second = np.unique(labels_img2)
    pooled_ids = np.unique(np.append(ids_first, ids_second))
    label_imgs = (labels_img1, labels_img2)
    for lbl in pooled_ids:
        if lbl == 0:
            # skip background
            continue
        if lbl in ids_first and lbl in ids_second:
            # compute distance between centroids of the corresponding
            # labels, scaling each centroid by the voxel spacing first
            cents = []
            for img in label_imgs:
                props = cv_nd.get_label_props(img, lbl)
                cents.append(np.multiply(props[0].centroid, spacing))
            dist = cdist(
                np.array([cents[0]]), np.array([cents[1]]))[0][0]
        else:
            # label missing from at least one image
            cents = [np.nan] * 2
            dist = np.nan
        records.append((name, lbl, *cents[:2], dist))
    # export metrics to data frame
    df = df_io.dict_to_data_frame(
        records, out_path, show=True, records_cols=(
            config.AtlasMetrics.SAMPLE.value,
            LabelMetrics.Region.name, "Centroid1", "Centroid2",
            LabelMetrics.Dist.name))
    return df
def get_metric_weight_col(stat):
    """Get the weighting column for a given metric.

    Args:
        stat (str): The metric for which to find the appropriate weighting
            metric.

    Returns:
        The name of the corresponding weighting metric as a string, or
        None if the metric has no weighting column.
    """
    # nuclei-based metrics take precedence over volume-weighted ones
    if stat in (metric.name for metric in NUC_METRICS):
        return LabelMetrics.Nuclei.name
    if stat in (metric.name for metric in WT_METRICS):
        return LabelMetrics.Volume.name
    return None
| 41.872671 | 80 | 0.61483 | 28,262 | 0.52403 | 0 | 0 | 23,729 | 0.43998 | 0 | 0 | 22,461 | 0.416469 |
2d5842d1c5376eb851aa288ea6f1a9e5f74b9b80 | 251 | py | Python | PythonCode/Pyboard/Examples/SImpleUART.py | CarterWS/Summer2020 | 120f8ed5e225dcbf9f469daf17a787e3f0b93417 | [
"MIT"
] | null | null | null | PythonCode/Pyboard/Examples/SImpleUART.py | CarterWS/Summer2020 | 120f8ed5e225dcbf9f469daf17a787e3f0b93417 | [
"MIT"
] | null | null | null | PythonCode/Pyboard/Examples/SImpleUART.py | CarterWS/Summer2020 | 120f8ed5e225dcbf9f469daf17a787e3f0b93417 | [
"MIT"
] | 11 | 2020-06-03T10:12:28.000Z | 2020-06-05T16:02:40.000Z | import time,pyb
# Open UART bus 3 at 9600 baud, 8 data bits, no parity, 1 stop bit
# (MicroPython/pyboard ``pyb`` module).
uart = pyb. UART(3,9600, bits=8, parity=None, stop=1)
# Poll the UART forever, printing the last received byte of each chunk.
while(True):
    # poll every 100 ms instead of blocking on a read
    time.sleep_ms(100)
    size = uart.any()  # number of bytes waiting in the receive buffer
    if (size > 0):
        string = uart.read(size)
        # take only the final element of the chunk; presumably ``read``
        # returns bytes, so indexing already yields an int -- TODO confirm
        data = int(string[-1])
        print('Data: %3d' % (data))
2d58a7a9f6b56ec2da62f50746c4430128a39551 | 2,089 | py | Python | scripts/add_session_chairs.py | pranav-ust/naacl-2021-website | b6353f2e7d5cbd5b3dae485035560df07e9a4b5c | [
"MIT",
"BSD-3-Clause"
] | 8 | 2020-09-25T09:52:53.000Z | 2022-01-24T04:14:33.000Z | scripts/add_session_chairs.py | pranav-ust/naacl-2021-website | b6353f2e7d5cbd5b3dae485035560df07e9a4b5c | [
"MIT",
"BSD-3-Clause"
] | 6 | 2020-10-01T19:41:29.000Z | 2021-06-18T16:32:02.000Z | scripts/add_session_chairs.py | pranav-ust/naacl-2021-website | b6353f2e7d5cbd5b3dae485035560df07e9a4b5c | [
"MIT",
"BSD-3-Clause"
] | 9 | 2020-07-14T20:44:08.000Z | 2021-07-20T16:23:11.000Z | # Adds session chairs to the existing (static) program.html file.
# Source: https://docs.google.com/spreadsheets/d/1aoUGr44xmU6bnJ_S61WTJkwarOcKzI_u1BgK4H99Yt4/edit?usp=sharing
# Please download and save the spreadsheet in CSV format.
PATH_TO_CSV = "/tmp/sessions.csv"
#PATH_TO_HTML = "../conference-program/main/program.html"
PATH_TO_HTML_IN = "/tmp/program_old.html"
PATH_TO_HTML_OUT = "/tmp/program.html"
SESSION_TITLE_COL = 4
SESSION_CHAIR_COL = 6
SESSION_CHAIR_AFFILIATION = 7
SESSION_CHAIR_EMAIL = 8
import csv
def mailto(email):
    """Build an HTML mailto link for ``email`` with the link text "email".

    Args:
        email (str): Email address to link to.

    Returns:
        str: An ``<a href="mailto:...">email</a>`` anchor tag.
    """
    # BUG FIX: the template has a single placeholder but ``email`` was
    # previously passed twice; the second argument was silently ignored
    return '<a href="mailto:{}">email</a>'.format(email)
class Chair(object):
    """A session chair: the session title plus the chair's contact details."""

    def __init__(self, session, name, affiliation, email):
        """Store the session title and the chair's name, affiliation, email."""
        self.session = session
        self.name = name
        self.affiliation = affiliation
        self.email = email

    @property
    def html(self):
        """HTML table row announcing this chair, to be inserted into the
        program page right after the session-title row."""
        prefix = '<tr><td valign=top style="padding-top: 10px;"> </td><td valign=top style="padding-top: 10px;"><i> Session chair: '
        suffix = '</i></td></tr>'
        return f"{prefix} {self.name} ({self.affiliation}) {suffix}"
# Parse the sessions CSV into Chair objects, skipping the header row.
chairs = []
with open(PATH_TO_CSV, "r") as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    for i, session in enumerate(csvreader):
        if i == 0:
            assert session[SESSION_CHAIR_COL] == "Session Chair" # sanity check that we got the column indices right
            continue # skip header
        session_title = session[SESSION_TITLE_COL]
        name = session[SESSION_CHAIR_COL]
        affiliation = session[SESSION_CHAIR_AFFILIATION]
        email = session[SESSION_CHAIR_EMAIL]
        chairs.append(Chair(session_title, name, affiliation, email))
print("Found %d chairs" % len(chairs))

# Copy the program HTML line by line, inserting each chair's table row
# right after the first line containing its session title. Chairs are
# consumed strictly in CSV order, so the CSV presumably follows the
# program's session order -- TODO confirm.
current_chair_idx = 0
with open(PATH_TO_HTML_OUT, "w") as out_html:
    with open(PATH_TO_HTML_IN, "r") as in_html:
        for line in in_html:
            out_html.write(line)
            if current_chair_idx < len(chairs):
                chair = chairs[current_chair_idx]
                if chair.session in line:
                    # print another line with the session chair.
                    print(chair.html)
                    out_html.write(chair.html)
                    current_chair_idx += 1
2d598f2e4196c937db359c1fb681b55186a150c3 | 2,084 | py | Python | generator/request.py | WulffHunter/log_generator | 47b8ab77d003494aa21b2c2ab85d1d21f7bae8fc | [
"MIT"
] | 5 | 2021-02-12T15:52:59.000Z | 2021-05-26T13:22:12.000Z | generator/request.py | WulffHunter/log_generator | 47b8ab77d003494aa21b2c2ab85d1d21f7bae8fc | [
"MIT"
] | 1 | 2021-11-03T10:10:58.000Z | 2021-11-05T11:43:16.000Z | generator/request.py | WulffHunter/log_generator | 47b8ab77d003494aa21b2c2ab85d1d21f7bae8fc | [
"MIT"
] | 2 | 2021-11-07T08:12:46.000Z | 2022-02-10T01:13:47.000Z | from faker import Faker
import random
import parameters
from utils import chance_choose, chance
from uri_generator import gen_path, uri_extensions, gen_uri_useable
# TODO: Continue to expand this list with the proper formats for other application
# layer protocols (e.g. FTP, SSH, SMTP...)
protocols = ['HTTP/1.0', 'HTTP/1.1', 'HTTP/2']
common_methods = ['GET', 'PUT', 'POST', 'DELETE']
# Faker is passed in as an argument to prevent unnecessary re-decleration,
# but is not needed to make this method run.
def gen_req_method(test_mode=False, faker=None):
    """Generate an HTTP request method string.

    Args:
        test_mode: Unused here; kept for signature parity with the other
            generators in this module.
        faker: Optional ``Faker`` instance to reuse; a fresh one is created
            when omitted.

    Returns:
        One of the common methods (GET/PUT/POST/DELETE) with the configured
        probability, otherwise any method Faker can produce.
    """
    if faker is None:
        faker = Faker()
    common = random.choice(common_methods)
    uncommon = faker.http_method()
    odds = parameters.frequency['common_http'] / 100
    return chance_choose(common, uncommon, odds)
def gen_uri_path(test_mode=False):
    """Generate a URI path: either a plain path or a document path ending
    in a file extension, chosen with equal probability."""
    # TODO: Continue extending the possible URI paths and file
    # extension types
    # TODO: Add in querystrings
    plain = gen_path(test_mode)
    document = '{}{}'.format(
        gen_path(test_mode), random.choice(uri_extensions))
    return random.choice([plain, document])
def gen_querystring(test_mode=False):
    """Generate a querystring including the leading ``?``, or an empty
    string with the configured probability."""
    if chance(parameters.frequency['empty_querystring']):
        # most requests carry no querystring at all
        return ''
    num_pairs = random.randint(1, parameters.max_val['querystring_elements'])
    pairs = ['{}={}'.format(gen_uri_useable(), gen_uri_useable())
             for _ in range(num_pairs)]
    return '?{}'.format('&'.join(pairs))
def gen_req_protocol(test_mode=False):
    """Pick a protocol string (e.g. ``HTTP/1.1``) uniformly at random.

    Args:
        test_mode: Unused; kept for signature parity with the other
            generators in this module.
    """
    return random.choice(protocols)
def gen_request(test_mode=False):
    """Generate a full HTTP request line: ``METHOD /path?query PROTOCOL``.

    Args:
        test_mode: Propagated to the individual field generators.

    Returns:
        str: The assembled request line.
    """
    fake = Faker()
    # BUG FIX: the Faker instance was previously passed positionally as
    # ``test_mode`` (gen_req_method(fake)), so the method generator
    # silently created its own Faker; pass it as the faker argument and
    # propagate test_mode consistently
    method = gen_req_method(test_mode, fake)
    path = gen_uri_path(test_mode)
    querystring = gen_querystring(test_mode)
    protocol = gen_req_protocol(test_mode)
    return '{} {}{} {}'.format(method, path, querystring, protocol)
| 24.232558 | 82 | 0.65499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 644 | 0.309021 |
2d59d5bd3118fef6e7534ad9d74a82446be2e23e | 2,570 | py | Python | ktx_parser/format_jupyter.py | SebastianoF/ktx-parser | 9445b7251b9d2984e57cf9e051cf975ab2b81109 | [
"MIT"
] | null | null | null | ktx_parser/format_jupyter.py | SebastianoF/ktx-parser | 9445b7251b9d2984e57cf9e051cf975ab2b81109 | [
"MIT"
] | null | null | null | ktx_parser/format_jupyter.py | SebastianoF/ktx-parser | 9445b7251b9d2984e57cf9e051cf975ab2b81109 | [
"MIT"
] | null | null | null | from pathlib import PosixPath, Path
from typing import Optional
import nbformat as nbf
from ktx_parser.abs_format import AbsFormat
from ktx_parser.abs_getter import AbsGetter
from ktx_parser.decorations import keys_to_decorations
class FormatJupyter(AbsFormat):
    """Output format that converts a getter's keyed text into a Jupyter
    notebook."""

    def __init__(self, getter: AbsGetter):
        # the getter supplies the parsed key/text dictionary plus metadata
        # (header keys, numbered keys, initializer code)
        self.getter = getter

    @staticmethod
    def get_format_tag() -> str:
        """Return this format's identifier, used to look up decorations."""
        return "jupyter"

    def convert(self, destination_file: PosixPath, subset_numbered_keys: Optional[str] = None):
        """Write the getter's content to ``destination_file`` as a notebook.

        Args:
            destination_file: Path of the ``.ipynb`` file to create; an
                existing file at this path is replaced.
            subset_numbered_keys: When the getter's numbered keys come as a
                dict of key groups, selects which group to emit; required in
                that case, ignored otherwise.

        Raises:
            ValueError: If the numbered keys are a dict and
                ``subset_numbered_keys`` is None.
        """
        ktx_dict = self.getter.get_dict()
        destination_file = Path(destination_file)
        getter_tag = self.getter.get_getter_tag()
        format_tag = self.get_format_tag()
        # Create cells sequence
        nb = nbf.v4.new_notebook()
        nb["cells"] = []
        # - Write header if any:
        for hdr_key in self.getter.get_headers_keys():
            prefix, suffix, add_key = keys_to_decorations(getter_tag, format_tag, hdr_key)
            if add_key:
                # prepend the key label itself to the cell text
                prefix += f"{hdr_key}. "
            nb["cells"].append(nbf.v4.new_markdown_cell(prefix + ktx_dict[hdr_key] + suffix))
        # - Write initializer - for interactive formats
        nb["cells"].append(nbf.v4.new_code_cell(self.getter.get_initializer()))
        # - Write numbered keys if any:
        # n_keys is an inclusive (first, last) range of key numbers
        n_keys = self.getter.get_quantity_numbered_keys()
        numbered_keys = self.getter.get_numbered_keys()
        if isinstance(numbered_keys, dict):
            if subset_numbered_keys is None:
                raise ValueError("Please specify a key for the provided dictionary of keyed text")
            numbered_keys = numbered_keys[subset_numbered_keys]
        num_numbered_keys_found = 0
        for n in range(n_keys[0], n_keys[1] + 1):
            for key in numbered_keys:
                prefix, suffix, add_key = keys_to_decorations(getter_tag, format_tag, key)
                nmb_key = f"{key}{n}"
                if add_key:
                    prefix += f"{n}. "
                if nmb_key in ktx_dict.keys():
                    num_numbered_keys_found += 1
                    nb["cells"].append(nbf.v4.new_markdown_cell(prefix + ktx_dict[nmb_key] + suffix))
                    # follow each found entry with an empty code cell for
                    # the reader to work in
                    nb["cells"].append(nbf.v4.new_code_cell(""))
        # - Delete file if one with the same name is found
        if destination_file.exists():
            destination_file.unlink()
        # - Save result to file
        nbf.write(nb, str(destination_file))
        print(f"File {destination_file} created with {num_numbered_keys_found} numbered keys.")
2d5abdd3abf9c29ea242210df43416a2eeef80b9 | 787 | py | Python | leetcode/binary_search.py | verthais/exercise-python | d989647e8fbfe8a79b9b5f2c3ab003715d238851 | [
"MIT"
] | null | null | null | leetcode/binary_search.py | verthais/exercise-python | d989647e8fbfe8a79b9b5f2c3ab003715d238851 | [
"MIT"
] | null | null | null | leetcode/binary_search.py | verthais/exercise-python | d989647e8fbfe8a79b9b5f2c3ab003715d238851 | [
"MIT"
] | null | null | null | def binary_search(collection, lhs, rhs, value):
if rhs > lhs:
mid = lhs + (rhs - lhs) // 2
if collection[mid] == value:
return mid
if collection[mid] > value:
return binary_search(collection, lhs, mid-1, value)
return binary_search(collection, mid+1, rhs, value)
return -1
def eq(exp, val):
    """Assert that ``val`` equals ``exp``, with a descriptive failure message."""
    message = f'Expected: {exp}, got value {val}'
    assert exp == val, message
def main():
    """Run the built-in binary_search self-tests."""
    cases = [
        (0, 5, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        (8, 13, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]),
        (8, 9, [1, 2, 3, 4, 5, 6, 7, 8, 9]),
    ]
    for expected_index, target, data in cases:
        eq(expected_index, binary_search(data, 0, len(data), target))
if __name__ == '__main__':
    # run the self-tests when executed as a script
    main()
    print('success')
2d5b27caab5eb536bb49195481bfb64795974fb0 | 4,831 | py | Python | pywakeup.py | CRImier/pyWakeUp | 3e80787bf5c771e9ade29d71642338cecfb8e023 | [
"MIT"
] | null | null | null | pywakeup.py | CRImier/pyWakeUp | 3e80787bf5c771e9ade29d71642338cecfb8e023 | [
"MIT"
] | null | null | null | pywakeup.py | CRImier/pyWakeUp | 3e80787bf5c771e9ade29d71642338cecfb8e023 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from datetime import datetime, timedelta
from threading import Thread
import logging
import os
from time import sleep
# Module-level logging setup: DEBUG level so the check_alarms() trace
# messages below are visible when running this file.
logging.basicConfig(level = logging.DEBUG)
"""# define various handy options
usage = "usage: %prog [options] [+]hours:minutes"
parser = OptionParser(usage=usage)
parser.add_option("-m", "--music", dest="music", help="Specify alarm music. May be path or number of track in known tracks list. Path is assumed relative to " + collection_location + " if relative path given, unless -r set. Default is random known track.", metavar="PATH_TO_MUSIC")
parser.add_option("-r", "--relative", dest="relative", help="Use path relative to current directory", default=False, action="store_true")
parser.add_option("-a", "--again", dest="again", help="Play a second alarm as a reminder a few minutes after the first", default=False, action="store_true")
parser.add_option("-c", "--command", help="Specify a command to play the alarm. Defaults to VLC.", default=False)
(options, args) = parser.parse_args()"""
"""while True:
# loop until alarm time
now = datetime.now()
print "\rAlarm in", alarmtime - now,
sys.stdout.flush()
if alarmtime < now:
# alarm time has passed, sound alarm
print "\nGood morning, sir! It is", now, "and this is your requested wake up call."
fred = Popen(cmd + [music])
fred.communicate()
break
else:
sleep(5)
if options.again:
# second alarm has been requested, snooze then trigger
end = datetime.now()
reminder = end + timedelta(minutes=snooze_time)
while True:
now = datetime.now()
if reminder < now:
print "You should be up by now, sir. You'll be late."
while True:
fred = Popen(cmd + [rickroll_path])
fred.communicate()
break
else:
sleep(5)"""
class AlarmDaemon():
    """Polls a list of scheduled alarms and fires a callback when one is due.

    Alarm entry structure: [datetime_of_trigger, ...anything...].
    Only element 0 (the trigger time) is interpreted; the remaining
    elements are preserved so callers can identify the alarm.
    """

    # Time (seconds) till the next alarm is divided by this to get the poll interval.
    sleep_divisor = 2
    # Poll interval (seconds) used while no alarms are set.
    empty_check_interval = 10

    def __init__(self):
        # All mutable state is per-instance. (Previously the lists were also
        # declared at class level, where they would be shared by anything
        # touching AlarmDaemon.set_alarms directly.)
        self.set_alarms = []        # pending alarms
        self.triggered_alarms = []  # alarms that already fired
        self.sleep_time = 0         # current poll interval in seconds

    def on_alarm_trigger_cb(self, alarm):
        """Hook invoked with the alarm entry when it fires.

        Default is a no-op; override in a subclass or assign a
        one-argument callable on the instance to receive notifications.
        (Replaces the former class-level ``lambda self, alarm: None``.)
        """

    def adjust_sleep_time(self, time_till_next_alarm):
        """Set the poll interval from the time (seconds) until the next alarm."""
        if time_till_next_alarm < 5:
            # Poll every second when an alarm is imminent.
            self.sleep_time = 1
        else:
            self.sleep_time = time_till_next_alarm / self.sleep_divisor

    def start_thread(self):
        """Run the polling loop in a background daemon thread."""
        self.thread = Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        """Poll forever: check alarms, then sleep for the adaptive interval."""
        while True:
            self.check_alarms()
            sleep(self.sleep_time)

    def set_alarm(self, alarm):
        """Schedule `alarm` ([trigger_datetime, ...]) for future triggering."""
        self.set_alarms.append(alarm)

    def check_alarms(self):
        """Fire every due alarm and recompute the poll interval."""
        logging.debug("Checking alarms...")
        now = datetime.now()
        time_till_alarms = []  # seconds until each still-pending alarm
        for alarm in self.set_alarms[:]:  # iterate a copy: we remove while looping
            time_till_alarm = int((alarm[0] - now).total_seconds())
            if time_till_alarm <= 0:  # due (or overdue): trigger it
                logging.info("Alarm happened, alarm info: {}".format(alarm[1:]))
                self.triggered_alarms.append(alarm)
                self.set_alarms.remove(alarm)
                self.on_alarm_trigger_cb(alarm)
            else:  # still pending: remember how far away it is
                logging.debug("Alarm didn't happen yet, time till alarm: {}".format(time_till_alarm))
                time_till_alarms.append(time_till_alarm)
        if self.set_alarms:
            logging.debug("Currently set alarms: {}".format(self.set_alarms))
            logging.debug("Alarms will happen after: {}".format(time_till_alarms))
            # Sleep proportionally to the closest remaining alarm.
            time_till_next_alarm = min(time_till_alarms)
            self.adjust_sleep_time(time_till_next_alarm)
            logging.debug("Set sleep time to {}".format(self.sleep_time))
        else:
            logging.debug("No alarms, will sleep for {} seconds".format(self.empty_check_interval))
            self.sleep_time = self.empty_check_interval
if __name__ == "__main__":
    # Manual demo: start the daemon and schedule two alarms.
    # NOTE(review): start_thread() uses a daemon thread and the script ends
    # right after set_alarm(), so the process exits before the +20s/+30s
    # alarms can fire -- presumably illustrative only; confirm intent.
    ad = AlarmDaemon()
    ad.start_thread()
    ad.set_alarm([datetime.now()+timedelta(seconds=20), "Alarm 1"])
    ad.set_alarm([datetime.now()+timedelta(seconds=30), "Alarm 2"])
| 40.596639 | 281 | 0.643552 | 2,710 | 0.56096 | 0 | 0 | 0 | 0 | 0 | 0 | 2,572 | 0.532395 |
2d5b33059a390f5db8e8df5de2be104977259c36 | 10,442 | py | Python | lib/mpmath/tests/test_hp.py | np0212/Capstone-Project | 424c4fd59b4d2d1b4ddbea3b78e93d7fa8575ca7 | [
"Apache-2.0"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | lib/mpmath/tests/test_hp.py | np0212/Capstone-Project | 424c4fd59b4d2d1b4ddbea3b78e93d7fa8575ca7 | [
"Apache-2.0"
] | 7 | 2017-05-01T14:15:32.000Z | 2017-09-06T20:44:24.000Z | lib/mpmath/tests/test_hp.py | np0212/Capstone-Project | 424c4fd59b4d2d1b4ddbea3b78e93d7fa8575ca7 | [
"Apache-2.0"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | """
Check that the output from irrational functions is accurate for
high-precision input, from 5 to 200 digits. The reference values were
verified with Mathematica.
"""
import time
from mpmath import *
precs = [5, 15, 28, 35, 57, 80, 100, 150, 200]
# sqrt(3) + pi/2
a = \
"3.302847134363773912758768033145623809041389953497933538543279275605"\
"841220051904536395163599428307109666700184672047856353516867399774243594"\
"67433521615861420725323528325327484262075464241255915238845599752675"
# e + 1/euler**2
b = \
"5.719681166601007617111261398629939965860873957353320734275716220045750"\
"31474116300529519620938123730851145473473708966080207482581266469342214"\
"824842256999042984813905047895479210702109260221361437411947323431"
# sqrt(a)
sqrt_a = \
"1.817373691447021556327498239690365674922395036495564333152483422755"\
"144321726165582817927383239308173567921345318453306994746434073691275094"\
"484777905906961689902608644112196725896908619756404253109722911487"
# sqrt(a+b*i).real
sqrt_abi_real = \
"2.225720098415113027729407777066107959851146508557282707197601407276"\
"89160998185797504198062911768240808839104987021515555650875977724230130"\
"3584116233925658621288393930286871862273400475179312570274423840384"
# sqrt(a+b*i).imag
sqrt_abi_imag = \
"1.2849057639084690902371581529110949983261182430040898147672052833653668"\
"0629534491275114877090834296831373498336559849050755848611854282001250"\
"1924311019152914021365263161630765255610885489295778894976075186"
# log(a)
log_a = \
"1.194784864491089550288313512105715261520511949410072046160598707069"\
"4336653155025770546309137440687056366757650909754708302115204338077595203"\
"83005773986664564927027147084436553262269459110211221152925732612"
# log(a+b*i).real
log_abi_real = \
"1.8877985921697018111624077550443297276844736840853590212962006811663"\
"04949387789489704203167470111267581371396245317618589339274243008242708"\
"014251531496104028712866224020066439049377679709216784954509456421"
# log(a+b*i).imag
log_abi_imag = \
"1.0471204952840802663567714297078763189256357109769672185219334169734948"\
"4265809854092437285294686651806426649541504240470168212723133326542181"\
"8300136462287639956713914482701017346851009323172531601894918640"
# exp(a)
exp_a = \
"27.18994224087168661137253262213293847994194869430518354305430976149"\
"382792035050358791398632888885200049857986258414049540376323785711941636"\
"100358982497583832083513086941635049329804685212200507288797531143"
# exp(a+b*i).real
exp_abi_real = \
"22.98606617170543596386921087657586890620262522816912505151109385026"\
"40160179326569526152851983847133513990281518417211964710397233157168852"\
"4963130831190142571659948419307628119985383887599493378056639916701"
# exp(a+b*i).imag
exp_abi_imag = \
"-14.523557450291489727214750571590272774669907424478129280902375851196283"\
"3377162379031724734050088565710975758824441845278120105728824497308303"\
"6065619788140201636218705414429933685889542661364184694108251449"
# a**b
pow_a_b = \
"928.7025342285568142947391505837660251004990092821305668257284426997"\
"361966028275685583421197860603126498884545336686124793155581311527995550"\
"580229264427202446131740932666832138634013168125809402143796691154"
# (a**(a+b*i)).real
pow_a_abi_real = \
"44.09156071394489511956058111704382592976814280267142206420038656267"\
"67707916510652790502399193109819563864568986234654864462095231138500505"\
"8197456514795059492120303477512711977915544927440682508821426093455"
# (a**(a+b*i)).imag
pow_a_abi_imag = \
"27.069371511573224750478105146737852141664955461266218367212527612279886"\
"9322304536553254659049205414427707675802193810711302947536332040474573"\
"8166261217563960235014674118610092944307893857862518964990092301"
# ((a+b*i)**(a+b*i)).real
pow_abi_abi_real = \
"-0.15171310677859590091001057734676423076527145052787388589334350524"\
"8084195882019497779202452975350579073716811284169068082670778986235179"\
"0813026562962084477640470612184016755250592698408112493759742219150452"\
# ((a+b*i)**(a+b*i)).imag
pow_abi_abi_imag = \
"1.2697592504953448936553147870155987153192995316950583150964099070426"\
"4736837932577176947632535475040521749162383347758827307504526525647759"\
"97547638617201824468382194146854367480471892602963428122896045019902"
# sin(a)
sin_a = \
"-0.16055653857469062740274792907968048154164433772938156243509084009"\
"38437090841460493108570147191289893388608611542655654723437248152535114"\
"528368009465836614227575701220612124204622383149391870684288862269631"
# sin(1000*a)
sin_1000a = \
"-0.85897040577443833776358106803777589664322997794126153477060795801"\
"09151695416961724733492511852267067419573754315098042850381158563024337"\
"216458577140500488715469780315833217177634490142748614625281171216863"
# sin(a+b*i)
sin_abi_real = \
"-24.4696999681556977743346798696005278716053366404081910969773939630"\
"7149215135459794473448465734589287491880563183624997435193637389884206"\
"02151395451271809790360963144464736839412254746645151672423256977064"
sin_abi_imag = \
"-150.42505378241784671801405965872972765595073690984080160750785565810981"\
"8314482499135443827055399655645954830931316357243750839088113122816583"\
"7169201254329464271121058839499197583056427233866320456505060735"
# cos
cos_a = \
"-0.98702664499035378399332439243967038895709261414476495730788864004"\
"05406821549361039745258003422386169330787395654908532996287293003581554"\
"257037193284199198069707141161341820684198547572456183525659969145501"
cos_1000a = \
"-0.51202523570982001856195696460663971099692261342827540426136215533"\
"52686662667660613179619804463250686852463876088694806607652218586060613"\
"951310588158830695735537073667299449753951774916401887657320950496820"
# tan
tan_a = \
"0.162666873675188117341401059858835168007137819495998960250142156848"\
"639654718809412181543343168174807985559916643549174530459883826451064966"\
"7996119428949951351938178809444268785629011625179962457123195557310"
tan_abi_real = \
"6.822696615947538488826586186310162599974827139564433912601918442911"\
"1026830824380070400102213741875804368044342309515353631134074491271890"\
"467615882710035471686578162073677173148647065131872116479947620E-6"
tan_abi_imag = \
"0.9999795833048243692245661011298447587046967777739649018690797625964167"\
"1446419978852235960862841608081413169601038230073129482874832053357571"\
"62702259309150715669026865777947502665936317953101462202542168429"
def test_hp():
    """Check irrational functions against the Mathematica-verified reference
    digit strings defined above, at each precision in `precs` (5..200 digits).

    The working values aa = a = sqrt(3) + pi/2 and bb = b = e + 1/euler**2
    are first parsed at dps + 8 digits so that rounding of the inputs does
    not mask accuracy errors at the target precision.
    """
    for dps in precs:
        mp.dps = dps + 8
        aa = mpf(a)
        bb = mpf(b)
        a1000 = 1000*mpf(a)
        abi = mpc(aa, bb)
        mp.dps = dps
        assert (sqrt(3) + pi/2).ae(aa)
        assert (e + 1/euler**2).ae(bb)
        assert sqrt(aa).ae(mpf(sqrt_a))
        assert sqrt(abi).ae(mpc(sqrt_abi_real, sqrt_abi_imag))
        assert log(aa).ae(mpf(log_a))
        assert log(abi).ae(mpc(log_abi_real, log_abi_imag))
        assert exp(aa).ae(mpf(exp_a))
        assert exp(abi).ae(mpc(exp_abi_real, exp_abi_imag))
        assert (aa**bb).ae(mpf(pow_a_b))
        assert (aa**abi).ae(mpc(pow_a_abi_real, pow_a_abi_imag))
        assert (abi**abi).ae(mpc(pow_abi_abi_real, pow_abi_abi_imag))
        # sin/cos/tan below are fed the module-level *string* constant `a`
        # directly (coerced by mpmath), unlike the mpf values used above.
        assert sin(a).ae(mpf(sin_a))
        assert sin(a1000).ae(mpf(sin_1000a))
        assert sin(abi).ae(mpc(sin_abi_real, sin_abi_imag))
        assert cos(a).ae(mpf(cos_a))
        assert cos(a1000).ae(mpf(cos_1000a))
        assert tan(a).ae(mpf(tan_a))
        assert tan(abi).ae(mpc(tan_abi_real, tan_abi_imag))
        # check that complex cancellation is avoided so that both
        # real and imaginary parts have high relative accuracy.
        # abs_eps should be 0, but has to be set to 1e-205 to pass the
        # 200-digit case, probably due to slight inaccuracy in the
        # precomputed input
        assert (tan(abi).real).ae(mpf(tan_abi_real), abs_eps=1e-205)
        assert (tan(abi).imag).ae(mpf(tan_abi_imag), abs_eps=1e-205)
    mp.dps = 460
    assert str(log(3))[-20:] == '02166121184001409826'
    mp.dps = 15  # restore the default working precision
# Since str(a) can differ in the last digit from rounded a, and I want
# to compare the last digits of big numbers with the results in Mathematica,
# I made this hack to get the last 20 digits of rounded a
def last_digits(a):
    """Return the last 20 digits of `a` after explicitly re-rounding its
    trailing `m` digits.

    str(a) can differ in its final digit from the correctly rounded value,
    so the tail is extracted from repr(a) (which carries extra digits),
    rounded, and spliced back before taking the last 20 characters.
    """
    r = repr(a)
    s = str(a)
    #dps = mp.dps
    #mp.dps += 3
    m = 10  # how many trailing digits of str(a) get re-rounded
    # Strip the prefix shared with str(a) and the mpf('...') wrapper,
    # leaving only the extra trailing digits of the repr.
    r = r.replace(s[:-m],'')
    r = r.replace("mpf('",'').replace("')",'')
    # int(r) below drops leading zeros; count them so they can be restored.
    num0 = 0
    for c in r:
        if c == '0':
            num0 += 1
        else:
            break
    # Scale so that exactly m digits remain before the decimal point.
    b = float(int(r))/10**(len(r) - m)
    if b >= 10**m - 0.5:
        # Rounding would carry into the kept prefix; not handled here.
        raise NotImplementedError
    n = int(round(b))
    sn = str(n)
    # Reassemble: untouched prefix + restored leading zeros + rounded tail.
    s = s[:-m] + '0'*num0 + sn
    return s[-20:]
# values checked with Mathematica
def test_log_hp():
    """Spot-check the last 20 digits of log() at 2000 and 10000 dps against
    Mathematica reference values (quoted in the inline comments)."""
    mp.dps = 2000
    a = mpf(10)**15000/3
    r = log(a)
    res = last_digits(r)
    # Mathematica N[Log[10^15000/3], 2000]
    # ...7443804441768333470331
    assert res == '44380444176833347033'
    # see issue 145
    r = log(mpf(3)/2)
    # Mathematica N[Log[3/2], 2000]
    # ...69653749808140753263288
    res = last_digits(r)
    assert res == '53749808140753263288'
    mp.dps = 10000
    r = log(2)
    res = last_digits(r)
    # Mathematica N[Log[2], 10000]
    # ...695615913401856601359655561
    assert res == '91340185660135965556'
    r = log(mpf(10)**10/3)
    res = last_digits(r)
    # Mathematica N[Log[10^10/3], 10000]
    # ...587087654020631943060007154
    assert res == '54020631943060007154', res
    r = log(mpf(10)**100/3)
    res = last_digits(r)
    # Mathematica N[Log[10^100/3], 10000]
    # ...59246336539088351652334666
    assert res == '36539088351652334666', res
    # Compute 1 - 10^-10 at 10 extra digits so the subtraction is exact.
    mp.dps += 10
    a = 1 - mpf(1)/10**10
    mp.dps -= 10
    r = log(a)
    res = last_digits(r)
    # ...3310334360482956137216724048322957404
    # 372167240483229574038733026370
    # Mathematica N[Log[1 - 10^-10]*10^10, 10000]
    # ...60482956137216724048322957404
    assert res == '37216724048322957404', res
    mp.dps = 10000
    # Same trick with 100 extra digits for 1 + 10^-100.
    mp.dps += 100
    a = 1 + mpf(1)/10**100
    mp.dps -= 100
    r = log(a)
    res = last_digits(+r)
    # Mathematica N[Log[1 + 10^-100]*10^10, 10030]
    # ...3994733877377412241546890854692521568292338268273 10^-91
    assert res == '39947338773774122415', res
    mp.dps = 15  # restore the default working precision
def test_exp_hp():
    """Check 20 digits of exp(1/10) around the 4000th decimal place.

    NOTE(review): unlike the other tests here, mp.dps is left at 4000
    afterwards -- confirm no later code depends on the default (15).
    """
    mp.dps = 4000
    r = exp(mpf(1)/10)
    # IntegerPart[N[Exp[1/10] * 10^4000, 4000]]
    # ...92167105162069688129
    assert int(r * 10**mp.dps) % 10**20 == 92167105162069688129
| 35.638225 | 76 | 0.77648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,160 | 0.685692 |
2d5d4b0e8ee8b61a38dc6be3798b8feb193f02e1 | 61 | py | Python | mp2ragelib/ui.py | ofgulban/mp2ragelib | 3df294f02ae4aadd0c9f91e8a101305f3f5f15fe | [
"BSD-3-Clause"
] | 1 | 2020-09-04T19:36:58.000Z | 2020-09-04T19:36:58.000Z | mp2ragelib/ui.py | ofgulban/mp2ragelib | 3df294f02ae4aadd0c9f91e8a101305f3f5f15fe | [
"BSD-3-Clause"
] | null | null | null | mp2ragelib/ui.py | ofgulban/mp2ragelib | 3df294f02ae4aadd0c9f91e8a101305f3f5f15fe | [
"BSD-3-Clause"
] | null | null | null | """Commandline interface."""
# TODO: After scripting works.
| 15.25 | 30 | 0.704918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.95082 |
2d5d92bfe2c4a6e4236c3e16196184c932e0ccc3 | 1,396 | py | Python | DQMServices/Components/scripts/dqmiodumpindices.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | DQMServices/Components/scripts/dqmiodumpindices.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | DQMServices/Components/scripts/dqmiodumpindices.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | #!/usr/bin/env python
from __future__ import print_function
import uproot
import argparse
from prettytable import PrettyTable
from collections import defaultdict
# CLI: one positional argument, the path to a local DQMIO ROOT file.
parser = argparse.ArgumentParser(description="Shows Indices table in a DQMIO file. Last column (ME count) is computed like this: lastIndex - firstIndex + 1")
parser.add_argument('filename', help='Name of local root file. For remote files, use edmCopyUtil first: `edmCopyUtil root://cms-xrd-global.cern.ch/<FILEPATH> .`')
args = parser.parse_args()

# Readable names for the numeric ME type codes stored in the Indices tree.
typeNames = ['Ints', 'Floats', 'Strings', 'TH1Fs', 'TH1Ss', 'TH1Ds',
             'TH2Fs', 'TH2Ss', 'TH2Ds', 'TH3Fs', 'TProfiles', 'TProfile2Ds']

root_file = uproot.open(args.filename)
if 'Indices;1' in root_file.keys():
    indices = root_file['Indices']
    # Read the five parallel branches of the Indices tree.
    branches = [indices.array(name) for name in ('Run', 'Lumi', 'FirstIndex', 'LastIndex', 'Type')]
    table = PrettyTable()
    table.field_names = ['Run', 'Lumi', 'FirstIndex', 'LastIndex', 'Type', 'ME Count']
    for run, lumi, first, last, me_type in zip(*branches):
        type_name = typeNames[me_type] if me_type < len(typeNames) else 'Unknown'
        table.add_row([run, lumi, first, last, '%s (%s)' % (me_type, type_name), int(last - first + 1)])
    print(table)
else:
    print("This does not look like DQMIO data.")
| 33.238095 | 162 | 0.689112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.377507 |
2d5dc4553abbc55c9321bc9cfe1937d4082c6990 | 272 | py | Python | Modulo1/exercicio009_2.py | natterra/python3 | e462192f03e81e2a614b1f2a441ffa702e4988e0 | [
"MIT"
] | null | null | null | Modulo1/exercicio009_2.py | natterra/python3 | e462192f03e81e2a614b1f2a441ffa702e4988e0 | [
"MIT"
] | null | null | null | Modulo1/exercicio009_2.py | natterra/python3 | e462192f03e81e2a614b1f2a441ffa702e4988e0 | [
"MIT"
] | null | null | null | #Exercício Python 9: Faça um programa que leia um número Inteiro qualquer e mostre na tela a sua tabuada.
# Read an integer and print its multiplication table from 1 to 10.
n = int(input("Digite um número: "))
print("--------------")
for i in range(1, 11):
    print("{:2} x {:2} = {:2}".format(n, i, n * i))
print("--------------")
2d5e1da162df4150bdc1aed06c46f5054aabf1f7 | 52 | py | Python | test3a.py | naveen912014/pyneta | 637bf25d39a8e773845dc57eac276382642d92d0 | [
"Apache-2.0"
] | null | null | null | test3a.py | naveen912014/pyneta | 637bf25d39a8e773845dc57eac276382642d92d0 | [
"Apache-2.0"
] | null | null | null | test3a.py | naveen912014/pyneta | 637bf25d39a8e773845dc57eac276382642d92d0 | [
"Apache-2.0"
] | null | null | null |
# Minimal smoke-test script: prints a greeting.
# Fix: removed a stray ':q' (a leftover vim quit command) that made the
# whole file a SyntaxError, and the dangling bare `print` reference.
print('This is Naveen')
| 2.26087 | 23 | 0.442308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.307692 |
2d5eb9a5f86eac06485fcefb4b677be4db7c878c | 4,988 | py | Python | tests/test_metrics.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | 27 | 2019-03-15T21:21:17.000Z | 2022-01-03T14:37:02.000Z | tests/test_metrics.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | 4 | 2019-09-30T13:29:20.000Z | 2021-03-20T20:05:36.000Z | tests/test_metrics.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | 11 | 2019-03-15T21:21:19.000Z | 2021-08-09T22:10:15.000Z | """Test evaluation functionality."""
from moda.evaluators import f_beta
from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics
def test_f_beta1():
    """F-score with beta=1 (plain F1) for precision 0.6, recall 1.0 is 0.75."""
    score = f_beta(0.6, 1.0, 1)
    assert 0.74 < score < 0.76
def test_f_beta3():
    """F-score with beta=3 weights recall heavily: ~0.9375 here."""
    score = f_beta(0.6, 1.0, 3)
    assert 0.937 < score < 0.938
def test_calculate_metrics_with_shift_all_zero():
    """No anomalies labelled or predicted -> every count is zero."""
    actual = [0] * 11
    predicted = [0] * 11
    m = calculate_metrics_with_shift(predicted, actual, window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (0, 0, 0)
def test_calculate_metrics_with_shift_actual_zero():
    """Predictions with no true anomalies produce only false positives."""
    actual = [0] * 11
    m = calculate_metrics_with_shift([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], actual, window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (0, 2, 0)
    m = calculate_metrics_with_shift([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], actual, window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (0, 4, 0)
def test_calculate_metrics_with_shift_predicted_zero():
    """True anomalies with an all-zero prediction produce only false negatives."""
    predicted = [0] * 11
    m = calculate_metrics_with_shift(predicted, [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (0, 0, 2)
    m = calculate_metrics_with_shift(predicted, [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (0, 0, 4)
def test_calculate_metrics_with_shift_perfect():
    """Predictions identical to the labels yield only true positives."""
    for labels, expected_tp in (
            ([0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], 2),
            ([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], 4)):
        # Pass a copy as the prediction, matching the original's two
        # equal-but-distinct lists.
        m = calculate_metrics_with_shift(labels[:], labels, window_size=1)
        assert (m['TP'], m['FP'], m['FN']) == (expected_tp, 0, 0)
def test_calculate_metrics_with_shift_mixed():
    """One hit, one false alarm, one miss."""
    m = calculate_metrics_with_shift(
        [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
        window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (1, 1, 1)
def test_calculate_metrics_with_shift_in_window():
    """A prediction one step early still counts as a hit with window_size=1."""
    m = calculate_metrics_with_shift(
        [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        window_size=1)
    assert (m['TP'], m['FP'], m['FN']) == (1, 0, 0)
def test_calculate_metrics_with_shift_in_large_window():
    """A prediction three steps early is still a hit when window_size=4."""
    m = calculate_metrics_with_shift(
        [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        window_size=4)
    assert (m['TP'], m['FP'], m['FN']) == (1, 0, 0)
def test_join_metrics():
    """_join_metrics sums counts category-wise and keeps one-sided categories."""
    left = {'cat1': {'TP': 12, 'FP': 7, 'FN': 35, 'num_samples': 1, 'num_values': 2},
            'cat2': {'TP': 0, 'FP': 0, 'FN': 0, 'num_samples': 1, 'num_values': 2},
            'cat4': {'TP': 9, 'FP': 9, 'FN': 9, 'num_samples': 1, 'num_values': 2}}
    right = {'cat1': {'TP': 10, 'FP': 10, 'FN': 10, 'num_samples': 1, 'num_values': 2},
             'cat2': {'TP': 2, 'FP': 2, 'FN': 2, 'num_samples': 1, 'num_values': 2},
             'cat3': {'TP': 1, 'FP': 1, 'FN': 1, 'num_samples': 1, 'num_values': 2}}
    joined = _join_metrics(left, right)
    # cat1 exists on both sides: every field is summed.
    assert (joined['cat1']['TP'], joined['cat1']['FP'], joined['cat1']['FN']) == (22, 17, 45)
    assert (joined['cat1']['num_samples'], joined['cat1']['num_values']) == (2, 4)
    # cat2 on both sides (0 + 2); cat3 only right; cat4 only left.
    for cat, count in (('cat2', 2), ('cat3', 1), ('cat4', 9)):
        assert (joined[cat]['TP'], joined[cat]['FP'], joined[cat]['FN']) == (count, count, count)
if __name__ == '__main__':
    # Allow running this module directly (outside pytest): execute every test.
    test_f_beta1()
    test_f_beta3()
    test_calculate_metrics_with_shift_all_zero()
    test_calculate_metrics_with_shift_actual_zero()
    test_calculate_metrics_with_shift_predicted_zero()
    test_calculate_metrics_with_shift_perfect()
    test_calculate_metrics_with_shift_mixed()
    test_calculate_metrics_with_shift_in_window()
    test_calculate_metrics_with_shift_in_large_window()
    test_join_metrics()
| 34.4 | 90 | 0.604451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.11648 |
2d601a610adba2a9dbf87218ce0ac031d70916ca | 9,070 | py | Python | server/utils/server_db.py | DoctorChe/Python_DataBase_PyQT | 6f65bad52edf9afa8cfce9689f7e88f87d420d9d | [
"MIT"
] | 1 | 2019-08-07T20:08:32.000Z | 2019-08-07T20:08:32.000Z | server/utils/server_db.py | DoctorChe/Python_DataBase_PyQT | 6f65bad52edf9afa8cfce9689f7e88f87d420d9d | [
"MIT"
] | 6 | 2019-08-08T11:53:09.000Z | 2019-09-11T14:45:59.000Z | server/utils/server_db.py | DoctorChe/Python_DataBase_PyQT | 6f65bad52edf9afa8cfce9689f7e88f87d420d9d | [
"MIT"
] | null | null | null | from contextlib import contextmanager
from sqlalchemy import MetaData
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from .config_server import SERVER_DATABASE
# Engine bound to the server database URL from config_server.
engine = create_engine(SERVER_DATABASE)
# Declarative base whose metadata is pre-bound to this engine.
Base = declarative_base(metadata=MetaData(bind=engine))
# Session factory; prefer the session_scope() context manager below.
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations.

    Yields a new Session, commits on success, rolls back on any exception
    (re-raising it), and always closes the session.
    """
    session = Session()
    try:
        yield session
        session.commit()
    except BaseException:
        # Roll back on *any* exception -- including KeyboardInterrupt and
        # GeneratorExit -- then re-raise. Equivalent to the bare `except:`
        # this replaces, but explicit (PEP 8 / flake8 E722).
        session.rollback()
        raise
    finally:
        session.close()
# # Класс - серверная база данных:
# class ServerStorage:
# Base = declarative_base()
#
# # Класс - отображение таблицы всех пользователей
# # Экземпляр этого класса = запись в таблице AllUsers
# class AllUsers(Base):
# __tablename__ = "users"
# id = Column(Integer, primary_key=True)
# name = Column(String, unique=True)
# last_login = Column(String)
# contacts = relationship("Contact", back_populates="user")
#
# def __init__(self, username):
# self.id = None
# self.name = username
# self.last_login = datetime.datetime.now()
#
# # Класс - отображение таблицы активных пользователей:
# # Экземпляр этого класса = запись в таблице ActiveUsers
# class ActiveUsers(Base):
# __tablename__ = "active_users"
# id = Column(Integer, primary_key=True)
# user = Column(ForeignKey("users.id"), unique=True)
# ip_address = Column(String)
# port = Column(Integer)
# login_time = Column(DateTime)
#
# def __init__(self, user_id, ip_address, port, login_time):
# self.id = None
# self.user = user_id
# self.ip_address = ip_address
# self.port = port
# self.login_time = login_time
#
# # Класс - отображение таблицы истории входов
# # Экземпляр этого класса = запись в таблице LoginHistory
# class LoginHistory(Base):
# __tablename__ = "login_history"
# id = Column(Integer, primary_key=True)
# name = Column(ForeignKey("users.id"))
# date_time = Column(DateTime)
# ip = Column(String)
# port = Column(Integer)
#
# def __init__(self, name, date, ip, port):
# self.id = None
# self.name = name
# self.date_time = date
# self.ip = ip
# self.port = port
#
# class Contact(Base):
# __tablename__ = "contact"
# id = Column(Integer, primary_key=True, autoincrement=True)
# name = Column(String)
# information = Column(String)
# user_id = Column(Integer, ForeignKey("users.id"))
# user = relationship("AllUsers", back_populates="contacts")
#
# def __init__(self, contact_name, user_id, information):
# self.id = None
# self.name = contact_name
# self.information = information
# self.user_id = user_id
#
# def __init__(self):
# # Создаём движок базы данных
# # SERVER_DATABASE - sqlite:///server/db/server_db.sqlite3
# # echo=False - отключаем ведение лога (вывод sql-запросов)
# # pool_recycle - По умолчанию соединение с БД через 8 часов простоя обрывается.
# # Чтобы это не случилось нужно добавить опцию pool_recycle = 7200 (переустановка соединения через 2 часа)
# self.database_engine = create_engine(SERVER_DATABASE, echo=False, pool_recycle=7200)
#
# # Метаданные доступны через класс Base
# self.metadata = self.Base.metadata
#
# # Создаём таблицы
# self.metadata.create_all(self.database_engine)
#
# # Создаём сессию
# Session = sessionmaker(bind=self.database_engine)
# self.session = Session()
#
# # Если в таблице активных пользователей есть записи, то их необходимо удалить
# # Когда устанавливаем соединение, очищаем таблицу активных пользователей
# self.session.query(self.ActiveUsers).delete()
# self.session.commit()
#
# # Функция выполняющяяся при входе пользователя, записывает в базу факт входа
# def user_login(self, username, ip_address, port):
# # Запрос в таблицу пользователей на наличие там пользователя с таким именем
# result = self.session.query(self.AllUsers).filter_by(name=username)
#
# # Если имя пользователя уже присутствует в таблице, обновляем время последнего входа
# if result.count():
# user = result.first()
# user.last_login = datetime.datetime.now()
# # Если нету, то создаздаём нового пользователя
# else:
# # Создаем экземпляр класса self.AllUsers, через который передаем данные в таблицу
# user = self.AllUsers(username)
# self.session.add(user)
# # Комит здесь нужен, чтобы присвоился ID
# self.session.commit()
#
# # Теперь можно создать запись в таблицу активных пользователей о факте входа.
# # Создаем экземпляр класса self.ActiveUsers, через который передаем данные в таблицу
# new_active_user = self.ActiveUsers(user.id, ip_address, port, datetime.datetime.now())
# self.session.add(new_active_user)
#
# # и сохранить в историю входов
# # Создаем экземпляр класса self.LoginHistory, через который передаем данные в таблицу
# history = self.LoginHistory(user.id, datetime.datetime.now(), ip_address, port)
# self.session.add(history)
#
# # Сохраняем изменения
# self.session.commit()
#
# # Функция фиксирующая отключение пользователя
# def user_logout(self, username):
# # Запрашиваем пользователя, что покидает нас
# # получаем запись из таблицы AllUsers
# user = self.session.query(self.AllUsers).filter_by(name=username).first()
#
# # Удаляем его из таблицы активных пользователей.
# # Удаляем запись из таблицы ActiveUsers
# self.session.query(self.ActiveUsers).filter_by(user=user.id).delete()
#
# # Применяем изменения
# self.session.commit()
#
# # Функция возвращает список известных пользователей со временем последнего входа.
# def users_list(self):
# query = self.session.query(
# self.AllUsers.name,
# self.AllUsers.last_login,
# )
# # Возвращаем список кортежей
# return query.all()
#
# # Функция возвращает список активных пользователей
# def active_users_list(self):
# # Запрашиваем соединение таблиц и собираем кортежи имя, адрес, порт, время.
# query = self.session.query(
# self.AllUsers.name,
# self.ActiveUsers.ip_address,
# self.ActiveUsers.port,
# self.ActiveUsers.login_time
# ).join(self.AllUsers)
# # Возвращаем список кортежей
# return query.all()
#
# # Функция возвращающая историю входов по пользователю или всем пользователям
# def login_history(self, username=None):
# # Запрашиваем историю входа
# query = self.session.query(self.AllUsers.name,
# self.LoginHistory.date_time,
# self.LoginHistory.ip,
# self.LoginHistory.port
# ).join(self.AllUsers)
# # Если было указано имя пользователя, то фильтруем по нему
# if username:
# query = query.filter(self.AllUsers.name == username)
# return query.all()
#
#
# # Отладка
# if __name__ == "__main__":
# SERVER_DATABASE = "sqlite:///server_db.sqlite3"
# test_db = ServerStorage()
# # выполняем 'подключение' пользователя
# test_db.user_login("client_1", "192.168.1.4", 8888)
# test_db.user_login("client_2", "192.168.1.5", 7777)
# # выводим список кортежей - активных пользователей
# print(test_db.active_users_list())
# # выполянем 'отключение' пользователя
# test_db.user_logout("client_1")
# # выводим список активных пользователей
# print(test_db.active_users_list())
# # запрашиваем историю входов по пользователю
# test_db.login_history("client_1")
# # выводим список известных пользователей
# print(test_db.users_list())
# test_db.add_contact("client_1", "client_2")
# test_db.user_login("client_3", "192.168.1.5", 9999)
# test_db.add_contact("client_1", "client_3")
# test_db.add_contact("client_1", "client_3")
# print(test_db.get_contacts("client_1"))
# test_db.remove_contact("client_1", "client_2")
# print(test_db.get_contacts("client_1"))
# print(f'{test_db.get_contact("client_1", "client_3").name} '
# f'- info: {test_db.get_contact("client_1", "client_3").information}')
# test_db.update_contact("client_1", "client_3", "New information")
# print(f'{test_db.get_contact("client_1", "client_3").name} '
# f'- info: {test_db.get_contact("client_1", "client_3").information}')
| 40.672646 | 115 | 0.634179 | 0 | 0 | 190 | 0.017318 | 206 | 0.018777 | 0 | 0 | 10,184 | 0.928265 |
2d62a8f36d7f870a7dac21567c1c12af16462737 | 3,064 | py | Python | glouton/repositories/archive/archiveRepo.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 13 | 2018-01-29T06:08:15.000Z | 2020-03-04T07:00:56.000Z | glouton/repositories/archive/archiveRepo.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 10 | 2018-12-21T11:37:21.000Z | 2021-05-09T12:39:23.000Z | glouton/repositories/archive/archiveRepo.py | deckbsd/glouton-satnogs-data-downloader | 9674081b669b0ca3c04513ede4127c6221962a73 | [
"MIT"
] | 4 | 2019-01-25T13:40:13.000Z | 2019-07-22T08:14:19.000Z | from queue import Queue
from threading import Thread
from glouton.commands.download.downloadCommandParams import DownloadCommandParams
from glouton.commands.download.archiveDownloadCommand import ArchiveDownloadCommand
from glouton.commands.module.endModuleCommand import EndModuleCommand
from glouton.commands.module.endModuleCommandParams import EndModuleCommandParams
from glouton.workers.downloadWorker import DownloadWorker
from glouton.workers.moduleWorker import ModuleWorker
from glouton.workers.endModuleWorker import EndModuleWorker
from glouton.domain.interfaces.downloadable import Downloadable
from glouton.shared import threadHelper
from threading import Event
import os
class ArchiveRepo(Downloadable):
    """Download repository for observation archives.

    Queues archive download commands and creates the worker threads that
    consume them: a download worker, an optional post-processing module
    worker and an optional end-of-run module worker.
    """
    def __init__(self, working_dir, modules, end_modules):
        self.__working_dir = working_dir
        # Work queues feeding the three worker types.
        self.__archive_commands = Queue()
        self.__archive_modules_commands = Queue()
        self.__archive_end_modules_commands = Queue()
        self.__modules = modules
        self.__end_modules = end_modules
        # Events used to signal download progress / completion between workers.
        self.__download_status = Event()
        self.__is_download_finished = Event()
    def register_download_command(self, observation, start_date, end_date):
        """Queue a download command for one observation's archive."""
        cmd_parameters = DownloadCommandParams(
            self.__working_dir, self.__create_dir_name('archive', start_date, end_date), self.__modules)
        archive_download_command = ArchiveDownloadCommand(
            cmd_parameters, observation, self.__archive_modules_commands)
        self.__archive_commands.put(archive_download_command)
    def register_end_command(self, start_date, end_date):
        """Queue the end-of-run command (only when end modules are configured)."""
        if self.__end_modules is not None:
            dir_name = self.__create_dir_name('archive', start_date, end_date)
            module_parameters = EndModuleCommandParams(full_path=os.path.join(
                self.__working_dir, dir_name), modules=self.__end_modules)
            archive_end_module_command = EndModuleCommand(
                module_parameters)
            self.__archive_end_modules_commands.put(archive_end_module_command)
    def create_worker(self):
        """Create the worker threads via threadHelper and return them as a list."""
        threads = []
        downloadWorker = DownloadWorker(
            self.__archive_commands, self.__download_status, self.__is_download_finished if self.__modules is None else None)
        threads.append(threadHelper.create_thread(downloadWorker.execute))
        if self.__modules is not None:
            moduleWorker = ModuleWorker(
                self.__archive_modules_commands, self.__download_status, self.__is_download_finished)
            threads.append(threadHelper.create_thread(moduleWorker.execute))
        if self.__end_modules is not None:
            endWorker = EndModuleWorker(
                self.__archive_end_modules_commands, self.__is_download_finished)
            # Bug fix: pass the bound method itself, not the result of calling it.
            # ``endWorker.execute()`` ran the worker synchronously right here and
            # handed its return value (not a callable) to the thread helper,
            # unlike the download/module workers above.
            threads.append(threadHelper.create_thread(endWorker.execute))
        return threads
    def __create_dir_name(self, target, start_date, end_date):
        """Build the archive directory name: '<target>__<start>__<end>'."""
        return target + '__' + start_date.strftime('%Y-%m-%dT%H-%M-%S') + '__' + end_date.strftime('%Y-%m-%dT%H-%M-%S')
| 49.419355 | 125 | 0.744778 | 2,376 | 0.775457 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.020888 |
2d63bc571680f79f12326140ffc844c72f08a20c | 418 | py | Python | DataWorkflow/file_deletion/migrations/0005_maxlength_filename.py | Swiss-Polar-Institute/data-workflow | 4c7fee1d78d67512ae6710449e625fd945468dd9 | [
"MIT"
] | null | null | null | DataWorkflow/file_deletion/migrations/0005_maxlength_filename.py | Swiss-Polar-Institute/data-workflow | 4c7fee1d78d67512ae6710449e625fd945468dd9 | [
"MIT"
] | 12 | 2019-10-25T15:01:06.000Z | 2021-09-22T18:02:03.000Z | DataWorkflow/file_deletion/migrations/0005_maxlength_filename.py | Swiss-Polar-Institute/data-workflow | 4c7fee1d78d67512ae6710449e625fd945468dd9 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-25 19:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen ``DeletedFile.filename`` to 256 characters."""

    dependencies = [("file_deletion", "0004_maxlength_etag")]

    operations = [
        migrations.AlterField(
            model_name="deletedfile",
            name="filename",
            field=models.CharField(help_text="Filename", max_length=256),
        ),
    ]
| 22 | 73 | 0.617225 | 325 | 0.777512 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.277512 |
2d641c2640b0ffe80cc97c24622ffeee1f154b10 | 84 | py | Python | tools/Sikuli/DoReplace.sikuli/DoReplace.py | marmyshev/vanessa-automation | 9f87bd6df58b4c205104d3ae8e3643752d67eef7 | [
"BSD-3-Clause"
] | 296 | 2018-05-27T08:03:14.000Z | 2022-03-19T08:36:11.000Z | tools/Sikuli/DoReplace.sikuli/DoReplace.py | marmyshev/vanessa-automation | 9f87bd6df58b4c205104d3ae8e3643752d67eef7 | [
"BSD-3-Clause"
] | 1,562 | 2018-05-27T18:36:25.000Z | 2022-03-31T07:35:11.000Z | tools/Sikuli/DoReplace.sikuli/DoReplace.py | marmyshev/vanessa-automation | 9f87bd6df58b4c205104d3ae8e3643752d67eef7 | [
"BSD-3-Clause"
] | 299 | 2018-06-18T20:00:56.000Z | 2022-03-29T12:29:55.000Z | click(Pattern("Bameumbrace.png").similar(0.80))
# Give the UI a second to settle after the previous click.
sleep(1)
# Click the next target, identified by its screenshot pattern.
click("3abnb.png")
# Terminate the Sikuli script with success status.
exit(0)
| 16.8 | 47 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.333333 |
2d6468482d56c0366eac034ecc843d9749364fb7 | 1,459 | py | Python | src/python/procyon/types.py | orbea/procyon | 469d94427d3b6e7cc2ab93606bdf968717a49150 | [
"Apache-2.0"
] | null | null | null | src/python/procyon/types.py | orbea/procyon | 469d94427d3b6e7cc2ab93606bdf968717a49150 | [
"Apache-2.0"
] | null | null | null | src/python/procyon/types.py | orbea/procyon | 469d94427d3b6e7cc2ab93606bdf968717a49150 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 The Procyon Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import enum
from . import py3
class Type(enum.Enum):
    """Procyon value type tags; see ``typeof`` for the Python-type mapping."""
    NULL = 0
    BOOL = 1
    INT = 2
    FLOAT = 3
    DATA = 4
    STRING = 5
    ARRAY = 6
    MAP = 7
def typeof(x):
    """Return the Procyon :class:`Type` corresponding to the value *x*.

    Raises:
        TypeError: if *x* is not Procyon-serializable.
    """
    # Identity checks first: None, then the two bool singletons. The bool test
    # must precede the int test because bool is a subclass of int.
    if x is None:
        return Type.NULL
    if x is True or x is False:
        return Type.BOOL
    # Remaining types resolve to the first matching isinstance check.
    dispatch = (
        ((int, py3.long), Type.INT),
        (float, Type.FLOAT),
        ((bytes, bytearray, memoryview), Type.DATA),
        (py3.unicode, Type.STRING),
        ((tuple, list), Type.ARRAY),
        (dict, Type.MAP),
    )
    for classes, procyon_type in dispatch:
        if isinstance(x, classes):
            return procyon_type
    raise TypeError("%r is not Procyon-serializable" % x)
| 27.018519 | 82 | 0.669637 | 128 | 0.087731 | 0 | 0 | 0 | 0 | 0 | 0 | 648 | 0.44414 |
2d6528c8b581dc40057b868ca114d1c79a0e8e3f | 5,053 | py | Python | approval/models.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | 2 | 2019-06-08T22:50:59.000Z | 2020-07-12T14:13:18.000Z | approval/models.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | 13 | 2020-02-11T23:51:43.000Z | 2021-06-05T13:10:49.000Z | approval/models.py | rajeshr188/django-onex | f1086a4159b1d135e54327c77c93fcc6c446338f | [
"MIT"
] | null | null | null | from django.db import models,transaction
from contact.models import Customer
from product.models import Stock
from django.urls import reverse
from django.db.models import Sum
# Create your models here.
class Approval(models.Model):
    """Stock approval voucher: items handed to a customer for review.

    Posting removes the lines' stock; unposting reverses this and is
    blocked once the approval has been billed.
    """
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    created_by = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE,
                                   null=True, blank=True)
    contact = models.ForeignKey(Customer,
                                related_name='contact', on_delete=models.CASCADE)
    total_wt = models.DecimalField(max_digits=10, decimal_places=3, default=0)
    total_qty = models.IntegerField(default=0)
    posted = models.BooleanField(default=False)
    is_billed = models.BooleanField(default=False)
    status = models.CharField(max_length=10,
                              choices=(('Pending', 'Pending'),
                                       ('Complete', 'Complete')),
                              default='Pending')

    class Meta:
        ordering = ('created_at',)

    def __str__(self):
        return f"{self.id}"

    def get_absolute_url(self):
        return reverse('approval_approval_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse('approval_approval_update', args=(self.pk,))

    @transaction.atomic()
    def post(self):
        """Post the voucher: post every line, then mark it posted."""
        if not self.posted:
            for i in self.items.all():
                i.post()
            self.posted = True
            self.save(update_fields=['posted'])

    @transaction.atomic()
    def unpost(self):
        """Reverse a posted voucher; a billed voucher cannot be unposted."""
        # if is billed cant unpost
        if self.posted and not self.is_billed:
            for i in self.items.all():
                i.unpost()
            self.posted = False
            self.save(update_fields=['posted'])

    def update_status(self):
        """Recompute voucher status from its lines.

        Fix: removed leftover debug ``print`` statements that dumped every
        line's status on each call.
        """
        # Pending as long as any line is still out with the customer.
        if any(i.status == 'Pending' for i in self.items.all()):
            self.status = 'Pending'
        else:
            self.status = 'Complete'
        self.save()
class ApprovalLine(models.Model):
    """One stock item on an approval voucher."""
    product = models.ForeignKey(Stock, related_name='product',
                                on_delete=models.CASCADE)
    quantity = models.IntegerField(default=0)
    weight = models.DecimalField(max_digits=10, decimal_places=3, default=0.0)
    touch = models.DecimalField(max_digits=10, decimal_places=3, default=0.0)
    approval = models.ForeignKey(Approval,
                                 on_delete=models.CASCADE,
                                 related_name='items')
    status = models.CharField(max_length=30,
                              choices=(
                                  ('Pending', 'Pending'),
                                  ('Returned', 'Returned'),
                                  ('Billed', 'Billed')),
                              default='Pending',
                              blank=True)

    class Meta:
        ordering = ('approval',)

    def __str__(self):
        return f"{self.id}"

    def balance(self):
        """Weight still out with the customer: line weight minus posted returns.

        Fix: ``aggregate`` yields ``None`` when there are no posted returns,
        which previously made the subtraction raise ``TypeError``; it is now
        treated as 0.
        """
        returned = self.approvallinereturn_set.filter(posted=True).aggregate(
            t=Sum('weight'))['t'] or 0
        return self.weight - returned

    def post(self):
        """Remove this line's weight/quantity from stock (code 'A')."""
        self.product.remove(self.weight, self.quantity, None, 'A')

    def unpost(self):
        """Unpost and delete all returns, then put the stock back (code 'AR')."""
        for i in self.approvallinereturn_set.all():
            i.unpost()
            i.delete()
        self.product.add(self.weight, self.quantity, None, 'AR')

    def update_status(self):
        """Mark the line Returned once posted returns cover the full line.

        With no posted returns the aggregates are ``None`` and the equality
        check is False, so the line stays Pending.
        """
        ret = self.approvallinereturn_set.filter(
            posted=True
        ).aggregate(
            qty=Sum('quantity'),
            wt=Sum('weight'))
        if self.quantity == ret['qty'] and self.weight == ret['wt']:
            self.status = 'Returned'
        else:
            self.status = 'Pending'
        self.save()
        self.approval.update_status()
class ApprovalLineReturn(models.Model):
    """A (possibly partial) return of an approval line back into stock."""
    created_at = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey('users.CustomUser',
                                   on_delete=models.CASCADE,
                                   null=True, blank=True)
    line = models.ForeignKey(ApprovalLine, on_delete=models.CASCADE)
    quantity = models.IntegerField(default=0)
    weight = models.DecimalField(max_digits=10, decimal_places=3, default=0.0)
    posted = models.BooleanField(default=False)

    class Meta:
        ordering = ('id',)

    def __str__(self):
        return f"{self.line.product}"

    def post(self):
        """Post the return (restock the product) and refresh the line status.

        The owning line's status is refreshed even if the return was
        already posted.
        """
        if not self.posted:
            self.line.product.add(self.weight, self.quantity, None, 'AR')
            self.posted = True
            self.save(update_fields=['posted'])
        self.line.update_status()

    def unpost(self):
        """Reverse a posted return (take the stock back out) and refresh status.

        The owning line's status is refreshed even if the return was
        not posted.
        """
        if self.posted:
            self.line.product.remove(self.weight, self.quantity, None, 'A')
            self.posted = False
            self.save(update_fields=['posted'])
        self.line.update_status()
| 34.37415 | 81 | 0.579458 | 4,839 | 0.957649 | 0 | 0 | 475 | 0.094004 | 0 | 0 | 523 | 0.103503 |
2d662757e1d86c36d9d911b8a2139bac3250db00 | 221 | py | Python | Other/pdf2imageTest.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | [
"MIT"
] | 1 | 2019-08-17T03:31:19.000Z | 2019-08-17T03:31:19.000Z | Other/pdf2imageTest.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | [
"MIT"
] | null | null | null | Other/pdf2imageTest.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | [
"MIT"
] | null | null | null | from pdf2image import convert_from_path, convert_from_bytes
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
# Convert each page of the PDF into an image object (one entry per page).
# NOTE(review): the backslash in '.\git_cheat_sheet.pdf' only works because
# '\g' is not a recognized escape sequence; prefer a forward slash — confirm
# the target OS before changing.
images = convert_from_path('.\git_cheat_sheet.pdf')
| 27.625 | 59 | 0.809955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.104072 |
2d677e943a4a9eb487c23dd1525395a69ff39881 | 3,097 | py | Python | Google-Meet-Scheduler/script.py | dsrao711/Amazing-Python-Scripts | 4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7 | [
"MIT"
] | 1 | 2021-04-17T08:33:25.000Z | 2021-04-17T08:33:25.000Z | Google-Meet-Scheduler/script.py | dsrao711/Amazing-Python-Scripts | 4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7 | [
"MIT"
] | null | null | null | Google-Meet-Scheduler/script.py | dsrao711/Amazing-Python-Scripts | 4a8bf7bc3d0c6f2c7838d1380c7f9ddbfce766b7 | [
"MIT"
] | 1 | 2021-07-22T07:06:09.000Z | 2021-07-22T07:06:09.000Z | from googleapiclient.discovery import build
from uuid import uuid4
from google.auth.transport.requests import Request
from pathlib import Path
from google_auth_oauthlib.flow import InstalledAppFlow
from typing import Dict, List
from pickle import load, dump
class CreateMeet:
    """Schedule a Google Meet through the Calendar API.

    On construction the class authenticates, inserts a calendar event with a
    Meet conference link, and stores the API response in ``event_states``.
    """

    def __init__(self, attendees: Dict[str, str], event_time: Dict[str, str], topic):
        service = self._auth()
        # The API expects a list of {"email": ...} dicts.
        guest_list = [{"email": address} for address in attendees.values()]
        self.event_states = self._create_event(guest_list, event_time, service, topic)

    @staticmethod
    def _create_event(attendees: List[Dict[str, str]], event_time, authe: build, topic):
        """Insert the event on the primary calendar and return the API response."""
        request_body = {
            "conferenceData": {
                "createRequest": {
                    "requestId": f"{uuid4().hex}",
                    "conferenceSolutionKey": {"type": "hangoutsMeet"},
                }
            },
            "attendees": attendees,
            "start": {"dateTime": event_time["start"], 'timeZone': 'Asia/Kolkata'},
            "end": {"dateTime": event_time["end"], 'timeZone': 'Asia/Kolkata'},
            "summary": topic,
            "reminders": {"useDefault": True},
        }
        response = authe.events().insert(
            calendarId="primary",
            sendNotifications=True,
            body=request_body,
            conferenceDataVersion=1,
        ).execute()
        return response

    @staticmethod
    def _auth():
        """Load (or obtain and cache) OAuth credentials, then build the service.

        Credentials are pickled to ./token.pickle; expired ones are refreshed,
        missing ones trigger the installed-app browser flow.
        """
        token_path = Path("./token.pickle")
        scopes = ["https://www.googleapis.com/auth/calendar"]
        creds = None
        if token_path.exists():
            with open(token_path, "rb") as token:
                creds = load(token)
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file('credentials.json', scopes)
                creds = flow.run_local_server(port=0)
            # Cache the (new or refreshed) credentials for the next run.
            with open(token_path, "wb") as token:
                dump(creds, token)
        return build("calendar", "v3", credentials=creds)
# --- Interactive CLI: collect meeting details and schedule the Meet ---
# Date must be YYYY-MM-DD so it can be concatenated into the timestamp below.
print('------------------------------')
print('-- Follow YYYY-MM-DD format --')
print('------------------------------')
date = input('Enter date of the meeting : ').strip()
# Times are 24-hour HH:MM.
print('------------------------------------')
print('-- Follow HH:MM and 24 hrs format --')
print('------------------------------------')
start = input('Enter starting time : ').strip()
end = input('Enter ending time : ').strip()
# Guest e-mail addresses, whitespace-separated.
emails = list(input('Enter the emails of guests separated by 1 space each : ').strip().split())
topic = input('Enter the topic of the meeting : ')
# Build the start/end timestamps CreateMeet expects: "<date>T<time>:00.000000".
time = {
    'start':date+'T'+start+':00.000000',
    'end':date+'T'+end+':00.000000'
}
# CreateMeet takes a mapping whose values are the guest e-mails; here each
# address simply maps to itself.
guests = { email : email for email in emails }
meet = CreateMeet(guests, time, topic)
# Pick the interesting fields out of the created event and print them.
keys = ['organizer','hangoutLink', 'summary', 'start', 'end', 'attendees']
details = { key: meet.event_states[key] for key in keys }
print('---------------------')
print('-- Meeting Details --')
print('---------------------')
for key in keys:
    print(key+' : ', details[key])
| 44.884058 | 137 | 0.60155 | 1,834 | 0.592186 | 0 | 0 | 1,543 | 0.498224 | 0 | 0 | 900 | 0.290604 |
2d6b129b7aafbb8730c539aaa5e1008046619452 | 392 | py | Python | app/main/models/rover.py | Jeffmusa/Twende_Dev_Project | 97881cd4ad754cfd01ba02da912b4c4515d76327 | [
"MIT"
] | 1 | 2019-10-08T13:09:21.000Z | 2019-10-08T13:09:21.000Z | app/main/models/rover.py | Jeffmusa/Twende_Dev_Project | 97881cd4ad754cfd01ba02da912b4c4515d76327 | [
"MIT"
] | null | null | null | app/main/models/rover.py | Jeffmusa/Twende_Dev_Project | 97881cd4ad754cfd01ba02da912b4c4515d76327 | [
"MIT"
] | 4 | 2018-09-04T07:10:25.000Z | 2019-10-08T12:53:43.000Z | class Rover:
def __init__(self,photo,name,date):
self.photo = photo
self.name = name
self.date = date
class Articles:
    """Plain value object holding one news article record."""

    def __init__(self, author, title, description, url, poster, time):
        # Keep every field exactly as supplied by the caller.
        self.author = author
        self.title = title
        self.description = description
        self.url = url
        self.poster = poster
        self.time = time
2d6b6bfc0fc1ea8f28924520ef3ca4e11fecded3 | 782 | py | Python | submissions/urls.py | annalee/alienplan | b54722d683a5e8eba03f4467a367bcf24339bb32 | [
"MIT"
] | 5 | 2018-07-23T13:44:33.000Z | 2021-12-24T20:13:57.000Z | submissions/urls.py | annalee/alienplan | b54722d683a5e8eba03f4467a367bcf24339bb32 | [
"MIT"
] | 25 | 2018-02-11T00:02:08.000Z | 2021-07-06T22:35:30.000Z | submissions/urls.py | annalee/alienplan | b54722d683a5e8eba03f4467a367bcf24339bb32 | [
"MIT"
] | 1 | 2021-12-02T14:48:15.000Z | 2021-12-02T14:48:15.000Z | from django.urls import path
from . import views
# Routes for the submissions app. Each route exists in two variants: a
# "-noslug" form without a convention slug and one taking <slug:conslug>.
urlpatterns = [
    # Panel submission form.
    path('panel/', views.panel, name='panel-noslug'),
    path('panel/<slug:conslug>/', views.panel, name='panel'),
    # List of pending panel submissions.
    path('panelreview/',
        views.PendingPanelList.as_view(), name='pending-panel-list-noslug'),
    path('panelreview/<slug:conslug>/',
        views.PendingPanelList.as_view(), name='pending-panel-list'),
    # Detail view for a single pending panel, addressed by primary key.
    path('panelreview/detail/<int:pk>/',
        views.PendingPanelDetail.as_view(), name='pending-panel-detail-noslug'),
    path('panelreview/<slug:conslug>/detail/<int:pk>/',
        views.PendingPanelDetail.as_view(), name='pending-panel-detail'),
    # Panelist signup form.
    path('panelist/', views.panelist, name='panelist-noslug'),
    path('panelist/<slug:conslug>/', views.panelist, name='panelist'),
]
2d6c392d23be86039709559e2b39f5d2016733c8 | 1,538 | py | Python | voicebox_project.py | raccoonML/audiotools | 9b378a7e4b136cdb5e1b7a048f8d35794880a4b0 | [
"MIT"
] | null | null | null | voicebox_project.py | raccoonML/audiotools | 9b378a7e4b136cdb5e1b7a048f8d35794880a4b0 | [
"MIT"
] | null | null | null | voicebox_project.py | raccoonML/audiotools | 9b378a7e4b136cdb5e1b7a048f8d35794880a4b0 | [
"MIT"
] | null | null | null | import librosa
import numpy as np
import audio
from hparams import hparams
"""
This helps implement a user interface for a vocoder.
Currently this is Griffin-Lim but can be extended to different vocoders.
Required elements for the vocoder UI are:
self.sample_rate
self.source_action
self.vocode_action
"""
class Voicebox_Project:
    """Griffin-Lim vocoder backend for the Voicebox UI.

    Provides the interface elements Voicebox requires: ``sample_rate``,
    ``source_action`` and ``vocode_action``.
    """

    def __init__(self):
        # Sample rate is read by the Voicebox UI.
        self.sample_rate = hparams.sample_rate
        # Mel spectrogram of the most recently loaded source audio.
        self.source_spec = None

    def source_action(self, wav):
        """Handle [Load]: store the source spectrogram and also vocode it with
        Griffin-Lim for comparison.

        Returns (source spectrogram, GL waveform, GL spectrogram); both
        spectrograms are transposed for display.
        """
        self.source_spec = audio.melspectrogram(wav, hparams)
        reconstructed = audio.inv_mel_spectrogram(self.source_spec, hparams)
        reconstructed_spec = audio.melspectrogram(reconstructed, hparams)
        return self.source_spec.T, reconstructed, reconstructed_spec.T

    def vocode_action(self):
        """Handle [Vocode]: invert the stored source spectrogram with
        Griffin-Lim; returns (waveform, transposed spectrogram)."""
        synthesized = audio.inv_mel_spectrogram(self.source_spec, hparams)
        synthesized_spec = audio.melspectrogram(synthesized, hparams)
        return synthesized, synthesized_spec.T
| 32.041667 | 91 | 0.692458 | 1,228 | 0.79844 | 0 | 0 | 0 | 0 | 0 | 0 | 786 | 0.511053 |
2d6d0fc2ece7d6b6190b0adbadaceaefdccb6df6 | 472 | py | Python | templatetags/spreedly_tags.py | shelfworthy/django-spreedly | 95e4572fec67501835d08c3e54e9437345733784 | [
"MIT"
] | 2 | 2015-09-28T10:08:13.000Z | 2015-11-08T12:46:43.000Z | templatetags/spreedly_tags.py | shelfworthy/django-spreedly | 95e4572fec67501835d08c3e54e9437345733784 | [
"MIT"
] | null | null | null | templatetags/spreedly_tags.py | shelfworthy/django-spreedly | 95e4572fec67501835d08c3e54e9437345733784 | [
"MIT"
] | null | null | null | from django.conf import settings
from django import template
from spreedly.functions import subscription_url
register = template.Library()
@register.simple_tag
def existing_plan_url(user):
    """Template tag: URL of the user's Spreedly subscriber-account page.

    Built from the configured site name and the user's subscription token.
    """
    context = {
        'site_name': settings.SPREEDLY_SITE_NAME,
        'user_token': user.subscription.token,
    }
    return 'https://spreedly.com/%(site_name)s/subscriber_accounts/%(user_token)s' % context
@register.simple_tag
def new_plan_url(plan, user):
    """Template tag: Spreedly subscription URL for *plan* and *user*.

    Thin wrapper around ``spreedly.functions.subscription_url``.
    """
    return subscription_url(plan, user)
2d6f0d4c488163d6af2961ff00285a48a9efa061 | 981 | py | Python | tests/test_replace_lcsh.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | 1 | 2022-01-13T20:28:12.000Z | 2022-01-13T20:28:12.000Z | tests/test_replace_lcsh.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | null | null | null | tests/test_replace_lcsh.py | BookOps-CAT/ChangeSubject | bff86ad58685db169a99f9ad3b2df1179cffb5a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from src.replace_lcsh import replace_term, lcsh_fields, normalize_subfields
# def test_flip_impacted_fields(fake_bib):
# pass
# Each case replaces the subfield value at index ``arg`` with "local term";
# the odd indices (1, 3, 5, 7) are the value positions in the code/value
# pair list produced by the fake_subfields fixture.
@pytest.mark.parametrize(
    "arg,expectation",
    [
        (1, ["a", "local term", "x", "subX1", "x", "subX2", "z", "subZ."]),
        (3, ["a", "subA", "x", "local term", "x", "subX2", "z", "subZ."]),
        (5, ["a", "subA", "x", "subX1", "x", "local term", "z", "subZ."]),
        (7, ["a", "subA", "x", "subX1", "x", "subX2", "z", "local term."]),
    ],
)
def test_replace_term(fake_subfields, arg, expectation):
    # Only the addressed position may change; all other entries stay intact.
    assert replace_term(fake_subfields, arg, "local term") == expectation
def test_lcsh_fields(fake_subjects):
    # lcsh_fields should select exactly two fields from the fake_subjects
    # fixture (fixture defined in conftest).
    assert len(lcsh_fields(fake_subjects)) == 2
# NOTE(review): function name has a typo ("subields" -> "subfields"); kept
# as-is so the reported pytest test id stays stable.
def test_normalize_subields(fake_subfields):
    # Normalization lowercases subfield values and drops the trailing period
    # (cf. the "subX1"/"subZ." values used in test_replace_term above).
    assert normalize_subfields(fake_subfields) == [
        "a",
        "suba",
        "x",
        "subx1",
        "x",
        "subx2",
        "z",
        "subz",
    ]
| 24.525 | 75 | 0.554536 | 0 | 0 | 0 | 0 | 496 | 0.505607 | 0 | 0 | 320 | 0.326198 |
2d6f17060fd47cde9fb3914ca0a07ef8a05b18d0 | 6,178 | py | Python | tutorplanner/input/data.py | tutor-planner/tutor-planner | 25119d2d50322e3a68ebc2a12f9c49f95ff51ad8 | [
"ISC"
] | 1 | 2018-04-16T17:35:57.000Z | 2018-04-16T17:35:57.000Z | tutorplanner/input/data.py | tutor-planner/tutor-planner | 25119d2d50322e3a68ebc2a12f9c49f95ff51ad8 | [
"ISC"
] | null | null | null | tutorplanner/input/data.py | tutor-planner/tutor-planner | 25119d2d50322e3a68ebc2a12f9c49f95ff51ad8 | [
"ISC"
] | 1 | 2020-10-14T12:56:19.000Z | 2020-10-14T12:56:19.000Z | __author__ = ("Matthias Rost <mrost AT inet.tu-berlin.de>, "
"Alexander Elvers <aelvers AT inet.tu-berlin.de>")
__all__ = ["Data"]
from collections import OrderedDict
from typing import Dict, List, Optional
from . import tutor
from . import rooms
from ..util import converter
from ..util.settings import settings
class SingletonMeta(type):
    """
    Singleton meta class

    Calling a class with this metaclass always returns one shared instance,
    created lazily on the first call.
    """
    def __init__(cls, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # No instance exists until the class is first called.
        cls._instance = None

    def __call__(cls):
        if cls._instance is None:
            cls._instance = super().__call__()
        return cls._instance
class Data(metaclass=SingletonMeta):
    """
    Singleton for keeping all data together.
    Because some data might not yet exist, it is loaded on access: each
    property below builds its cache on first use and returns the cached
    value afterwards.
    """
    # Caches for the lazily-loaded properties; None means "not loaded yet".
    _tutor_by_name: Optional[Dict[str, tutor.Tutor]] = None
    _room_by_name: Optional[Dict[str, rooms.Room]] = None
    # first type, then name
    _room_by_type: Optional[Dict[str, Dict[str, rooms.Room]]] = None
    # tutor name -> day index -> hour -> availability value
    _availability: Optional[Dict[str, Dict[int, Dict[int, int]]]] = None
    # day index -> hour -> list of room names
    _bookings_tutorials: Optional[Dict[int, Dict[int, List[str]]]] = None
    # day index -> hour -> list of room names
    _bookings_pools: Optional[Dict[int, Dict[int, List[str]]]] = None
    _rooms_external: Optional[List[str]] = None
    @property
    def tutor_by_name(self) -> Dict[str, tutor.Tutor]:
        """
        tutors hashed by last name (OrderedDict, sorted by last name)
        """
        if self._tutor_by_name is None:
            self._tutor_by_name = OrderedDict(sorted([(t.last_name, t) for t in tutor.load_tutors()],
                                                     key=lambda x: x[0]))
        return self._tutor_by_name
    @property
    def room_by_name(self) -> Dict[str, rooms.Room]:
        """
        room hashed by room name (imported from the bookings CSV)
        """
        if self._room_by_name is None:
            self._room_by_name = {r.name: r for r in rooms.import_rooms_from_csv(settings.paths.bookings())}
        return self._room_by_name
    @property
    def room_by_type(self) -> Dict[str, Dict[str, rooms.Room]]:
        """
        room hashed by room type and then by room name
        """
        if self._room_by_type is None:
            self._room_by_type = {}
            for room in self.room_by_name.values():
                # This goes through the property again, but _room_by_type is
                # already set to a dict above, so the getter returns the very
                # dict being filled here.
                self.room_by_type.setdefault(room.type, {})[room.name] = room
        return self._room_by_type
    @property
    def availability(self) -> Dict[str, Dict[int, Dict[int, int]]]:
        """
        tutor availability, keyed tutor last name -> day index -> hour;
        missing (None) availability values are normalized to 0
        """
        if self._availability is None:
            self._availability = {} # tutor, day index, hour
            # TODO: use datetime.date instead of day index
            for t in self.tutor_by_name.values():
                self._availability[t.last_name] = {}
                for day, day_availability in t.availability.items():
                    d = converter.date_to_day_index(day)
                    self._availability[t.last_name][d] = {}
                    for hour, availability in day_availability.items():
                        self._availability[t.last_name][d][hour] = availability if availability is not None else 0
                        # self.availability[t.last_name][d][hour+1] = availability if availability is not None else 0
        return self._availability
    @property
    def bookings_tutorials(self) -> Dict[int, Dict[int, List[str]]]:
        """
        bookings of tutorial rooms, keyed day index -> hour -> room names
        """
        if self._bookings_tutorials is None:
            self._bookings_tutorials = {}
            for day in settings.days():
                day_index = converter.date_to_day_index(day)
                # Pre-fill every configured time slot with an empty list ...
                self._bookings_tutorials[day_index] = {}
                for time in settings.times():
                    self._bookings_tutorials[day_index][time] = []
                # ... then append each tutorial room at its booked times
                # (setdefault also covers times outside settings.times()).
                for room in self.room_by_type["tutorial"].values():
                    times = room.get_booked_times(day)
                    for time in times:
                        self._bookings_tutorials.setdefault(day_index, {}).setdefault(time, []).append(room.name)
            self._bookings_tutorials = converter.to_single_hour_precision(self._bookings_tutorials)
        return self._bookings_tutorials
    @property
    def bookings_pools(self) -> Dict[int, Dict[int, List[str]]]:
        """
        bookings of exercise pools (rooms whose type starts with "exercise"),
        keyed day index -> hour -> room names
        """
        if self._bookings_pools is None:
            self._bookings_pools = {}
            for day in settings.days():
                day_index = converter.date_to_day_index(day)
                self._bookings_pools[day_index] = {}
                for time in settings.times():
                    self._bookings_pools[day_index][time] = []
                for room in self.room_by_name.values():
                    if room.type.startswith("exercise"):
                        times = room.get_booked_times(day)
                        for time in times:
                            self._bookings_pools.setdefault(day_index, {}).setdefault(time, []).append(room.name)
            self._bookings_pools = converter.to_single_hour_precision(self._bookings_pools)
        return self._bookings_pools
    @property
    def rooms_external(self) -> List[str]:
        """
        list of room names (currently always empty)
        """
        if self._rooms_external is None:
            self._rooms_external = [] # this is static
        return self._rooms_external
    def get_number_of_tutorial_rooms(self, day: int, hour: int) -> int:
        """
        Get the number of tutorial rooms at the time slot.
        Returns 0 for unknown day/hour combinations.
        """
        if day not in self.bookings_tutorials or hour not in self.bookings_tutorials[day]:
            return 0
        return len(self.bookings_tutorials[day][hour])
    def get_exercise_rooms(self) -> List[str]:
        """
        Get a list of public pool rooms that are shown to the students:
        the "exercise" rooms plus any "exerciseMAR" rooms.
        """
        return list(self.room_by_type["exercise"].keys()) + list(self.room_by_type.get("exerciseMAR", {}).keys())
| 38.855346 | 117 | 0.597443 | 5,842 | 0.945613 | 0 | 0 | 4,061 | 0.657332 | 0 | 0 | 1,202 | 0.194561 |
2d6f721c6dc031148182f002703abf47c155dce1 | 359 | py | Python | tests/doubles/producers.py | ess-dmsc/JustBinIt | dc8242ed44f03e92f60618c96596025ec8cbc40e | [
"BSD-2-Clause"
] | null | null | null | tests/doubles/producers.py | ess-dmsc/JustBinIt | dc8242ed44f03e92f60618c96596025ec8cbc40e | [
"BSD-2-Clause"
] | 23 | 2018-12-04T11:50:37.000Z | 2022-03-17T11:30:39.000Z | tests/doubles/producers.py | ess-dmsc/JustBinIt | dc8242ed44f03e92f60618c96596025ec8cbc40e | [
"BSD-2-Clause"
] | 2 | 2019-07-24T11:13:41.000Z | 2020-08-04T18:33:22.000Z | from just_bin_it.exceptions import KafkaException
class SpyProducer:
    """Test double that records published messages instead of sending them."""

    def __init__(self, brokers=None):
        # Broker list is accepted for interface compatibility but unused.
        self.messages = []

    def publish_message(self, topic, message):
        """Record the (topic, message) pair for later inspection."""
        self.messages.append((topic, message))
# Test double whose publish always fails with a KafkaException.
class StubProducerThatThrows:
    def publish_message(self, topic, message):
        # Simulate a broker failure regardless of input.
        raise KafkaException("Some Kafka error")
| 23.933333 | 49 | 0.715877 | 303 | 0.844011 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.050139 |